/* linux/drivers/scsi/scsi.c */
   1/*
   2 *  scsi.c Copyright (C) 1992 Drew Eckhardt
   3 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
   4 *         Copyright (C) 2002, 2003 Christoph Hellwig
   5 *
   6 *  generic mid-level SCSI driver
   7 *      Initial versions: Drew Eckhardt
   8 *      Subsequent revisions: Eric Youngdale
   9 *
  10 *  <drew@colorado.edu>
  11 *
  12 *  Bug correction thanks go to :
  13 *      Rik Faith <faith@cs.unc.edu>
  14 *      Tommy Thorn <tthorn>
  15 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
  16 *
  17 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
  18 *  add scatter-gather, multiple outstanding request, and other
  19 *  enhancements.
  20 *
  21 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
  22 *  support added by Michael Neuffer <mike@i-connect.net>
  23 *
  24 *  Added request_module("scsi_hostadapter") for kerneld:
  25 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
  26 *  Bjorn Ekwall  <bj0rn@blox.se>
  27 *  (changed to kmod)
  28 *
  29 *  Major improvements to the timeout, abort, and reset processing,
  30 *  as well as performance modifications for large queue depths by
  31 *  Leonard N. Zubkoff <lnz@dandelion.com>
  32 *
  33 *  Converted cli() code to spinlocks, Ingo Molnar
  34 *
  35 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
  36 *
  37 *  out_of_space hacks, D. Gilbert (dpg) 990608
  38 */
  39
  40#include <linux/module.h>
  41#include <linux/moduleparam.h>
  42#include <linux/kernel.h>
  43#include <linux/timer.h>
  44#include <linux/string.h>
  45#include <linux/slab.h>
  46#include <linux/blkdev.h>
  47#include <linux/delay.h>
  48#include <linux/init.h>
  49#include <linux/completion.h>
  50#include <linux/unistd.h>
  51#include <linux/spinlock.h>
  52#include <linux/kmod.h>
  53#include <linux/interrupt.h>
  54#include <linux/notifier.h>
  55#include <linux/cpu.h>
  56#include <linux/mutex.h>
  57#include <linux/async.h>
  58#include <asm/unaligned.h>
  59
  60#include <scsi/scsi.h>
  61#include <scsi/scsi_cmnd.h>
  62#include <scsi/scsi_dbg.h>
  63#include <scsi/scsi_device.h>
  64#include <scsi/scsi_driver.h>
  65#include <scsi/scsi_eh.h>
  66#include <scsi/scsi_host.h>
  67#include <scsi/scsi_tcq.h>
  68
  69#include "scsi_priv.h"
  70#include "scsi_logging.h"
  71
  72#define CREATE_TRACE_POINTS
  73#include <trace/events/scsi.h>
  74
  75static void scsi_done(struct scsi_cmnd *cmd);
  76
  77/*
  78 * Definitions and constants.
  79 */
  80
  81/*
  82 * Note - the initial logging level can be set here to log events at boot time.
  83 * After the system is up, you may enable logging via the /proc interface.
  84 */
  85unsigned int scsi_logging_level;
  86#if defined(CONFIG_SCSI_LOGGING)
  87EXPORT_SYMBOL(scsi_logging_level);
  88#endif
  89
  90/* sd, scsi core and power management need to coordinate flushing async actions */
  91ASYNC_DOMAIN(scsi_sd_probe_domain);
  92EXPORT_SYMBOL(scsi_sd_probe_domain);
  93
  94/*
  95 * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
  96 * asynchronous system resume operations.  It is marked 'exclusive' to avoid
  97 * being included in the async_synchronize_full() that is invoked by
  98 * dpm_resume()
  99 */
 100ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
 101EXPORT_SYMBOL(scsi_sd_pm_domain);
 102
 103/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 104 * You may not alter any existing entry (although adding new ones is
 105 * encouraged once assigned by ANSI/INCITS T10
 106 */
 107static const char *const scsi_device_types[] = {
 108        "Direct-Access    ",
 109        "Sequential-Access",
 110        "Printer          ",
 111        "Processor        ",
 112        "WORM             ",
 113        "CD-ROM           ",
 114        "Scanner          ",
 115        "Optical Device   ",
 116        "Medium Changer   ",
 117        "Communications   ",
 118        "ASC IT8          ",
 119        "ASC IT8          ",
 120        "RAID             ",
 121        "Enclosure        ",
 122        "Direct-Access-RBC",
 123        "Optical card     ",
 124        "Bridge controller",
 125        "Object storage   ",
 126        "Automation/Drive ",
 127};
 128
 129/**
 130 * scsi_device_type - Return 17 char string indicating device type.
 131 * @type: type number to look up
 132 */
 133
 134const char * scsi_device_type(unsigned type)
 135{
 136        if (type == 0x1e)
 137                return "Well-known LUN   ";
 138        if (type == 0x1f)
 139                return "No Device        ";
 140        if (type >= ARRAY_SIZE(scsi_device_types))
 141                return "Unknown          ";
 142        return scsi_device_types[type];
 143}
 144
 145EXPORT_SYMBOL(scsi_device_type);
 146
/*
 * A command pool bundles the slab caches a host allocates its commands
 * and sense buffers from.  Hosts without a driver-private cmd_size share
 * one of the two static pools below; cmd_size hosts get a pool of their
 * own (see scsi_alloc_host_cmd_pool()).
 */
struct scsi_host_cmd_pool {
	struct kmem_cache	*cmd_slab;	/* struct scsi_cmnd (+ hostt->cmd_size extra) */
	struct kmem_cache	*sense_slab;	/* SCSI_SENSE_BUFFERSIZE sense buffers */
	unsigned int		users;		/* hosts currently attached to this pool */
	char			*cmd_name;	/* cmd_slab cache name (kfree'd for dynamic pools) */
	char			*sense_name;	/* sense_slab cache name (ditto) */
	unsigned int		slab_flags;	/* flags passed to kmem_cache_create() */
	gfp_t			gfp_mask;	/* extra gfp bits OR'd into allocations (e.g. __GFP_DMA) */
};

/* Shared pool for ordinary hosts (no cmd_size, no ISA DMA restriction). */
static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.cmd_name	= "scsi_cmd_cache",
	.sense_name	= "scsi_sense_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

/* Shared pool for hosts that need ISA DMA-able (low) memory. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.cmd_name	= "scsi_cmd_cache(DMA)",
	.sense_name	= "scsi_sense_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

/* Protects pool creation/destruction and the users refcounts above. */
static DEFINE_MUTEX(host_cmd_pool_mutex);
 171
 172/**
 173 * scsi_host_free_command - internal function to release a command
 174 * @shost:      host to free the command for
 175 * @cmd:        command to release
 176 *
 177 * the command must previously have been allocated by
 178 * scsi_host_alloc_command.
 179 */
 180static void
 181scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 182{
 183        struct scsi_host_cmd_pool *pool = shost->cmd_pool;
 184
 185        if (cmd->prot_sdb)
 186                kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
 187        kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
 188        kmem_cache_free(pool->cmd_slab, cmd);
 189}
 190
 191/**
 192 * scsi_host_alloc_command - internal function to allocate command
 193 * @shost:      SCSI host whose pool to allocate from
 194 * @gfp_mask:   mask for the allocation
 195 *
 196 * Returns a fully allocated command with sense buffer and protection
 197 * data buffer (where applicable) or NULL on failure
 198 */
 199static struct scsi_cmnd *
 200scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 201{
 202        struct scsi_host_cmd_pool *pool = shost->cmd_pool;
 203        struct scsi_cmnd *cmd;
 204
 205        cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
 206        if (!cmd)
 207                goto fail;
 208
 209        cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
 210                                             gfp_mask | pool->gfp_mask);
 211        if (!cmd->sense_buffer)
 212                goto fail_free_cmd;
 213
 214        if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
 215                cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
 216                if (!cmd->prot_sdb)
 217                        goto fail_free_sense;
 218        }
 219
 220        return cmd;
 221
 222fail_free_sense:
 223        kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
 224fail_free_cmd:
 225        kmem_cache_free(pool->cmd_slab, cmd);
 226fail:
 227        return NULL;
 228}
 229
 230/**
 231 * __scsi_get_command - Allocate a struct scsi_cmnd
 232 * @shost: host to transmit command
 233 * @gfp_mask: allocation mask
 234 *
 235 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
 236 *              host's free_list if necessary.
 237 */
 238struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 239{
 240        struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
 241
 242        if (unlikely(!cmd)) {
 243                unsigned long flags;
 244
 245                spin_lock_irqsave(&shost->free_list_lock, flags);
 246                if (likely(!list_empty(&shost->free_list))) {
 247                        cmd = list_entry(shost->free_list.next,
 248                                         struct scsi_cmnd, list);
 249                        list_del_init(&cmd->list);
 250                }
 251                spin_unlock_irqrestore(&shost->free_list_lock, flags);
 252
 253                if (cmd) {
 254                        void *buf, *prot;
 255
 256                        buf = cmd->sense_buffer;
 257                        prot = cmd->prot_sdb;
 258
 259                        memset(cmd, 0, sizeof(*cmd));
 260
 261                        cmd->sense_buffer = buf;
 262                        cmd->prot_sdb = prot;
 263                }
 264        }
 265
 266        return cmd;
 267}
 268EXPORT_SYMBOL_GPL(__scsi_get_command);
 269
 270/**
 271 * scsi_get_command - Allocate and setup a scsi command block
 272 * @dev: parent scsi device
 273 * @gfp_mask: allocator flags
 274 *
 275 * Returns:     The allocated scsi command structure.
 276 */
 277struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 278{
 279        struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
 280        unsigned long flags;
 281
 282        if (unlikely(cmd == NULL))
 283                return NULL;
 284
 285        cmd->device = dev;
 286        INIT_LIST_HEAD(&cmd->list);
 287        INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
 288        spin_lock_irqsave(&dev->list_lock, flags);
 289        list_add_tail(&cmd->list, &dev->cmd_list);
 290        spin_unlock_irqrestore(&dev->list_lock, flags);
 291        cmd->jiffies_at_alloc = jiffies;
 292        return cmd;
 293}
 294EXPORT_SYMBOL(scsi_get_command);
 295
 296/**
 297 * __scsi_put_command - Free a struct scsi_cmnd
 298 * @shost: dev->host
 299 * @cmd: Command to free
 300 */
 301void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 302{
 303        unsigned long flags;
 304
 305        if (unlikely(list_empty(&shost->free_list))) {
 306                spin_lock_irqsave(&shost->free_list_lock, flags);
 307                if (list_empty(&shost->free_list)) {
 308                        list_add(&cmd->list, &shost->free_list);
 309                        cmd = NULL;
 310                }
 311                spin_unlock_irqrestore(&shost->free_list_lock, flags);
 312        }
 313
 314        if (likely(cmd != NULL))
 315                scsi_host_free_command(shost, cmd);
 316}
 317EXPORT_SYMBOL(__scsi_put_command);
 318
 319/**
 320 * scsi_put_command - Free a scsi command block
 321 * @cmd: command block to free
 322 *
 323 * Returns:     Nothing.
 324 *
 325 * Notes:       The command must not belong to any lists.
 326 */
 327void scsi_put_command(struct scsi_cmnd *cmd)
 328{
 329        unsigned long flags;
 330
 331        /* serious error if the command hasn't come from a device list */
 332        spin_lock_irqsave(&cmd->device->list_lock, flags);
 333        BUG_ON(list_empty(&cmd->list));
 334        list_del_init(&cmd->list);
 335        spin_unlock_irqrestore(&cmd->device->list_lock, flags);
 336
 337        cancel_delayed_work(&cmd->abort_work);
 338
 339        __scsi_put_command(cmd->device->host, cmd);
 340}
 341EXPORT_SYMBOL(scsi_put_command);
 342
 343static struct scsi_host_cmd_pool *
 344scsi_find_host_cmd_pool(struct Scsi_Host *shost)
 345{
 346        if (shost->hostt->cmd_size)
 347                return shost->hostt->cmd_pool;
 348        if (shost->unchecked_isa_dma)
 349                return &scsi_cmd_dma_pool;
 350        return &scsi_cmd_pool;
 351}
 352
 353static void
 354scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
 355{
 356        kfree(pool->sense_name);
 357        kfree(pool->cmd_name);
 358        kfree(pool);
 359}
 360
 361static struct scsi_host_cmd_pool *
 362scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
 363{
 364        struct scsi_host_template *hostt = shost->hostt;
 365        struct scsi_host_cmd_pool *pool;
 366
 367        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 368        if (!pool)
 369                return NULL;
 370
 371        pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
 372        pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
 373        if (!pool->cmd_name || !pool->sense_name) {
 374                scsi_free_host_cmd_pool(pool);
 375                return NULL;
 376        }
 377
 378        pool->slab_flags = SLAB_HWCACHE_ALIGN;
 379        if (shost->unchecked_isa_dma) {
 380                pool->slab_flags |= SLAB_CACHE_DMA;
 381                pool->gfp_mask = __GFP_DMA;
 382        }
 383
 384        if (hostt->cmd_size)
 385                hostt->cmd_pool = pool;
 386
 387        return pool;
 388}
 389
/*
 * scsi_get_host_cmd_pool - find or create the command pool for a host and
 * take a reference on it.
 *
 * The first user of a pool also creates its two slab caches (command and
 * sense buffer).  The command slab is sized for the template's extra
 * cmd_size.  Runs under host_cmd_pool_mutex; returns the pool or NULL on
 * allocation failure.
 */
static struct scsi_host_cmd_pool *
scsi_get_host_cmd_pool(struct Scsi_Host *shost)
{
	struct scsi_host_template *hostt = shost->hostt;
	struct scsi_host_cmd_pool *retval = NULL, *pool;
	size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;

	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = scsi_find_host_cmd_pool(shost);
	if (!pool) {
		/* only reachable for cmd_size templates with no pool yet */
		pool = scsi_alloc_host_cmd_pool(shost);
		if (!pool)
			goto out;
	}

	if (!pool->users) {
		/* first reference: create the backing slab caches */
		pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
						   pool->slab_flags, NULL);
		if (!pool->cmd_slab)
			goto out_free_pool;

		pool->sense_slab = kmem_cache_create(pool->sense_name,
						     SCSI_SENSE_BUFFERSIZE, 0,
						     pool->slab_flags, NULL);
		if (!pool->sense_slab)
			goto out_free_slab;
	}

	pool->users++;
	retval = pool;
out:
	mutex_unlock(&host_cmd_pool_mutex);
	return retval;

out_free_slab:
	kmem_cache_destroy(pool->cmd_slab);
out_free_pool:
	/* static pools are never freed; dynamic ones must be detached too */
	if (hostt->cmd_size) {
		scsi_free_host_cmd_pool(pool);
		hostt->cmd_pool = NULL;
	}
	goto out;
}
 437
 438static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
 439{
 440        struct scsi_host_template *hostt = shost->hostt;
 441        struct scsi_host_cmd_pool *pool;
 442
 443        mutex_lock(&host_cmd_pool_mutex);
 444        pool = scsi_find_host_cmd_pool(shost);
 445
 446        /*
 447         * This may happen if a driver has a mismatched get and put
 448         * of the command pool; the driver should be implicated in
 449         * the stack trace
 450         */
 451        BUG_ON(pool->users == 0);
 452
 453        if (!--pool->users) {
 454                kmem_cache_destroy(pool->cmd_slab);
 455                kmem_cache_destroy(pool->sense_slab);
 456                if (hostt->cmd_size) {
 457                        scsi_free_host_cmd_pool(pool);
 458                        hostt->cmd_pool = NULL;
 459                }
 460        }
 461        mutex_unlock(&host_cmd_pool_mutex);
 462}
 463
 464/**
 465 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
 466 * @shost: host to allocate the freelist for.
 467 *
 468 * Description: The command freelist protects against system-wide out of memory
 469 * deadlock by preallocating one SCSI command structure for each host, so the
 470 * system can always write to a swap file on a device associated with that host.
 471 *
 472 * Returns:     Nothing.
 473 */
 474int scsi_setup_command_freelist(struct Scsi_Host *shost)
 475{
 476        const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
 477        struct scsi_cmnd *cmd;
 478
 479        spin_lock_init(&shost->free_list_lock);
 480        INIT_LIST_HEAD(&shost->free_list);
 481
 482        shost->cmd_pool = scsi_get_host_cmd_pool(shost);
 483        if (!shost->cmd_pool)
 484                return -ENOMEM;
 485
 486        /*
 487         * Get one backup command for this host.
 488         */
 489        cmd = scsi_host_alloc_command(shost, gfp_mask);
 490        if (!cmd) {
 491                scsi_put_host_cmd_pool(shost);
 492                shost->cmd_pool = NULL;
 493                return -ENOMEM;
 494        }
 495        list_add(&cmd->list, &shost->free_list);
 496        return 0;
 497}
 498
 499/**
 500 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
 501 * @shost: host whose freelist is going to be destroyed
 502 */
 503void scsi_destroy_command_freelist(struct Scsi_Host *shost)
 504{
 505        /*
 506         * If cmd_pool is NULL the free list was not initialized, so
 507         * do not attempt to release resources.
 508         */
 509        if (!shost->cmd_pool)
 510                return;
 511
 512        while (!list_empty(&shost->free_list)) {
 513                struct scsi_cmnd *cmd;
 514
 515                cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
 516                list_del_init(&cmd->list);
 517                scsi_host_free_command(shost, cmd);
 518        }
 519        shost->cmd_pool = NULL;
 520        scsi_put_host_cmd_pool(shost);
 521}
 522
 523#ifdef CONFIG_SCSI_LOGGING
 524void scsi_log_send(struct scsi_cmnd *cmd)
 525{
 526        unsigned int level;
 527
 528        /*
 529         * If ML QUEUE log level is greater than or equal to:
 530         *
 531         * 1: nothing (match completion)
 532         *
 533         * 2: log opcode + command of all commands
 534         *
 535         * 3: same as 2 plus dump cmd address
 536         *
 537         * 4: same as 3 plus dump extra junk
 538         */
 539        if (unlikely(scsi_logging_level)) {
 540                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
 541                                       SCSI_LOG_MLQUEUE_BITS);
 542                if (level > 1) {
 543                        scmd_printk(KERN_INFO, cmd, "Send: ");
 544                        if (level > 2)
 545                                printk("0x%p ", cmd);
 546                        printk("\n");
 547                        scsi_print_command(cmd);
 548                        if (level > 3) {
 549                                printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
 550                                       " queuecommand 0x%p\n",
 551                                        scsi_sglist(cmd), scsi_bufflen(cmd),
 552                                        cmd->device->host->hostt->queuecommand);
 553
 554                        }
 555                }
 556        }
 557}
 558
/*
 * Log a command at completion time, according to the ML COMPLETE log
 * level.  The printk sequence below deliberately builds each log line in
 * pieces, so statement order matters.
 */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		/* level 1 logs only failures; level 2+ logs everything */
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/* 
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			/* dump sense data when the target reported it */
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    cmd->device->host->host_busy,
					    cmd->device->host->host_failed);
		}
	}
}
 621#endif
 622
 623/**
 624 * scsi_cmd_get_serial - Assign a serial number to a command
 625 * @host: the scsi host
 626 * @cmd: command to assign serial number to
 627 *
 628 * Description: a serial number identifies a request for error recovery
 629 * and debugging purposes.  Protected by the Host_Lock of host.
 630 */
 631void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 632{
 633        cmd->serial_number = host->cmd_serial_number++;
 634        if (cmd->serial_number == 0) 
 635                cmd->serial_number = host->cmd_serial_number++;
 636}
 637EXPORT_SYMBOL(scsi_cmd_get_serial);
 638
/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Checks the device and host state, patches the LUN into the CDB for
 * pre-SCSI-3 devices, validates the CDB length, and finally hands the
 * command to the LLD's queuecommand().
 *
 * Return: nonzero return request was rejected and device's queue needs to be
 * plugged.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/* 
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */

		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/* 
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		/* LUN goes in bits 5-7 of CDB byte 1 for old devices */
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
			printk("queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	/* a dying host also errors the command immediately */
	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		trace_scsi_dispatch_cmd_start(cmd);
		cmd->scsi_done = scsi_done;
		rtn = host->hostt->queuecommand(host, cmd);
	}

	/* LLD rejected the command: requeue it with a normalized reason */
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		scsi_queue_insert(cmd, rtn);

		SCSI_LOG_MLQUEUE(3,
		    printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}
 736
/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	/* defer the real completion work to the block layer softirq */
	blk_complete_request(cmd->request);
}
 753
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	/* release the device/target/host busy accounting for this command */
	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	starget->target_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	/* start from the full transfer length, then let the ULD refine it */
	good_bytes = scsi_bufflen(cmd);
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
EXPORT_SYMBOL(scsi_finish_command);
 813
/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 *          this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 *        or number of commands the low level driver can
 *        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:     Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              the right thing depending on whether or not the device is
 *              currently active and whether or not it even has the
 *              command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/*
	 * Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit.
	 *
	 * Do not resize the tag map if it is a host wide share bqt,
	 * because the size should be the hosts's can_queue. If there
	 * is more IO than the LLD's can_queue (so there are not enuogh
	 * tags) request_fn's host queue ready check will handle it.
	 */
	if (!sdev->host->bqt) {
		if (blk_queue_tagged(sdev->request_queue) &&
		    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
			goto out;
	}

	sdev->queue_depth = tags;
	switch (tagged) {
		case MSG_ORDERED_TAG:
			sdev->ordered_tags = 1;
			sdev->simple_tags = 1;
			break;
		case MSG_SIMPLE_TAG:
			sdev->ordered_tags = 0;
			sdev->simple_tags = 1;
			break;
		default:
			/* unknown tag type: warn, then disable tagging */
			sdev_printk(KERN_WARNING, sdev,
				    "scsi_adjust_queue_depth, bad queue type, "
				    "disabled\n");
			/* fall through */
		case 0:
			sdev->ordered_tags = sdev->simple_tags = 0;
			sdev->queue_depth = tags;
			break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
 882
 883/**
 884 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 885 * @sdev: SCSI Device in question
 886 * @depth: Current number of outstanding SCSI commands on this device,
 887 *         not counting the one returned as QUEUE_FULL.
 888 *
 889 * Description: This function will track successive QUEUE_FULL events on a
 890 *              specific SCSI device to determine if and when there is a
 891 *              need to adjust the queue depth on the device.
 892 *
 893 * Returns:     0 - No change needed, >0 - Adjust queue depth to this new depth,
 894 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 895 *                      as the untagged command depth
 896 *
 897 * Lock Status: None held on entry
 898 *
 899 * Notes:       Low level drivers may call this at any time and we will do
 900 *              "The Right Thing."  We are interrupt context safe.
 901 */
 902int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 903{
 904
 905        /*
 906         * Don't let QUEUE_FULLs on the same
 907         * jiffies count, they could all be from
 908         * same event.
 909         */
 910        if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
 911                return 0;
 912
 913        sdev->last_queue_full_time = jiffies;
 914        if (sdev->last_queue_full_depth != depth) {
 915                sdev->last_queue_full_count = 1;
 916                sdev->last_queue_full_depth = depth;
 917        } else {
 918                sdev->last_queue_full_count++;
 919        }
 920
 921        if (sdev->last_queue_full_count <= 10)
 922                return 0;
 923        if (sdev->last_queue_full_depth < 8) {
 924                /* Drop back to untagged */
 925                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
 926                return -1;
 927        }
 928        
 929        if (sdev->ordered_tags)
 930                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
 931        else
 932                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
 933        return depth;
 934}
 935EXPORT_SYMBOL(scsi_track_queue_full);
 936
 937/**
 938 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 939 * @sdev: The device to ask
 940 * @buffer: Where to put the result
 941 * @page: Which Vital Product Data to return
 942 * @len: The length of the buffer
 943 *
 944 * This is an internal helper function.  You probably want to use
 945 * scsi_get_vpd_page instead.
 946 *
 947 * Returns size of the vpd page on success or a negative error number.
 948 */
 949static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 950                                                        u8 page, unsigned len)
 951{
 952        int result;
 953        unsigned char cmd[16];
 954
 955        if (len < 4)
 956                return -EINVAL;
 957
 958        cmd[0] = INQUIRY;
 959        cmd[1] = 1;             /* EVPD */
 960        cmd[2] = page;
 961        cmd[3] = len >> 8;
 962        cmd[4] = len & 0xff;
 963        cmd[5] = 0;             /* Control byte */
 964
 965        /*
 966         * I'm not convinced we need to try quite this hard to get VPD, but
 967         * all the existing users tried this hard.
 968         */
 969        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
 970                                  len, NULL, 30 * HZ, 3, NULL);
 971        if (result)
 972                return -EIO;
 973
 974        /* Sanity check that we got the page back that we asked for */
 975        if (buffer[1] != page)
 976                return -EIO;
 977
 978        return get_unaligned_be16(&buffer[2]) + 4;
 979}
 980
 981/**
 982 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 983 * @sdev: The device to ask
 984 * @page: Which Vital Product Data to return
 985 * @buf: where to store the VPD
 986 * @buf_len: number of bytes in the VPD buffer area
 987 *
 988 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 989 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 990 * If the device supports this VPD page, this routine returns a pointer
 991 * to a buffer containing the data from that page.  The caller is
 992 * responsible for calling kfree() on this pointer when it is no longer
 993 * needed.  If we cannot retrieve the VPD page this routine returns %NULL.
 994 */
 995int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 996                      int buf_len)
 997{
 998        int i, result;
 999
1000        if (sdev->skip_vpd_pages)
1001                goto fail;
1002
1003        /* Ask for all the pages supported by this device */
1004        result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
1005        if (result < 4)
1006                goto fail;
1007
1008        /* If the user actually wanted this page, we can skip the rest */
1009        if (page == 0)
1010                return 0;
1011
1012        for (i = 4; i < min(result, buf_len); i++)
1013                if (buf[i] == page)
1014                        goto found;
1015
1016        if (i < result && i >= buf_len)
1017                /* ran off the end of the buffer, give us benefit of doubt */
1018                goto found;
1019        /* The device claims it doesn't support the requested page */
1020        goto fail;
1021
1022 found:
1023        result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
1024        if (result < 0)
1025                goto fail;
1026
1027        return 0;
1028
1029 fail:
1030        return -EINVAL;
1031}
1032EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
1033
1034/**
1035 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
1036 * @sdev: The device to ask
1037 *
1038 * Attach the 'Device Identification' VPD page (0x83) and the
1039 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
1040 * structure. This information can be used to identify the device
1041 * uniquely.
1042 */
1043void scsi_attach_vpd(struct scsi_device *sdev)
1044{
1045        int result, i;
1046        int vpd_len = SCSI_VPD_PG_LEN;
1047        int pg80_supported = 0;
1048        int pg83_supported = 0;
1049        unsigned char *vpd_buf;
1050
1051        if (sdev->skip_vpd_pages)
1052                return;
1053retry_pg0:
1054        vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1055        if (!vpd_buf)
1056                return;
1057
1058        /* Ask for all the pages supported by this device */
1059        result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
1060        if (result < 0) {
1061                kfree(vpd_buf);
1062                return;
1063        }
1064        if (result > vpd_len) {
1065                vpd_len = result;
1066                kfree(vpd_buf);
1067                goto retry_pg0;
1068        }
1069
1070        for (i = 4; i < result; i++) {
1071                if (vpd_buf[i] == 0x80)
1072                        pg80_supported = 1;
1073                if (vpd_buf[i] == 0x83)
1074                        pg83_supported = 1;
1075        }
1076        kfree(vpd_buf);
1077        vpd_len = SCSI_VPD_PG_LEN;
1078
1079        if (pg80_supported) {
1080retry_pg80:
1081                vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1082                if (!vpd_buf)
1083                        return;
1084
1085                result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
1086                if (result < 0) {
1087                        kfree(vpd_buf);
1088                        return;
1089                }
1090                if (result > vpd_len) {
1091                        vpd_len = result;
1092                        kfree(vpd_buf);
1093                        goto retry_pg80;
1094                }
1095                sdev->vpd_pg80_len = result;
1096                sdev->vpd_pg80 = vpd_buf;
1097                vpd_len = SCSI_VPD_PG_LEN;
1098        }
1099
1100        if (pg83_supported) {
1101retry_pg83:
1102                vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1103                if (!vpd_buf)
1104                        return;
1105
1106                result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
1107                if (result < 0) {
1108                        kfree(vpd_buf);
1109                        return;
1110                }
1111                if (result > vpd_len) {
1112                        vpd_len = result;
1113                        kfree(vpd_buf);
1114                        goto retry_pg83;
1115                }
1116                sdev->vpd_pg83_len = result;
1117                sdev->vpd_pg83 = vpd_buf;
1118        }
1119}
1120
1121/**
1122 * scsi_report_opcode - Find out if a given command opcode is supported
1123 * @sdev:       scsi device to query
1124 * @buffer:     scratch buffer (must be at least 20 bytes long)
1125 * @len:        length of buffer
1126 * @opcode:     opcode for command to look up
1127 *
1128 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
1129 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
1130 * unsupported and 1 if the device claims to support the command.
1131 */
1132int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
1133                       unsigned int len, unsigned char opcode)
1134{
1135        unsigned char cmd[16];
1136        struct scsi_sense_hdr sshdr;
1137        int result;
1138
1139        if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
1140                return -EINVAL;
1141
1142        memset(cmd, 0, 16);
1143        cmd[0] = MAINTENANCE_IN;
1144        cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
1145        cmd[2] = 1;             /* One command format */
1146        cmd[3] = opcode;
1147        put_unaligned_be32(len, &cmd[6]);
1148        memset(buffer, 0, len);
1149
1150        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1151                                  &sshdr, 30 * HZ, 3, NULL);
1152
1153        if (result && scsi_sense_valid(&sshdr) &&
1154            sshdr.sense_key == ILLEGAL_REQUEST &&
1155            (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
1156                return -EINVAL;
1157
1158        if ((buffer[1] & 3) == 3) /* Command supported */
1159                return 1;
1160
1161        return 0;
1162}
1163EXPORT_SYMBOL(scsi_report_opcode);
1164
1165/**
1166 * scsi_device_get  -  get an additional reference to a scsi_device
1167 * @sdev:       device to get a reference to
1168 *
1169 * Description: Gets a reference to the scsi_device and increments the use count
1170 * of the underlying LLDD module.  You must hold host_lock of the
1171 * parent Scsi_Host or already have a reference when calling this.
1172 */
1173int scsi_device_get(struct scsi_device *sdev)
1174{
1175        if (sdev->sdev_state == SDEV_DEL)
1176                return -ENXIO;
1177        if (!get_device(&sdev->sdev_gendev))
1178                return -ENXIO;
1179        /* We can fail this if we're doing SCSI operations
1180         * from module exit (like cache flush) */
1181        try_module_get(sdev->host->hostt->module);
1182
1183        return 0;
1184}
1185EXPORT_SYMBOL(scsi_device_get);
1186
1187/**
1188 * scsi_device_put  -  release a reference to a scsi_device
1189 * @sdev:       device to release a reference on.
1190 *
1191 * Description: Release a reference to the scsi_device and decrements the use
1192 * count of the underlying LLDD module.  The device is freed once the last
1193 * user vanishes.
1194 */
1195void scsi_device_put(struct scsi_device *sdev)
1196{
1197#ifdef CONFIG_MODULE_UNLOAD
1198        struct module *module = sdev->host->hostt->module;
1199
1200        /* The module refcount will be zero if scsi_device_get()
1201         * was called from a module removal routine */
1202        if (module && module_refcount(module) != 0)
1203                module_put(module);
1204#endif
1205        put_device(&sdev->sdev_gendev);
1206}
1207EXPORT_SYMBOL(scsi_device_put);
1208
1209/* helper for shost_for_each_device, see that for documentation */
1210struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
1211                                           struct scsi_device *prev)
1212{
1213        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
1214        struct scsi_device *next = NULL;
1215        unsigned long flags;
1216
1217        spin_lock_irqsave(shost->host_lock, flags);
1218        while (list->next != &shost->__devices) {
1219                next = list_entry(list->next, struct scsi_device, siblings);
1220                /* skip devices that we can't get a reference to */
1221                if (!scsi_device_get(next))
1222                        break;
1223                next = NULL;
1224                list = list->next;
1225        }
1226        spin_unlock_irqrestore(shost->host_lock, flags);
1227
1228        if (prev)
1229                scsi_device_put(prev);
1230        return next;
1231}
1232EXPORT_SYMBOL(__scsi_iterate_devices);
1233
1234/**
1235 * starget_for_each_device  -  helper to walk all devices of a target
1236 * @starget:    target whose devices we want to iterate over.
1237 * @data:       Opaque passed to each function call.
1238 * @fn:         Function to call on each device
1239 *
1240 * This traverses over each device of @starget.  The devices have
1241 * a reference that must be released by scsi_host_put when breaking
1242 * out of the loop.
1243 */
1244void starget_for_each_device(struct scsi_target *starget, void *data,
1245                     void (*fn)(struct scsi_device *, void *))
1246{
1247        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1248        struct scsi_device *sdev;
1249
1250        shost_for_each_device(sdev, shost) {
1251                if ((sdev->channel == starget->channel) &&
1252                    (sdev->id == starget->id))
1253                        fn(sdev, data);
1254        }
1255}
1256EXPORT_SYMBOL(starget_for_each_device);
1257
1258/**
1259 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
1260 * @starget:    target whose devices we want to iterate over.
1261 * @data:       parameter for callback @fn()
1262 * @fn:         callback function that is invoked for each device
1263 *
1264 * This traverses over each device of @starget.  It does _not_
1265 * take a reference on the scsi_device, so the whole loop must be
1266 * protected by shost->host_lock.
1267 *
1268 * Note:  The only reason why drivers would want to use this is because
1269 * they need to access the device list in irq context.  Otherwise you
1270 * really want to use starget_for_each_device instead.
1271 **/
1272void __starget_for_each_device(struct scsi_target *starget, void *data,
1273                               void (*fn)(struct scsi_device *, void *))
1274{
1275        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1276        struct scsi_device *sdev;
1277
1278        __shost_for_each_device(sdev, shost) {
1279                if ((sdev->channel == starget->channel) &&
1280                    (sdev->id == starget->id))
1281                        fn(sdev, data);
1282        }
1283}
1284EXPORT_SYMBOL(__starget_for_each_device);
1285
1286/**
1287 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1288 * @starget:    SCSI target pointer
1289 * @lun:        SCSI Logical Unit Number
1290 *
1291 * Description: Looks up the scsi_device with the specified @lun for a given
1292 * @starget.  The returned scsi_device does not have an additional
1293 * reference.  You must hold the host's host_lock over this call and
1294 * any access to the returned scsi_device. A scsi_device in state
1295 * SDEV_DEL is skipped.
1296 *
1297 * Note:  The only reason why drivers should use this is because
1298 * they need to access the device list in irq context.  Otherwise you
1299 * really want to use scsi_device_lookup_by_target instead.
1300 **/
1301struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1302                                                   uint lun)
1303{
1304        struct scsi_device *sdev;
1305
1306        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1307                if (sdev->sdev_state == SDEV_DEL)
1308                        continue;
1309                if (sdev->lun ==lun)
1310                        return sdev;
1311        }
1312
1313        return NULL;
1314}
1315EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1316
1317/**
1318 * scsi_device_lookup_by_target - find a device given the target
1319 * @starget:    SCSI target pointer
1320 * @lun:        SCSI Logical Unit Number
1321 *
1322 * Description: Looks up the scsi_device with the specified @lun for a given
1323 * @starget.  The returned scsi_device has an additional reference that
1324 * needs to be released with scsi_device_put once you're done with it.
1325 **/
1326struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
1327                                                 uint lun)
1328{
1329        struct scsi_device *sdev;
1330        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1331        unsigned long flags;
1332
1333        spin_lock_irqsave(shost->host_lock, flags);
1334        sdev = __scsi_device_lookup_by_target(starget, lun);
1335        if (sdev && scsi_device_get(sdev))
1336                sdev = NULL;
1337        spin_unlock_irqrestore(shost->host_lock, flags);
1338
1339        return sdev;
1340}
1341EXPORT_SYMBOL(scsi_device_lookup_by_target);
1342
1343/**
1344 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1345 * @shost:      SCSI host pointer
1346 * @channel:    SCSI channel (zero if only one channel)
1347 * @id:         SCSI target number (physical unit number)
1348 * @lun:        SCSI Logical Unit Number
1349 *
1350 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1351 * for a given host. The returned scsi_device does not have an additional
1352 * reference.  You must hold the host's host_lock over this call and any access
1353 * to the returned scsi_device.
1354 *
1355 * Note:  The only reason why drivers would want to use this is because
1356 * they need to access the device list in irq context.  Otherwise you
1357 * really want to use scsi_device_lookup instead.
1358 **/
1359struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1360                uint channel, uint id, uint lun)
1361{
1362        struct scsi_device *sdev;
1363
1364        list_for_each_entry(sdev, &shost->__devices, siblings) {
1365                if (sdev->channel == channel && sdev->id == id &&
1366                                sdev->lun ==lun)
1367                        return sdev;
1368        }
1369
1370        return NULL;
1371}
1372EXPORT_SYMBOL(__scsi_device_lookup);
1373
1374/**
1375 * scsi_device_lookup - find a device given the host
1376 * @shost:      SCSI host pointer
1377 * @channel:    SCSI channel (zero if only one channel)
1378 * @id:         SCSI target number (physical unit number)
1379 * @lun:        SCSI Logical Unit Number
1380 *
1381 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1382 * for a given host.  The returned scsi_device has an additional reference that
1383 * needs to be released with scsi_device_put once you're done with it.
1384 **/
1385struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1386                uint channel, uint id, uint lun)
1387{
1388        struct scsi_device *sdev;
1389        unsigned long flags;
1390
1391        spin_lock_irqsave(shost->host_lock, flags);
1392        sdev = __scsi_device_lookup(shost, channel, id, lun);
1393        if (sdev && scsi_device_get(sdev))
1394                sdev = NULL;
1395        spin_unlock_irqrestore(shost->host_lock, flags);
1396
1397        return sdev;
1398}
1399EXPORT_SYMBOL(scsi_device_lookup);
1400
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* scsi_logging_level (defined elsewhere in the SCSI core) is runtime-tunable */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1406
/*
 * Bring up the SCSI subsystem: queues, procfs, device info, host
 * infrastructure, sysctl and sysfs, then netlink.  On any failure the
 * goto ladder unwinds exactly the stages that already succeeded, in
 * reverse order.
 */
static int __init init_scsi(void)
{
        int error;

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        /* scsi_netlink_init() has no return value; no cleanup stage for it */
        scsi_netlink_init();

        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}
1449
/*
 * Tear down the SCSI subsystem in the reverse order of init_scsi().
 * NOTE(review): scsi_sd_probe_domain is not registered in init_scsi();
 * presumably it is set up elsewhere in the SCSI core -- confirm before
 * changing the unregister call below.
 */
static void __exit exit_scsi(void)
{
        scsi_netlink_exit();
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        scsi_exit_procfs();
        scsi_exit_queue();
        async_unregister_domain(&scsi_sd_probe_domain);
}
1461
1462subsys_initcall(init_scsi);
1463module_exit(exit_scsi);
1464
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.