linux/include/scsi/scsi_host.h
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>

struct request_queue;
struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
struct blk_queue_tag;


/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *       and that there is no limit to the size of the table to which
 *       we scatter/gather data.  The value we set here is the maximum
 *       single element sglist.  To use chained sglists, the adapter
 *       has to set a value beyond ALL (and correctly use the chain
 *       handling API).
 * Anything else:  Indicates the maximum number of chains that can be
 *       used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL  SCSI_MAX_SG_SEGMENTS

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1
struct scsi_host_template {
        struct module *module;
        const char *name;

        /*
         * Used to initialize old-style drivers.  For new-style drivers
         * just perform all work in your module initialization function.
         *
         * Status: OBSOLETE
         */
        int (* detect)(struct scsi_host_template *);

        /*
         * Used as the unload callback for hosts with old-style drivers.
         *
         * Status: OBSOLETE
         */
        int (* release)(struct Scsi_Host *);

        /*
         * The info function will return whatever useful information the
         * developer sees fit.  If not provided, then the name field will
         * be used instead.
         *
         * Status: OPTIONAL
         */
        const char *(* info)(struct Scsi_Host *);

        /*
         * Ioctl interface
         *
         * Status: OPTIONAL
         */
        int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
        /*
         * Compat handler.  Handles the 32-bit ABI.
         * Return -ENOIOCTLCMD when an unknown ioctl is passed.
         *
         * Status: OPTIONAL
         */
        int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

        /*
         * The queuecommand function is used to queue up a scsi
         * command block to the LLDD.  When the driver has finished
         * processing the command the done callback is invoked.
         *
         * If queuecommand returns 0, then the HBA has accepted the
         * command.  The done() function must be called on the command
         * when the driver has finished with it. (you may call done on the
         * command before queuecommand returns, but in this case you
         * *must* return 0 from queuecommand).
         *
         * Queuecommand may also reject the command, in which case it may
         * not touch the command and must not call done() for it.
         *
         * There are two possible rejection returns:
         *
         *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
         *   allow commands to other devices serviced by this host.
         *
         *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
         *   host temporarily.
         *
         * For compatibility, any other non-zero return is treated the
         * same as SCSI_MLQUEUE_HOST_BUSY.
         *
         * NOTE: "temporarily" means either until the next command for
         * this device/host completes, or a period of time determined by
         * I/O pressure in the system if there are no other outstanding
         * commands.
         *
         * STATUS: REQUIRED
         */
        int (* queuecommand)(struct scsi_cmnd *,
                             void (*done)(struct scsi_cmnd *));

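        /*
         * Illustrative sketch of the contract above (not part of this
         * header).  The example_* identifiers are hypothetical;
         * SCSI_MLQUEUE_HOST_BUSY and DID_OK come from <scsi/scsi.h>,
         * and shost_priv() is defined later in this header.
         *
         *      static int example_queuecommand(struct scsi_cmnd *cmd,
         *                      void (*done)(struct scsi_cmnd *))
         *      {
         *              struct example_hba *hba = shost_priv(cmd->device->host);
         *
         *              if (example_hw_queue_full(hba))
         *                      return SCSI_MLQUEUE_HOST_BUSY;
         *
         *              example_hw_submit(hba, cmd);
         *              return 0;
         *      }
         *
         * The interrupt handler would later set cmd->result (e.g.
         * DID_OK << 16) and invoke the done() callback.  Calling done()
         * before queuecommand returns is also legal, provided 0 is
         * returned.
         */
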
        /*
         * The transfer functions are used to queue a scsi command to
         * the LLD.  When the driver is finished processing the command
         * the done callback is invoked.
         *
         * This is called to inform the LLD to transfer
         * scsi_bufflen(cmd) bytes.  scsi_sg_count(cmd) specifies the
         * number of scatterlist entries in the command and
         * scsi_sglist(cmd) returns the scatterlist.
         *
         * return values: see queuecommand
         *
         * If the LLD accepts the cmd, it should set the result to an
         * appropriate value when completed before calling the done function.
         *
         * STATUS: REQUIRED FOR TARGET DRIVERS
         */
        /* TODO: rename */
        int (* transfer_response)(struct scsi_cmnd *,
                                  void (*done)(struct scsi_cmnd *));

        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
         * routine that is present that should work in most cases.  For those
         * driver authors that have the inclination and ability to write their
         * own strategy routine, this is where it is specified.  Note - the
         * strategy routine is *ALWAYS* run in the context of the kernel eh
         * thread.  Thus you are guaranteed to *NOT* be in an interrupt
         * handler when you execute this, and you are also guaranteed to
         * *NOT* have any other commands being queued while you are in the
         * strategy routine.  When you return from this function, operations
         * return to normal.
         *
         * See scsi_error.c scsi_unjam_host for additional comments about
         * what this function should and should not be attempting to do.
         *
         * Status: REQUIRED     (at least one of them)
         */
        int (* eh_abort_handler)(struct scsi_cmnd *);
        int (* eh_device_reset_handler)(struct scsi_cmnd *);
        int (* eh_target_reset_handler)(struct scsi_cmnd *);
        int (* eh_bus_reset_handler)(struct scsi_cmnd *);
        int (* eh_host_reset_handler)(struct scsi_cmnd *);

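        /*
         * Illustrative sketch (hypothetical example_* identifiers): an
         * abort handler that asks the hardware to give a command back.
         * SUCCESS and FAILED are the standard eh return values from
         * <scsi/scsi.h>.
         *
         *      static int example_eh_abort_handler(struct scsi_cmnd *cmd)
         *      {
         *              struct example_hba *hba = shost_priv(cmd->device->host);
         *
         *              if (example_hw_abort(hba, cmd))
         *                      return FAILED;
         *              return SUCCESS;
         *      }
         *
         * As noted above, this runs in the eh thread, so it may sleep.
         */
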
        /*
         * Before the mid layer attempts to scan for a new device where none
         * currently exists, it will call this entry in your driver.  Should
         * your driver need to allocate any structs or perform any other init
         * items in order to send commands to a currently unused target/lun
         * combo, then this is where you can perform those allocations.  This
         * is specifically so that drivers won't have to perform any kind of
         * "is this a new device" checks in their queuecommand routine,
         * thereby making the hot path a bit quicker.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Deallocation:  If we didn't find any devices at this ID, you will
         * get an immediate call to slave_destroy().  If we find something
         * here then you will get a call to slave_configure(), then the
         * device will be used for however long it is kept around, then when
         * the device is removed from the system (or possibly at reboot
         * time), you will then get a call to slave_destroy().  This is
         * assuming you implement slave_configure and slave_destroy.
         * However, if you allocate memory and hang it off the device struct,
         * then you must implement the slave_destroy() routine at a minimum
         * in order to avoid leaking memory each time a device is torn down.
         *
         * Status: OPTIONAL
         */
        int (* slave_alloc)(struct scsi_device *);

        /*
         * Once the device has responded to an INQUIRY and we know the
         * device is online, we call into the low level driver with the
         * struct scsi_device *.  If the low level device driver implements
         * this function, it *must* perform the task of setting the queue
         * depth on the device.  All other tasks are optional and depend
         * on what the driver supports and various implementation details.
         *
         * Things currently recommended to be handled at this time include:
         *
         * 1.  Setting the device queue depth.  Proper setting of this is
         *     described in the comments for scsi_adjust_queue_depth.
         * 2.  Determining if the device supports the various synchronous
         *     negotiation protocols.  The device struct will already have
         *     responded to INQUIRY and the results of the standard items
         *     will have been shoved into the various device flag bits, e.g.
         *     device->sdtr will be true if the device supports SDTR messages.
         * 3.  Allocating command structs that the device will need.
         * 4.  Setting the default timeout on this device (if needed).
         * 5.  Anything else the low level driver might want to do on a device
         *     specific setup basis...
         * 6.  Return 0 on success, non-0 on error.  The device will be marked
         *     as offline on error so that no access will occur.  If you return
         *     non-0, your slave_destroy routine will never get called for this
         *     device, so don't leave any loose memory hanging around; clean
         *     up after yourself before returning non-0.
         *
         * Status: OPTIONAL
         */
        int (* slave_configure)(struct scsi_device *);

        /*
         * Immediately prior to deallocating the device and after all activity
         * has ceased the mid layer calls this point so that the low level
         * driver may completely detach itself from the scsi device and vice
         * versa.  The low level driver is responsible for freeing any memory
         * it allocated in the slave_alloc or slave_configure calls.
         *
         * Status: OPTIONAL
         */
        void (* slave_destroy)(struct scsi_device *);

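        /*
         * Illustrative sketch of the slave_* lifecycle described above
         * (hypothetical example_* identifiers; scsi_adjust_queue_depth
         * is the midlayer helper referenced in the slave_configure
         * comment, and MSG_SIMPLE_TAG comes from <scsi/scsi.h>):
         *
         *      static int example_slave_alloc(struct scsi_device *sdev)
         *      {
         *              sdev->hostdata = kzalloc(sizeof(struct example_lun),
         *                                       GFP_KERNEL);
         *              return sdev->hostdata ? 0 : -ENOMEM;
         *      }
         *
         *      static int example_slave_configure(struct scsi_device *sdev)
         *      {
         *              scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 32);
         *              return 0;
         *      }
         *
         *      static void example_slave_destroy(struct scsi_device *sdev)
         *      {
         *              kfree(sdev->hostdata);
         *              sdev->hostdata = NULL;
         *      }
         *
         * The kfree() in slave_destroy pairs with the kzalloc() in
         * slave_alloc, as required by the deallocation rules above.
         */
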
        /*
         * Before the mid layer attempts to scan for a new device attached
         * to a target where no target currently exists, it will call this
         * entry in your driver.  Should your driver need to allocate any
         * structs or perform any other init items in order to send commands
         * to a currently unused target, then this is where you can perform
         * those allocations.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Status: OPTIONAL
         */
        int (* target_alloc)(struct scsi_target *);

        /*
         * Immediately prior to deallocating the target structure, and
         * after all activity to attached scsi devices has ceased, the
         * midlayer calls this point so that the driver may deallocate
         * and terminate any references to the target.
         *
         * Status: OPTIONAL
         */
        void (* target_destroy)(struct scsi_target *);

        /*
         * If a host has the ability to discover targets on its own instead
         * of scanning the entire bus, it can fill in this function and
         * call scsi_scan_host().  This function will then be called
         * periodically, with the Scsi_Host and the elapsed scan time in
         * jiffies, until it returns 1 to indicate that the scan is
         * finished.
         *
         * Status: OPTIONAL
         */
        int (* scan_finished)(struct Scsi_Host *, unsigned long);

        /*
         * If the host wants to be called before the scan starts, but
         * after the midlayer has set up ready for the scan, it can fill
         * in this function.
         *
         * Status: OPTIONAL
         */
        void (* scan_start)(struct Scsi_Host *);

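        /*
         * Illustrative sketch of an asynchronous scan (hypothetical
         * example_* identifiers): scan_start kicks off discovery in
         * hardware, and scan_finished polls for completion with a
         * timeout expressed in jiffies.
         *
         *      static void example_scan_start(struct Scsi_Host *shost)
         *      {
         *              example_hw_start_discovery(shost_priv(shost));
         *      }
         *
         *      static int example_scan_finished(struct Scsi_Host *shost,
         *                      unsigned long time)
         *      {
         *              if (time >= 10 * HZ)
         *                      return 1;
         *              return example_hw_discovery_done(shost_priv(shost));
         *      }
         *
         * Here the scan is declared finished when the hardware reports
         * completion or ten seconds have elapsed, whichever comes first.
         */
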
        /*
         * Fill in this function to allow the queue depth of this host
         * to be changeable (on a per device basis).  Returns either
         * the current queue depth setting (may be different from what
         * was passed in) or an error.  An error should only be
         * returned if the requested depth is legal but the driver was
         * unable to set it.  If the requested depth is illegal, the
         * driver should set and return the closest legal queue depth.
         *
         * Status: OPTIONAL
         */
        int (* change_queue_depth)(struct scsi_device *, int);

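        /*
         * Illustrative sketch (EXAMPLE_MAX_DEPTH is hypothetical;
         * scsi_get_tag_type() lives in <scsi/scsi_tcq.h>): clamp the
         * requested depth to what the hardware supports, apply it via
         * scsi_adjust_queue_depth, and report the resulting depth.
         *
         *      static int example_change_queue_depth(struct scsi_device *sdev,
         *                      int depth)
         *      {
         *              if (depth > EXAMPLE_MAX_DEPTH)
         *                      depth = EXAMPLE_MAX_DEPTH;
         *              scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
         *                                      depth);
         *              return sdev->queue_depth;
         *      }
         */
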
        /*
         * Fill in this function to allow the changing of tag types
         * (this also allows the enabling/disabling of tag command
         * queueing).  An error should only be returned if something
         * went wrong in the driver while trying to set the tag type.
         * If the driver doesn't support the requested tag type, then
         * it should set the closest type it does support without
         * returning an error.  Returns the actual tag type set.
         *
         * Status: OPTIONAL
         */
        int (* change_queue_type)(struct scsi_device *, int);

        /*
         * This function determines the BIOS parameters for a given
         * hard disk.  These tend to be numbers that are made up by
         * the host adapter.  Parameters:
         * size, device, list (heads, sectors, cylinders)
         *
         * Status: OPTIONAL
         */
        int (* bios_param)(struct scsi_device *, struct block_device *,
                        sector_t, int []);

        /*
         * Can be used to export driver statistics and other information
         * to the world outside the kernel (i.e. userspace), and it also
         * provides an interface to feed the driver with information.
         *
         * Status: OBSOLETE
         */
        int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);

        /*
         * This is an optional routine that allows the transport to become
         * involved when a scsi io timer fires.  The return value tells the
         * timer routine how to finish the io timeout handling:
         * EH_HANDLED:          I fixed the error, please complete the command
         * EH_RESET_TIMER:      I need more time, reset the timer and
         *                      begin counting again
         * EH_NOT_HANDLED:      Begin normal error recovery
         *
         * Status: OPTIONAL
         */
        enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

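        /*
         * Illustrative sketch (hypothetical example_* identifiers; the
         * enum blk_eh_timer_return constants are spelled BLK_EH_* by
         * the block layer): ask for more time while the hardware is
         * still working on the command, otherwise let normal error
         * recovery begin.
         *
         *      static enum blk_eh_timer_return
         *      example_eh_timed_out(struct scsi_cmnd *cmd)
         *      {
         *              if (example_hw_still_running(cmd))
         *                      return BLK_EH_RESET_TIMER;
         *              return BLK_EH_NOT_HANDLED;
         *      }
         */
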
        /*
         * Name of proc directory
         */
        const char *proc_name;

        /*
         * Used to store the procfs directory if a driver implements the
         * proc_info method.
         */
        struct proc_dir_entry *proc_dir;

        /*
         * This determines if we will use a non-interrupt driven
         * or an interrupt driven scheme.  It is set to the maximum number
         * of simultaneous commands a given host adapter will accept.
         */
        int can_queue;

        /*
         * In many instances, especially where disconnect / reconnect are
         * supported, our host also has an ID on the SCSI bus.  If this is
         * the case, then it must be reserved.  Please set this_id to -1 if
         * your setup is in single initiator mode, and the host lacks an
         * ID.
         */
        int this_id;

        /*
         * This determines the degree to which the host adapter is capable
         * of scatter-gather.
         */
        unsigned short sg_tablesize;

        /*
         * Set this if the host adapter has limitations beside segment count.
         */
        unsigned short max_sectors;

        /*
         * DMA scatter gather segment boundary limit. A segment crossing this
         * boundary will be split in two.
         */
        unsigned long dma_boundary;

        /*
         * This specifies "machine infinity" for host templates which don't
         * limit the transfer size.  Note this limit represents an absolute
         * maximum, and may be over the transfer limits allowed for
         * individual devices (e.g. 256 for SCSI-1).
         */
#define SCSI_DEFAULT_MAX_SECTORS        1024

        /*
         * True if this host adapter can make good use of linked commands.
         * This will allow more than one command to be queued to a given
         * unit on a given host.  Set this to the maximum number of command
         * blocks to be provided for each device.  Set this to 1 for one
         * command block per lun, 2 for two, etc.  Do not set this to 0.
         * You should make sure that the host adapter will do the right thing
         * before you try setting this above 1.
         */
        short cmd_per_lun;

        /*
         * present contains a counter indicating how many boards of this
         * type were found when we did the scan.
         */
        unsigned char present;

        /*
         * This specifies the mode that a LLD supports.
         */
        unsigned supported_mode:2;

        /*
         * True if this host adapter uses unchecked DMA onto an ISA bus.
         */
        unsigned unchecked_isa_dma:1;

        /*
         * True if this host adapter can make good use of clustering.
         * I originally thought that if the tablesize was large that it
         * was a waste of CPU cycles to prepare a cluster list, but
         * it works out that the Buslogic is faster if you use a smaller
         * number of segments (i.e. use clustering).  I guess it is
         * inefficient.
         */
        unsigned use_clustering:1;

        /*
         * True for emulated SCSI host adapters (e.g. ATAPI).
         */
        unsigned emulated:1;

        /*
         * True if the low-level driver performs its own reset-settle delays.
         */
        unsigned skip_settle_delay:1;

        /*
         * True if we are using ordered write support.
         */
        unsigned ordered_tag:1;

        /*
         * Countdown for host blocking with no commands outstanding.
         */
        unsigned int max_host_blocked;

        /*
         * Default value for the blocking.  If the queue is empty,
         * host_blocked counts down in the request_fn until it reaches
         * zero, at which point host operations are restarted.
         *
         * FIXME: This should probably be a value in the template
         */
#define SCSI_DEFAULT_HOST_BLOCKED       7

        /*
         * Pointer to the sysfs class properties for this host, NULL terminated.
         */
        struct device_attribute **shost_attrs;

        /*
         * Pointer to the SCSI device properties for this host, NULL terminated.
         */
        struct device_attribute **sdev_attrs;

        /*
         * List of hosts per template.
         *
         * This is only for use by scsi_module.c for legacy templates.
         * For these access to it is synchronized implicitly by
         * module_init/module_exit.
         */
        struct list_head legacy_hosts;
};

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ASCII descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
        SHOST_CREATED = 1,
        SHOST_RUNNING,
        SHOST_CANCEL,
        SHOST_DEL,
        SHOST_RECOVERY,
        SHOST_CANCEL_RECOVERY,
        SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
        /*
         * __devices is protected by the host_lock, but you should
         * usually use scsi_device_lookup / shost_for_each_device
         * to access it and don't care about locking yourself.
         * In the rare case of being in irq context you can use
         * their __ prefixed variants with the lock held.  NEVER
         * access this list directly from a driver.
         */
        struct list_head        __devices;
        struct list_head        __targets;

        struct scsi_host_cmd_pool *cmd_pool;
        spinlock_t              free_list_lock;
        struct list_head        free_list; /* backup store of cmd structs */
        struct list_head        starved_list;

        spinlock_t              default_lock;
        spinlock_t              *host_lock;

        struct mutex            scan_mutex; /* serialize scanning activity */

        struct list_head        eh_cmd_q;
        struct task_struct    * ehandler;  /* Error recovery thread. */
        struct completion     * eh_action; /* Wait for specific actions on the
                                              host. */
        wait_queue_head_t       host_wait;
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;

        /*
         * Area to keep a shared tag map (if needed, will be
         * NULL if not).
         */
        struct blk_queue_tag    *bqt;

        /*
         * The following three fields are protected with host_lock;
         * however, eh routines can safely access during eh processing
         * without acquiring the lock.
         */
        unsigned int host_busy;            /* commands actually active on low-level */
        unsigned int host_failed;          /* commands that failed. */
        unsigned int host_eh_scheduled;    /* EH scheduled without command */

        unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
        int resetting; /* if set, it means that last_reset is a valid value */
        unsigned long last_reset;

        /*
         * These three parameters can be used to allow for wide scsi,
         * and for host adapters that support multiple buses.
         * The first two should be set to 1 more than the actual max id
         * or lun (i.e. 8 for normal systems).
         */
        unsigned int max_id;
        unsigned int max_lun;
        unsigned int max_channel;

        /*
         * This is a unique identifier that must be assigned so that we
         * have some way of identifying each detected host adapter properly
         * and uniquely.  For hosts that do not support more than one card
         * in the system at one time, this does not need to be set.  It is
         * initialized to 0 in scsi_register.
         */
        unsigned int unique_id;

        /*
         * The maximum length of SCSI commands that this host can accept.
         * Probably 12 for most host adapters, but could be 16 for others,
         * or 260 if the driver supports variable-length CDBs.
         * For drivers that don't set this field, a value of 12 is
         * assumed.
         */
        unsigned short max_cmd_len;

        int this_id;
        int can_queue;
        short cmd_per_lun;
        short unsigned int sg_tablesize;
        short unsigned int max_sectors;
        unsigned long dma_boundary;

        /*
         * Used to assign serial numbers to the cmds.
         * Protected by the host lock.
         */
        unsigned long cmd_serial_number;

        unsigned active_mode:2;
        unsigned unchecked_isa_dma:1;
        unsigned use_clustering:1;
        unsigned use_blk_tcq:1;

        /*
         * Host has requested that no further requests come through for the
         * time being.
         */
        unsigned host_self_blocked:1;

        /*
         * Host uses correct SCSI ordering, not PC ordering.  The bit is
         * set for the minority of drivers whose authors actually read
         * the spec ;).
         */
        unsigned reverse_ordering:1;

        /*
         * Ordered write support
         */
        unsigned ordered_tag:1;

        /* Task mgmt function in progress */
        unsigned tmf_in_progress:1;

        /* Asynchronous scan in progress */
        unsigned async_scan:1;

        /*
         * Optional work queue to be utilized by the transport
         */
        char work_q_name[20];
        struct workqueue_struct *work_q;

        /*
         * Host has rejected a command because it was busy.
         */
        unsigned int host_blocked;

        /*
         * Value host_blocked counts down from
         */
        unsigned int max_host_blocked;

        /* Protection Information */
        unsigned int prot_capabilities;
        unsigned char prot_guard_type;

        /*
         * q used for scsi_tgt msgs, async events or any other requests that
         * need to be processed in userspace
         */
        struct request_queue *uspace_req_q;

        /* legacy crap */
        unsigned long base;
        unsigned long io_port;
        unsigned char n_io_port;
        unsigned char dma_channel;
        unsigned int  irq;


        enum scsi_host_state shost_state;

        /* ldm bits */
        struct device           shost_gendev, shost_dev;

        /*
         * List of hosts per template.
         *
         * This is only for use by scsi_module.c for legacy templates.
         * For these access to it is synchronized implicitly by
         * module_init/module_exit.
         */
        struct list_head sht_legacy_list;

        /*
         * Points to the transport data (if any) which is allocated
         * separately
         */
        void *shost_data;

        /*
         * We should ensure that this is aligned, both for better performance
         * and also because some compilers (m68k) don't automatically force
         * alignment to a long boundary.
         */
        unsigned long hostdata[0]  /* Used for storage of host specific stuff */
                __attribute__ ((aligned (sizeof(unsigned long))));
};

#define         class_to_shost(d)       \
        container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)  \
        dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
        return (void *)shost->hostdata;
}

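/*
 * Illustrative sketch (hypothetical struct example_hba): the hostdata
 * area above is sized by the second argument of scsi_host_alloc(),
 * declared below, and shost_priv() returns a pointer to it.
 *
 *	struct example_hba {
 *		void __iomem *regs;
 *	};
 *
 *	shost = scsi_host_alloc(&example_template, sizeof(struct example_hba));
 *	hba = shost_priv(shost);
 */
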
int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
        while (!scsi_is_host_device(dev)) {
                if (!dev->parent)
                        return NULL;
                dev = dev->parent;
        }
        return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RECOVERY ||
                shost->shost_state == SHOST_CANCEL_RECOVERY ||
                shost->shost_state == SHOST_DEL_RECOVERY ||
                shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);

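/*
 * Illustrative sketch of a typical host lifecycle using the functions
 * above (example_template and example_hba are hypothetical, pdev stands
 * for some parent device such as a PCI device, and error handling is
 * elided):
 *
 *	shost = scsi_host_alloc(&example_template, sizeof(struct example_hba));
 *	shost->max_id = 16;
 *	if (scsi_add_host(shost, &pdev->dev))
 *		goto fail;
 *	scsi_scan_host(shost);
 *
 * and on removal:
 *
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);
 */
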
extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
        return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:      Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RUNNING;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                                void (*) (struct request_queue *));
/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
        SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
        SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
        SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

        SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
        SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
        SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
        SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
        shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
        return shost->prot_capabilities;
}

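/*
 * Illustrative example: an HBA that supports DIF Type 1 on the wire as
 * well as DIX between OS and initiator, using the IP checksum guard
 * (scsi_host_set_guard and SHOST_DIX_GUARD_IP are declared below),
 * might advertise its capabilities like this:
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
 */
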
static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
        switch (target_type) {
        case 1: return shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION;
        case 2: return shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION;
        case 3: return shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION;
        }

        return 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
        switch (target_type) {
        case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION;
        case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION;
        case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION;
        case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION;
        }

        return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has a much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
        SHOST_DIX_GUARD_CRC = 1 << 0,
        SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
        shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
        return shost->prot_guard_type;
}

/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */