linux/include/scsi/scsi_host.h
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>

struct request_queue;
struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
struct blk_queue_tags;


/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *       and that there is no limit to the size of the table to which
 *       we scatter/gather data.  The value we set here is the maximum
 *       single element sglist.  To use chained sglists, the adapter
 *       has to set a value beyond ALL (and correctly use the chain
 *       handling API).
 * Anything else:  Indicates the maximum number of chains that can be
 *       used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL  SCSI_MAX_SG_SEGMENTS

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1

enum {
        SCSI_QDEPTH_DEFAULT,    /* default requested change, e.g. from sysfs */
        SCSI_QDEPTH_QFULL,      /* scsi-ml requested due to queue full */
        SCSI_QDEPTH_RAMP_UP,    /* scsi-ml requested due to threshold event */
};

struct scsi_host_template {
        struct module *module;
        const char *name;

        /*
         * Used to initialize old-style drivers.  For new-style drivers
         * just perform all work in your module initialization function.
         *
         * Status: OBSOLETE
         */
        int (* detect)(struct scsi_host_template *);

        /*
         * Used as unload callback for hosts with old-style drivers.
         *
         * Status: OBSOLETE
         */
        int (* release)(struct Scsi_Host *);

        /*
         * The info function will return whatever useful information the
         * developer sees fit.  If not provided, then the name field will
         * be used instead.
         *
         * Status: OPTIONAL
         */
        const char *(* info)(struct Scsi_Host *);

        /*
         * Ioctl interface
         *
         * Status: OPTIONAL
         */
        int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
        /*
         * Compat handler. Handles the 32bit ABI.
         * When an unknown ioctl is passed, return -ENOIOCTLCMD.
         *
         * Status: OPTIONAL
         */
        int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

        /*
         * The queuecommand function is used to queue up a scsi
         * command block to the LLDD.  When the driver has finished
         * processing the command the done callback is invoked.
         *
         * If queuecommand returns 0, then the HBA has accepted the
         * command.  The done() function must be called on the command
         * when the driver has finished with it. (you may call done on the
         * command before queuecommand returns, but in this case you
         * *must* return 0 from queuecommand).
         *
         * Queuecommand may also reject the command, in which case it may
         * not touch the command and must not call done() for it.
         *
         * There are two possible rejection returns:
         *
         *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
         *   allow commands to other devices serviced by this host.
         *
         *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
         *   host temporarily.
         *
         * For compatibility, any other non-zero return is treated the
         * same as SCSI_MLQUEUE_HOST_BUSY.
         *
         * NOTE: "temporarily" means either until the next command for
         * this device/host completes, or a period of time determined by
         * I/O pressure in the system if there are no other outstanding
         * commands.
         *
         * STATUS: REQUIRED
         */
        int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
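
        /*
         * Illustrative sketch only, not part of this interface: a minimal
         * queuecommand for a hypothetical driver, where hypo_adapter,
         * hypo_queue_full() and hypo_send() stand in for driver-private
         * state and hardware access:
         *
         *      static int hypo_queuecommand(struct Scsi_Host *shost,
         *                                   struct scsi_cmnd *cmd)
         *      {
         *              struct hypo_adapter *ha = shost_priv(shost);
         *
         *              if (hypo_queue_full(ha))
         *                      return SCSI_MLQUEUE_HOST_BUSY;
         *
         *              hypo_send(ha, cmd);
         *              return 0;
         *      }
         *
         * The hardware completion path would later set cmd->result and
         * invoke cmd->scsi_done(cmd).
         */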

        /*
         * The transfer functions are used to queue a scsi command to
         * the LLD. When the driver is finished processing the command
         * the done callback is invoked.
         *
         * This is called to inform the LLD to transfer
         * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) specifies the
         * number of scatterlist entries in the command and
         * scsi_sglist(cmd) returns the scatterlist.
         *
         * return values: see queuecommand
         *
         * If the LLD accepts the cmd, it should set the result to an
         * appropriate value when completed before calling the done function.
         *
         * STATUS: REQUIRED FOR TARGET DRIVERS
         */
        /* TODO: rename */
        int (* transfer_response)(struct scsi_cmnd *,
                                  void (*done)(struct scsi_cmnd *));

        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
         * routine that is present that should work in most cases.  For those
         * driver authors that have the inclination and ability to write their
         * own strategy routine, this is where it is specified.  Note - the
         * strategy routine is *ALWAYS* run in the context of the kernel eh
         * thread.  Thus you are guaranteed to *NOT* be in an interrupt
         * handler when you execute this, and you are also guaranteed to
         * *NOT* have any other commands being queued while you are in the
         * strategy routine. When you return from this function, operations
         * return to normal.
         *
         * See scsi_error.c scsi_unjam_host for additional comments about
         * what this function should and should not be attempting to do.
         *
         * Status: REQUIRED     (at least one of them)
         */
        int (* eh_abort_handler)(struct scsi_cmnd *);
        int (* eh_device_reset_handler)(struct scsi_cmnd *);
        int (* eh_target_reset_handler)(struct scsi_cmnd *);
        int (* eh_bus_reset_handler)(struct scsi_cmnd *);
        int (* eh_host_reset_handler)(struct scsi_cmnd *);
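
        /*
         * Purely illustrative sketch of one of these, assuming a
         * hypothetical hypo_abort_cmd() that asks the adapter to abort
         * a command; eh handlers report SUCCESS or FAILED:
         *
         *      static int hypo_eh_abort_handler(struct scsi_cmnd *cmd)
         *      {
         *              struct hypo_adapter *ha =
         *                      shost_priv(cmd->device->host);
         *
         *              return hypo_abort_cmd(ha, cmd) == 0 ?
         *                      SUCCESS : FAILED;
         *      }
         */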

        /*
         * Before the mid layer attempts to scan for a new device where none
         * currently exists, it will call this entry in your driver.  Should
         * your driver need to allocate any structs or perform any other init
         * items in order to send commands to a currently unused target/lun
         * combo, then this is where you can perform those allocations.  This
         * is specifically so that drivers won't have to perform any kind of
         * "is this a new device" checks in their queuecommand routine,
         * thereby making the hot path a bit quicker.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Deallocation:  If we didn't find any devices at this ID, you will
         * get an immediate call to slave_destroy().  If we find something
         * here then you will get a call to slave_configure(), then the
         * device will be used for however long it is kept around, then when
         * the device is removed from the system (or possibly at reboot
         * time), you will then get a call to slave_destroy().  This is
         * assuming you implement slave_configure and slave_destroy.
         * However, if you allocate memory and hang it off the device struct,
         * then you must implement the slave_destroy() routine at a minimum
         * in order to avoid leaking memory each time a device is torn down.
         *
         * Status: OPTIONAL
         */
        int (* slave_alloc)(struct scsi_device *);
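
        /*
         * Illustrative sketch, with hypo_lun_data standing in for
         * driver-private per-LUN state: allocate it here and free it in
         * slave_destroy():
         *
         *      static int hypo_slave_alloc(struct scsi_device *sdev)
         *      {
         *              sdev->hostdata = kzalloc(sizeof(struct hypo_lun_data),
         *                                       GFP_KERNEL);
         *              return sdev->hostdata ? 0 : -ENOMEM;
         *      }
         *
         *      static void hypo_slave_destroy(struct scsi_device *sdev)
         *      {
         *              kfree(sdev->hostdata);
         *              sdev->hostdata = NULL;
         *      }
         */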

        /*
         * Once the device has responded to an INQUIRY and we know the
         * device is online, we call into the low level driver with the
         * struct scsi_device *.  If the low level device driver implements
         * this function, it *must* perform the task of setting the queue
         * depth on the device.  All other tasks are optional and depend
         * on what the driver supports and various implementation details.
         *
         * Things currently recommended to be handled at this time include:
         *
         * 1.  Setting the device queue depth.  Proper setting of this is
         *     described in the comments for scsi_adjust_queue_depth.
         * 2.  Determining if the device supports the various synchronous
         *     negotiation protocols.  The device struct will already have
         *     responded to INQUIRY and the results of the standard items
         *     will have been shoved into the various device flag bits, e.g.
         *     device->sdtr will be true if the device supports SDTR messages.
         * 3.  Allocating command structs that the device will need.
         * 4.  Setting the default timeout on this device (if needed).
         * 5.  Anything else the low level driver might want to do on a device
         *     specific setup basis...
         * 6.  Return 0 on success, non-0 on error.  The device will be marked
         *     as offline on error so that no access will occur.  If you return
         *     non-0, your slave_destroy routine will never get called for this
         *     device, so don't leave any loose memory hanging around, clean
         *     up after yourself before returning non-0.
         *
         * Status: OPTIONAL
         */
        int (* slave_configure)(struct scsi_device *);
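
        /*
         * Illustrative sketch (HYPO_MAX_TAGS is a made-up limit): set the
         * queue depth as described in the comments for
         * scsi_adjust_queue_depth:
         *
         *      static int hypo_slave_configure(struct scsi_device *sdev)
         *      {
         *              if (sdev->tagged_supported)
         *                      scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
         *                                              HYPO_MAX_TAGS);
         *              else
         *                      scsi_adjust_queue_depth(sdev, 0,
         *                                              sdev->host->cmd_per_lun);
         *              return 0;
         *      }
         */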

        /*
         * Immediately prior to deallocating the device and after all activity
         * has ceased the mid layer calls this point so that the low level
         * driver may completely detach itself from the scsi device and vice
         * versa.  The low level driver is responsible for freeing any memory
         * it allocated in the slave_alloc or slave_configure calls.
         *
         * Status: OPTIONAL
         */
        void (* slave_destroy)(struct scsi_device *);

        /*
         * Before the mid layer attempts to scan for a new device attached
         * to a target where no target currently exists, it will call this
         * entry in your driver.  Should your driver need to allocate any
         * structs or perform any other init items in order to send commands
         * to a currently unused target, then this is where you can perform
         * those allocations.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Status: OPTIONAL
         */
        int (* target_alloc)(struct scsi_target *);

        /*
         * Immediately prior to deallocating the target structure, and
         * after all activity to attached scsi devices has ceased, the
         * midlayer calls this point so that the driver may deallocate
         * and terminate any references to the target.
         *
         * Status: OPTIONAL
         */
        void (* target_destroy)(struct scsi_target *);

        /*
         * If a host has the ability to discover targets on its own instead
         * of scanning the entire bus, it can fill in this function and
         * call scsi_scan_host().  This function will be called periodically
         * until it returns 1 with the scsi_host and the elapsed time of
         * the scan in jiffies.
         *
         * Status: OPTIONAL
         */
        int (* scan_finished)(struct Scsi_Host *, unsigned long);

        /*
         * If the host wants to be called before the scan starts, but
         * after the midlayer has set up ready for the scan, it can fill
         * in this function.
         *
         * Status: OPTIONAL
         */
        void (* scan_start)(struct Scsi_Host *);
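
        /*
         * Illustrative sketch of the scan_start/scan_finished pair, with
         * hypo_start_discovery() and hypo_discovery_done() standing in
         * for adapter specifics; scan_finished is polled with the
         * elapsed time until it returns 1:
         *
         *      static void hypo_scan_start(struct Scsi_Host *shost)
         *      {
         *              hypo_start_discovery(shost_priv(shost));
         *      }
         *
         *      static int hypo_scan_finished(struct Scsi_Host *shost,
         *                                    unsigned long time)
         *      {
         *              return hypo_discovery_done(shost_priv(shost)) ||
         *                     time >= 10 * HZ;
         *      }
         */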

        /*
         * Fill in this function to allow the queue depth of this host
         * to be changeable (on a per device basis).  Returns either
         * the current queue depth setting (may be different from what
         * was passed in) or an error.  An error should only be
         * returned if the requested depth is legal but the driver was
         * unable to set it.  If the requested depth is illegal, the
         * driver should set and return the closest legal queue depth.
         *
         * Status: OPTIONAL
         */
        int (* change_queue_depth)(struct scsi_device *, int, int);
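
        /*
         * Illustrative sketch (HYPO_MAX_TAGS is a made-up limit, and
         * scsi_get_tag_type() is assumed from <scsi/scsi_tcq.h>): clamp
         * the request, apply it, and report the depth actually set:
         *
         *      static int hypo_change_queue_depth(struct scsi_device *sdev,
         *                                         int qdepth, int reason)
         *      {
         *              if (reason != SCSI_QDEPTH_DEFAULT)
         *                      return -EOPNOTSUPP;
         *
         *              qdepth = min_t(int, qdepth, HYPO_MAX_TAGS);
         *              scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
         *                                      qdepth);
         *              return sdev->queue_depth;
         *      }
         */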

        /*
         * Fill in this function to allow the changing of tag types
         * (this also allows the enabling/disabling of tag command
         * queueing).  An error should only be returned if something
         * went wrong in the driver while trying to set the tag type.
         * If the driver doesn't support the requested tag type, then
         * it should set the closest type it does support without
         * returning an error.  Returns the actual tag type set.
         *
         * Status: OPTIONAL
         */
        int (* change_queue_type)(struct scsi_device *, int);

        /*
         * This function determines the BIOS parameters for a given
         * hard disk.  These tend to be numbers that are made up by
         * the host adapter.  Parameters:
         * size, device, list (heads, sectors, cylinders)
         *
         * Status: OPTIONAL
         */
        int (* bios_param)(struct scsi_device *, struct block_device *,
                        sector_t, int []);
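
        /*
         * Illustrative sketch using a commonly made-up geometry; geom[]
         * receives heads, sectors and cylinders, in that order:
         *
         *      static int hypo_bios_param(struct scsi_device *sdev,
         *                                 struct block_device *bdev,
         *                                 sector_t capacity, int geom[])
         *      {
         *              geom[0] = 64;
         *              geom[1] = 32;
         *              sector_div(capacity, 64 * 32);
         *              geom[2] = capacity;
         *              return 0;
         *      }
         */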

        /*
         * This function is called when one or more partitions on the
         * device reach beyond the end of the device.
         *
         * Status: OPTIONAL
         */
        void (*unlock_native_capacity)(struct scsi_device *);

        /*
         * Can be used to export driver statistics and other information
         * to the world outside the kernel, i.e. userspace, and it also
         * provides an interface to feed the driver with information.
         *
         * Status: OBSOLETE
         */
        int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);

        /*
         * This is an optional routine that allows the transport to become
         * involved when a scsi io timer fires. The return value tells the
         * timer routine how to finish the io timeout handling:
         * EH_HANDLED:          I fixed the error, please complete the command
         * EH_RESET_TIMER:      I need more time, reset the timer and
         *                      begin counting again
         * EH_NOT_HANDLED:      Begin normal error recovery
         *
         * Status: OPTIONAL
         */
        enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

        /* This is an optional routine that allows the transport to initiate
         * an LLD adapter or firmware reset using a sysfs attribute.
         *
         * Return values: 0 on success, -ve value on failure.
         *
         * Status: OPTIONAL
         */

        int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET      1
#define SCSI_FIRMWARE_RESET     2


        /*
         * Name of proc directory
         */
        const char *proc_name;

        /*
         * Used to store the procfs directory if a driver implements the
         * proc_info method.
         */
        struct proc_dir_entry *proc_dir;

        /*
         * This determines if we will use a non-interrupt driven
         * or an interrupt driven scheme.  It is set to the maximum number
         * of simultaneous commands a given host adapter will accept.
         */
        int can_queue;

        /*
         * In many instances, especially where disconnect / reconnect are
         * supported, our host also has an ID on the SCSI bus.  If this is
         * the case, then it must be reserved.  Please set this_id to -1 if
         * your setup is in single initiator mode, and the host lacks an
         * ID.
         */
        int this_id;

        /*
         * This determines the degree to which the host adapter is capable
         * of scatter-gather.
         */
        unsigned short sg_tablesize;
        unsigned short sg_prot_tablesize;

        /*
         * Set this if the host adapter has limitations besides segment count.
         */
        unsigned short max_sectors;

        /*
         * DMA scatter gather segment boundary limit. A segment crossing this
         * boundary will be split in two.
         */
        unsigned long dma_boundary;

        /*
         * This specifies "machine infinity" for host templates which don't
         * limit the transfer size.  Note this limit represents an absolute
         * maximum, and may be over the transfer limits allowed for
         * individual devices (e.g. 256 for SCSI-1).
         */
#define SCSI_DEFAULT_MAX_SECTORS        1024

        /*
         * True if this host adapter can make good use of linked commands.
         * This will allow more than one command to be queued to a given
         * unit on a given host.  Set this to the maximum number of command
         * blocks to be provided for each device.  Set this to 1 for one
         * command block per lun, 2 for two, etc.  Do not set this to 0.
         * You should make sure that the host adapter will do the right thing
         * before you try setting this above 1.
         */
        short cmd_per_lun;

        /*
         * present contains a counter indicating how many boards of this
         * type were found when we did the scan.
         */
        unsigned char present;

        /*
         * This specifies the mode that a LLD supports.
         */
        unsigned supported_mode:2;

        /*
         * True if this host adapter uses unchecked DMA onto an ISA bus.
         */
        unsigned unchecked_isa_dma:1;

        /*
         * True if this host adapter can make good use of clustering.
         * I originally thought that if the tablesize was large that it
         * was a waste of CPU cycles to prepare a cluster list, but
         * it works out that the Buslogic is faster if you use a smaller
         * number of segments (i.e. use clustering).  I guess it is
         * inefficient.
         */
        unsigned use_clustering:1;

        /*
         * True for emulated SCSI host adapters (e.g. ATAPI).
         */
        unsigned emulated:1;

        /*
         * True if the low-level driver performs its own reset-settle delays.
         */
        unsigned skip_settle_delay:1;

        /*
         * True if we are using ordered write support.
         */
        unsigned ordered_tag:1;

        /*
         * Countdown for host blocking with no commands outstanding.
         */
        unsigned int max_host_blocked;

        /*
         * Default value for the blocking.  If the queue is empty,
         * host_blocked counts down in the request_fn until it restarts
         * host operations as zero is reached.
         *
         * FIXME: This should probably be a value in the template
         */
#define SCSI_DEFAULT_HOST_BLOCKED       7

        /*
         * Pointer to the sysfs class properties for this host, NULL terminated.
         */
        struct device_attribute **shost_attrs;

        /*
         * Pointer to the SCSI device properties for this host, NULL terminated.
         */
        struct device_attribute **sdev_attrs;

        /*
         * List of hosts per template.
         *
         * This is only for use by scsi_module.c for legacy templates.
         * For these access to it is synchronized implicitly by
         * module_init/module_exit.
         */
        struct list_head legacy_hosts;

        /*
         * Vendor Identifier associated with the host
         *
         * Note: When specifying vendor_id, be sure to read the
         *   Vendor Type and ID formatting requirements specified in
         *   scsi_netlink.h
         */
        u64 vendor_id;
};

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 */
#define DEF_SCSI_QCMD(func_name) \
        int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)   \
        {                                                               \
                unsigned long irq_flags;                                \
                int rc;                                                 \
                spin_lock_irqsave(shost->host_lock, irq_flags);         \
                scsi_cmd_get_serial(shost, cmd);                        \
                rc = func_name##_lck (cmd, cmd->scsi_done);             \
                spin_unlock_irqrestore(shost->host_lock, irq_flags);    \
                return rc;                                              \
        }
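
/*
 * Illustrative usage sketch (hypo_* names are stand-ins): keep the old
 * host-locked handler under a _lck suffix and let the macro generate the
 * unlocked entry point that scsi_host_template.queuecommand points at:
 *
 *      static int hypo_queuecommand_lck(struct scsi_cmnd *cmd,
 *                                       void (*done)(struct scsi_cmnd *))
 *      {
 *              cmd->scsi_done = done;
 *              return hypo_send(shost_priv(cmd->device->host), cmd);
 *      }
 *
 *      static DEF_SCSI_QCMD(hypo_queuecommand)
 */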

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
        SHOST_CREATED = 1,
        SHOST_RUNNING,
        SHOST_CANCEL,
        SHOST_DEL,
        SHOST_RECOVERY,
        SHOST_CANCEL_RECOVERY,
        SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
        /*
         * __devices is protected by the host_lock, but you should
         * usually use scsi_device_lookup / shost_for_each_device
         * to access it and don't care about locking yourself.
         * In the rare case of being in irq context you can use
         * their __ prefixed variants with the lock held. NEVER
         * access this list directly from a driver.
         */
        struct list_head        __devices;
        struct list_head        __targets;

        struct scsi_host_cmd_pool *cmd_pool;
        spinlock_t              free_list_lock;
        struct list_head        free_list; /* backup store of cmd structs */
        struct list_head        starved_list;

        spinlock_t              default_lock;
        spinlock_t              *host_lock;

        struct mutex            scan_mutex;/* serialize scanning activity */

        struct list_head        eh_cmd_q;
        struct task_struct    * ehandler;  /* Error recovery thread. */
        struct completion     * eh_action; /* Wait for specific actions on the
                                              host. */
        wait_queue_head_t       host_wait;
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;

        /*
         * Area to keep a shared tag map (if needed, will be
         * NULL if not).
         */
        struct blk_queue_tag    *bqt;

        /*
         * The following two fields are protected with host_lock;
         * however, eh routines can safely access during eh processing
         * without acquiring the lock.
         */
        unsigned int host_busy;            /* commands actually active on low-level */
        unsigned int host_failed;          /* commands that failed. */
        unsigned int host_eh_scheduled;    /* EH scheduled without command */

        unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
        int resetting; /* if set, it means that last_reset is a valid value */
        unsigned long last_reset;

        /*
         * These three parameters can be used to allow for wide scsi,
         * and for host adapters that support multiple busses.
         * The first two should be set to 1 more than the actual max id
         * or lun (i.e. 8 for normal systems).
         */
        unsigned int max_id;
        unsigned int max_lun;
        unsigned int max_channel;

        /*
         * This is a unique identifier that must be assigned so that we
         * have some way of identifying each detected host adapter properly
         * and uniquely.  For hosts that do not support more than one card
         * in the system at one time, this does not need to be set.  It is
         * initialized to 0 in scsi_register.
         */
        unsigned int unique_id;

        /*
         * The maximum length of SCSI commands that this host can accept.
         * Probably 12 for most host adapters, but could be 16 for others,
         * or 260 if the driver supports variable length CDBs.
         * For drivers that don't set this field, a value of 12 is
         * assumed.
         */
        unsigned short max_cmd_len;

        int this_id;
        int can_queue;
        short cmd_per_lun;
        short unsigned int sg_tablesize;
        short unsigned int sg_prot_tablesize;
        short unsigned int max_sectors;
        unsigned long dma_boundary;
        /*
         * Used to assign serial numbers to the cmds.
         * Protected by the host lock.
         */
        unsigned long cmd_serial_number;

        unsigned active_mode:2;
        unsigned unchecked_isa_dma:1;
        unsigned use_clustering:1;
        unsigned use_blk_tcq:1;

        /*
         * Host has requested that no further requests come through for the
         * time being.
         */
        unsigned host_self_blocked:1;

        /*
         * Host uses correct SCSI ordering not PC ordering. The bit is
         * set for the minority of drivers whose authors actually read
         * the spec ;).
         */
        unsigned reverse_ordering:1;

        /*
         * Ordered write support
         */
        unsigned ordered_tag:1;

        /* Task mgmt function in progress */
        unsigned tmf_in_progress:1;

        /* Asynchronous scan in progress */
        unsigned async_scan:1;

        /* Don't resume host in EH */
        unsigned eh_noresume:1;

        /*
         * Optional work queue to be utilized by the transport
         */
        char work_q_name[20];
        struct workqueue_struct *work_q;

        /*
         * Host has rejected a command because it was busy.
         */
        unsigned int host_blocked;

        /*
         * Value host_blocked counts down from
         */
        unsigned int max_host_blocked;

        /* Protection Information */
        unsigned int prot_capabilities;
        unsigned char prot_guard_type;

        /*
         * q used for scsi_tgt msgs, async events or any other requests that
         * need to be processed in userspace
         */
        struct request_queue *uspace_req_q;

        /* legacy crap */
        unsigned long base;
        unsigned long io_port;
        unsigned char n_io_port;
        unsigned char dma_channel;
        unsigned int  irq;


        enum scsi_host_state shost_state;

        /* ldm bits */
        struct device           shost_gendev, shost_dev;

        /*
         * List of hosts per template.
         *
         * This is only for use by scsi_module.c for legacy templates.
         * For these access to it is synchronized implicitly by
         * module_init/module_exit.
         */
        struct list_head sht_legacy_list;

        /*
         * Points to the transport data (if any) which is allocated
         * separately
         */
        void *shost_data;

        /*
         * Points to the physical bus device we'd use to do DMA
         * Needed just in case we have virtual hosts.
         */
        struct device *dma_dev;

        /*
         * We should ensure that this is aligned, both for better performance
         * and also because some compilers (m68k) don't automatically force
         * alignment to a long boundary.
         */
        unsigned long hostdata[0]  /* Used for storage of host specific stuff */
                __attribute__ ((aligned (sizeof(unsigned long))));
};

#define         class_to_shost(d)       \
        container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)  \
        dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
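
/*
 * Example (illustrative only):
 *
 *      shost_printk(KERN_WARNING, shost, "firmware fault 0x%x\n", status);
 */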

static inline void *shost_priv(struct Scsi_Host *shost)
{
        return (void *)shost->hostdata;
}

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
        while (!scsi_is_host_device(dev)) {
                if (!dev->parent)
                        return NULL;
                dev = dev->parent;
        }
        return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RECOVERY ||
                shost->shost_state == SHOST_CANCEL_RECOVERY ||
                shost->shost_state == SHOST_DEL_RECOVERY ||
                shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
                                               struct device *,
                                               struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);

extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
                                             struct device *dev)
{
        return scsi_add_host_with_dma(host, dev, dev);
}

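/*
 * Sketch of the usual registration sequence from a hypothetical PCI
 * driver's probe routine (hypo_template, hypo_adapter and
 * HYPO_MAX_TARGETS are stand-ins):
 *
 *      shost = scsi_host_alloc(&hypo_template, sizeof(struct hypo_adapter));
 *      if (!shost)
 *              return -ENOMEM;
 *      shost->max_id = HYPO_MAX_TARGETS;
 *      err = scsi_add_host(shost, &pdev->dev);
 *      if (err) {
 *              scsi_host_put(shost);
 *              return err;
 *      }
 *      scsi_scan_host(shost);
 *
 * Teardown reverses this: scsi_remove_host(shost) followed by
 * scsi_host_put(shost).
 */
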
static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
        return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:      Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RUNNING ||
               shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                                void (*) (struct request_queue *));
/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
        SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
        SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
        SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

        SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
        SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
        SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
        SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
        shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
        return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
        return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
        static unsigned char cap[] = { 0,
                                       SHOST_DIF_TYPE1_PROTECTION,
                                       SHOST_DIF_TYPE2_PROTECTION,
                                       SHOST_DIF_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
                                       SHOST_DIX_TYPE1_PROTECTION,
                                       SHOST_DIX_TYPE2_PROTECTION,
                                       SHOST_DIX_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type];
#endif
        return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
        SHOST_DIX_GUARD_CRC = 1 << 0,
        SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
        shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
        return shost->prot_guard_type;
}

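/*
 * Illustrative sketch: a DIF/DIX-capable driver would typically
 * advertise its abilities at probe time; the exact mask depends on
 * the hardware:
 *
 *      scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *                                SHOST_DIX_TYPE0_PROTECTION |
 *                                SHOST_DIX_TYPE1_PROTECTION);
 *      scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */
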
/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */