/* linux/drivers/scsi/ipr.c */
   1/*
   2 * ipr.c -- driver for IBM Power Linux RAID adapters
   3 *
   4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
   5 *
   6 * Copyright (C) 2003, 2004 IBM Corporation
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  21 *
  22 */
  23
  24/*
  25 * Notes:
  26 *
  27 * This driver is used to control the following SCSI adapters:
  28 *
  29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
  30 *
  31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
  32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
  33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
  34 *              Embedded SCSI adapter on p615 and p655 systems
  35 *
  36 * Supported Hardware Features:
  37 *      - Ultra 320 SCSI controller
  38 *      - PCI-X host interface
  39 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
  40 *      - Non-Volatile Write Cache
  41 *      - Supports attachment of non-RAID disks, tape, and optical devices
  42 *      - RAID Levels 0, 5, 10
  43 *      - Hot spare
  44 *      - Background Parity Checking
  45 *      - Background Data Scrubbing
  46 *      - Ability to increase the capacity of an existing RAID 5 disk array
  47 *              by adding disks
  48 *
  49 * Driver Features:
  50 *      - Tagged command queuing
  51 *      - Adapter microcode download
  52 *      - PCI hot plug
  53 *      - SCSI device hot plug
  54 *
  55 */
  56
  57#include <linux/fs.h>
  58#include <linux/init.h>
  59#include <linux/types.h>
  60#include <linux/errno.h>
  61#include <linux/kernel.h>
  62#include <linux/slab.h>
  63#include <linux/vmalloc.h>
  64#include <linux/ioport.h>
  65#include <linux/delay.h>
  66#include <linux/pci.h>
  67#include <linux/wait.h>
  68#include <linux/spinlock.h>
  69#include <linux/sched.h>
  70#include <linux/interrupt.h>
  71#include <linux/blkdev.h>
  72#include <linux/firmware.h>
  73#include <linux/module.h>
  74#include <linux/moduleparam.h>
  75#include <linux/libata.h>
  76#include <linux/hdreg.h>
  77#include <linux/reboot.h>
  78#include <linux/stringify.h>
  79#include <asm/io.h>
  80#include <asm/irq.h>
  81#include <asm/processor.h>
  82#include <scsi/scsi.h>
  83#include <scsi/scsi_host.h>
  84#include <scsi/scsi_tcq.h>
  85#include <scsi/scsi_eh.h>
  86#include <scsi/scsi_cmnd.h>
  87#include "ipr.h"
  88
  89/*
  90 *   Global Data
  91 */
/* List of all registered adapter configurations; presumably guarded by
 * ipr_driver_lock below -- TODO confirm against the registration paths. */
static LIST_HEAD(ipr_ioa_head);
/* Backing storage for the module parameters registered further down
 * (see the MODULE_PARM_DESC strings for user-facing semantics). */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;	/* "log_level": 0-4, increasing verbosity */
static unsigned int ipr_max_speed = 1;		/* "max_speed": 0=80 MB/s, 1=U160, 2=U320 */
static int ipr_testmode = 0;			/* "testmode": allow unsupported configurations */
static unsigned int ipr_fastfail = 0;		/* "fastfail": reduce timeouts and retries */
static unsigned int ipr_transop_timeout = 0;	/* "transop_timeout": seconds to wait for adapter operational */
static unsigned int ipr_debug = 0;		/* "debug": 1 enables debug logging */
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;	/* "max_devs": max physical devices */
static unsigned int ipr_dual_ioa_raid = 1;	/* "dual_ioa_raid": enable dual adapter RAID support */
static DEFINE_SPINLOCK(ipr_driver_lock);
 102
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		/* Positional initializer below fills the trailing register-offset
		 * member (per-chip register map, offsets relative to the mapped
		 * chip registers) -- field name assumed from ipr.h; verify. */
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		/* Only the CRoC (SIS64) generation has the init_feedback/dump/
		 * endian_swap registers; clear_isr = 0 here, unlike the older chips. */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};
 182
/* PCI ID -> chip personality map.  Columns (positional, per struct
 * ipr_chip_t in ipr.h -- verify): vendor ID, device ID, interrupt type
 * (legacy LSI vs MSI), SIS generation (32/64-bit), config-space access
 * method (PCI config cycles vs MMIO), and the register layout from
 * ipr_chip_cfg[] above. */
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
 194
/* Indexed by the "max_speed" module parameter (ipr_max_speed, 0-2):
 * 0 = 80 MB/s, 1 = U160, 2 = U320 (see MODULE_PARM_DESC below). */
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
 198
 199MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
 200MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
 201module_param_named(max_speed, ipr_max_speed, uint, 0);
 202MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
 203module_param_named(log_level, ipr_log_level, uint, 0);
 204MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
 205module_param_named(testmode, ipr_testmode, int, 0);
 206MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
 207module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
 208MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
 209module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
 210MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
 211module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
 212MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
 213module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
 214MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
 215module_param_named(max_devs, ipr_max_devs, int, 0);
 216MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
 217                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
 218MODULE_LICENSE("GPL");
 219MODULE_VERSION(IPR_DRIVER_VERSION);
 220
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	/* Per entry (positional, per struct ipr_error_table_t in ipr.h --
	 * verify field names): { IOASC value, flag (presumably "log the
	 * IOASA"), minimum log level, URC/description string }.  The four-hex
	 * prefix of each message is the Unit Reference Code shown to the user. */
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	/* NOTE(review): 0x04490000 sits out of numeric order here (before
	 * 0x04449200).  Harmless if the table is scanned linearly -- confirm
	 * the lookup does not assume sorted IOASCs. */
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	/* Logged one level quieter than the default (LOG_LEVEL + 1) */
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
 509
/* Known SCSI enclosure (SES) units and the maximum bus speed (MB/s) to
 * negotiate when one is present.  Fields (positional, per struct
 * ipr_ses_table_entry -- verify): product ID to match, per-byte compare
 * mask (presumably 'X' = byte must match, any other char = don't care;
 * confirm against the matcher elsewhere in this file), max speed. */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
 525
 526/*
 527 *  Function Prototypes
 528 */
 529static int ipr_reset_alert(struct ipr_cmnd *);
 530static void ipr_process_ccn(struct ipr_cmnd *);
 531static void ipr_process_error(struct ipr_cmnd *);
 532static void ipr_reset_ioa_job(struct ipr_cmnd *);
 533static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
 534                                   enum ipr_shutdown_type);
 535
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Records the command opcode, timestamp, resource handle, and caller
 * data into the adapter's in-memory trace ring.  Compiled out (no-op
 * macro below) when CONFIG_SCSI_IPR_TRACE is not set.
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Claim the next ring slot.  No explicit wrap here -- assumes
	 * trace_index wraps within the bounds of trace[] (e.g. an index
	 * type sized to the array) -- TODO confirm against ipr.h. */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	/* The ATA command byte lives at different offsets in the SIS64
	 * and SIS32 command layouts. */
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
 567
 568/**
 569 * ipr_lock_and_done - Acquire lock and complete command
 570 * @ipr_cmd:    ipr command struct
 571 *
 572 * Return value:
 573 *      none
 574 **/
 575static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
 576{
 577        unsigned long lock_flags;
 578        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 579
 580        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 581        ipr_cmd->done(ipr_cmd);
 582        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 583}
 584
 585/**
 586 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 587 * @ipr_cmd:    ipr command struct
 588 *
 589 * Return value:
 590 *      none
 591 **/
 592static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 593{
 594        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 595        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
 596        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
 597        dma_addr_t dma_addr = ipr_cmd->dma_addr;
 598
 599        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
 600        ioarcb->data_transfer_length = 0;
 601        ioarcb->read_data_transfer_length = 0;
 602        ioarcb->ioadl_len = 0;
 603        ioarcb->read_ioadl_len = 0;
 604
 605        if (ipr_cmd->ioa_cfg->sis64) {
 606                ioarcb->u.sis64_addr_data.data_ioadl_addr =
 607                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
 608                ioasa64->u.gata.status = 0;
 609        } else {
 610                ioarcb->write_ioadl_addr =
 611                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
 612                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
 613                ioasa->u.gata.status = 0;
 614        }
 615
 616        ioasa->hdr.ioasc = 0;
 617        ioasa->hdr.residual_data_len = 0;
 618        ipr_cmd->scsi_cmd = NULL;
 619        ipr_cmd->qc = NULL;
 620        ipr_cmd->sense_buffer[0] = 0;
 621        ipr_cmd->dma_use_sg = 0;
 622}
 623
 624/**
 625 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 626 * @ipr_cmd:    ipr command struct
 627 *
 628 * Return value:
 629 *      none
 630 **/
 631static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
 632                              void (*fast_done) (struct ipr_cmnd *))
 633{
 634        ipr_reinit_ipr_cmnd(ipr_cmd);
 635        ipr_cmd->u.scratch = 0;
 636        ipr_cmd->sibling = NULL;
 637        ipr_cmd->fast_done = fast_done;
 638        init_timer(&ipr_cmd->timer);
 639}
 640
 641/**
 642 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 643 * @ioa_cfg:    ioa config struct
 644 *
 645 * Return value:
 646 *      pointer to ipr command struct
 647 **/
 648static
 649struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 650{
 651        struct ipr_cmnd *ipr_cmd;
 652
 653        ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
 654        list_del(&ipr_cmd->queue);
 655
 656        return ipr_cmd;
 657}
 658
 659/**
 660 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 661 * @ioa_cfg:    ioa config struct
 662 *
 663 * Return value:
 664 *      pointer to ipr command struct
 665 **/
 666static
 667struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 668{
 669        struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
 670        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 671        return ipr_cmd;
 672}
 673
 674/**
 675 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 676 * @ioa_cfg:    ioa config struct
 677 * @clr_ints:     interrupts to clear
 678 *
 679 * This function masks all interrupts on the adapter, then clears the
 680 * interrupts specified in the mask
 681 *
 682 * Return value:
 683 *      none
 684 **/
 685static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
 686                                          u32 clr_ints)
 687{
 688        volatile u32 int_reg;
 689
 690        /* Stop new interrupts */
 691        ioa_cfg->allow_interrupts = 0;
 692
 693        /* Set interrupt mask to stop all new interrupts */
 694        if (ioa_cfg->sis64)
 695                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 696        else
 697                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 698
 699        /* Clear any pending interrupts */
 700        if (ioa_cfg->sis64)
 701                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
 702        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
 703        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 704}
 705
 706/**
 707 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 708 * @ioa_cfg:    ioa config struct
 709 *
 710 * Return value:
 711 *      0 on success / -EIO on failure
 712 **/
 713static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
 714{
 715        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
 716
 717        if (pcix_cmd_reg == 0)
 718                return 0;
 719
 720        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
 721                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
 722                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
 723                return -EIO;
 724        }
 725
 726        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
 727        return 0;
 728}
 729
 730/**
 731 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 732 * @ioa_cfg:    ioa config struct
 733 *
 734 * Return value:
 735 *      0 on success / -EIO on failure
 736 **/
 737static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
 738{
 739        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
 740
 741        if (pcix_cmd_reg) {
 742                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
 743                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
 744                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
 745                        return -EIO;
 746                }
 747        }
 748
 749        return 0;
 750}
 751
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted. It marks the qc as failed,
 * reports the device busy to libata, returns the command block
 * to the free queue, and completes the qc.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	/* Force libata EH to treat the aborted op as an error */
	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
 773
 774/**
 775 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 776 * @ipr_cmd:    ipr command struct
 777 *
 778 * This function is invoked by the interrupt handler for
 779 * ops generated by the SCSI mid-layer which are being aborted.
 780 *
 781 * Return value:
 782 *      none
 783 **/
 784static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 785{
 786        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 787        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 788
 789        scsi_cmd->result |= (DID_ERROR << 16);
 790
 791        scsi_dma_unmap(ipr_cmd->scsi_cmd);
 792        scsi_cmd->scsi_done(scsi_cmd);
 793        list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 794}
 795
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops. Each command on the
 * pending queue is removed, stamped with an "IOA was reset" IOASC,
 * and completed through the appropriate error-handling done routine.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		/* Fabricate a response indicating the adapter was reset */
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		/* Route completion through the EH done functions so the
		 * owning mid-layer (SCSI or SATA) sees the op as failed.
		 */
		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		/* Timer was armed by ipr_do_req(); kill it before completing */
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}
 828
/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits. The size hint is encoded in the low-order bits of the
 * IOARCB DMA address written to the IOARRIN register.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
 857
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value (in jiffies)
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Track the op on the pending queue until the adapter responds */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	/* Arm the per-command timer; timeout_func fires if no completion
	 * arrives within "timeout" jiffies.
	 */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
 891
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	/* NOTE(review): when a sibling command is linked, only the link
	 * is cleared and no wakeup occurs — presumably the sibling's own
	 * completion performs the wakeup; confirm against callers.
	 */
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
 909
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	/* Single-entry scatter/gather list */
	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		/* 64-bit descriptors carry flags and length in separate fields */
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		/* 32-bit descriptors pack flags and length into one word */
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			/* Reads use the dedicated read IOADL/length fields */
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
 954
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Must be called with the host lock held; the lock is dropped while
 * sleeping so completions can be processed, then reacquired.
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	/* Drop the host lock while waiting so the completion can run */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
 977
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		/* Build the HCAM CDB; bytes 7-8 hold the big-endian
		 * transfer length of the hostrcb buffer.
		 */
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		/* Pick the done routine matching the notification type */
		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		/* Adapter not accepting commands; park the hostrcb */
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
1027
1028/**
1029 * ipr_update_ata_class - Update the ata class in the resource entry
1030 * @res:        resource entry struct
1031 * @proto:      cfgte device bus protocol value
1032 *
1033 * Return value:
1034 *      none
1035 **/
1036static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1037{
1038        switch (proto) {
1039        case IPR_PROTO_SATA:
1040        case IPR_PROTO_SAS_STP:
1041                res->ata_class = ATA_DEV_ATA;
1042                break;
1043        case IPR_PROTO_SATA_ATAPI:
1044        case IPR_PROTO_SAS_STP_ATAPI:
1045                res->ata_class = ATA_DEV_ATAPI;
1046                break;
1047        default:
1048                res->ata_class = ATA_DEV_UNKNOWN;
1049                break;
1050        };
1051}
1052
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Clears the per-resource state flags, then fills in addressing and
 * type information from either the 64-bit or legacy config table
 * entry. On sis64, target ids are allocated from per-bus bitmaps.
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			/* Reuse the target id of an existing resource with
			 * the same dev_id, otherwise allocate a fresh one.
			 */
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			/* The IOA focal point sits alone on a virtual bus */
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		/* Legacy (32-bit SIS) config table carries the full
		 * bus/target/lun address directly.
		 */
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
1139
1140/**
1141 * ipr_is_same_device - Determine if two devices are the same.
1142 * @res:        resource entry struct
1143 * @cfgtew:     config table entry wrapper struct
1144 *
1145 * Return value:
1146 *      1 if the devices are the same / 0 otherwise
1147 **/
1148static int ipr_is_same_device(struct ipr_resource_entry *res,
1149                              struct ipr_config_table_entry_wrapper *cfgtew)
1150{
1151        if (res->ioa_cfg->sis64) {
1152                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1153                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
1154                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1155                                        sizeof(cfgtew->u.cfgte64->lun))) {
1156                        return 1;
1157                }
1158        } else {
1159                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1160                    res->target == cfgtew->u.cfgte->res_addr.target &&
1161                    res->lun == cfgtew->u.cfgte->res_addr.lun)
1162                        return 1;
1163        }
1164
1165        return 0;
1166}
1167
1168/**
1169 * ipr_format_res_path - Format the resource path for printing.
1170 * @res_path:   resource path
1171 * @buf:        buffer
1172 *
1173 * Return value:
1174 *      pointer to buffer
1175 **/
1176static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1177{
1178        int i;
1179        char *p = buffer;
1180
1181        *p = '\0';
1182        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1183        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1184                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1185
1186        return buffer;
1187}
1188
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Refreshes an existing resource entry from the adapter's config
 * table (64-bit or legacy layout). On sis64, a changed resource
 * path is logged against the attached scsi device.
 *
 * Return value:
 *	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		/* Detect a changed resource path so it can be logged below */
		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
					sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
1248
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *                        for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	/* Target-id bitmaps are only maintained on sis64 adapters */
	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		/* Multiple generic SCSI resources can share one target id
		 * (same dev_id); only release it when no other user remains.
		 */
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
1279
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Looks up (or allocates) the resource entry referenced by the
 * notification, refreshes it, schedules mid-layer add/remove work
 * as needed, and re-queues the hostrcb for the next notification.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	/* Is this notification for a resource we already know about? */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		/* New device: take an entry off the free resource queue.
		 * If none is available, just re-arm the HCAM and bail.
		 */
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			/* Device still attached: let the workqueue remove it */
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	/* Re-arm the HCAM so we get the next notification */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
1348
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 * On failure the HCAM is simply re-armed; on success the config
 * change is processed.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		/* An IOA reset is expected to fail outstanding HCAMs;
		 * anything else is worth logging.
		 */
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
1378
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index of the last character to consider in buf
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	/* Walk back over trailing blanks; never move below index 0 */
	while (i > 0 && buf[i] == ' ')
		i--;

	/* Append exactly one pad space and the terminator */
	buf[i + 1] = ' ';
	buf[i + 2] = '\0';
	return i + 2;
}
1398
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	/* Three fixed-width fields plus two pad spaces and the NUL */
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	/* Each strip_and_pad call leaves "field " and returns the
	 * offset at which the next field should be appended.
	 */
	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
1425
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * The VPD fields are fixed-width and not NUL-terminated, so they
 * are staged into a local buffer and terminated before printing.
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	/* Reuse the buffer for the serial number */
	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
1448
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Logs the base VPD compactly, then the 64-bit WWN as two words.
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
1465
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Logs the base VPD, then the 64-bit WWN as two big-endian words.
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
1479
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 12 (enhanced cache) error, selecting the 64-bit or
 * legacy hostrcb error layout based on the adapter's SIS type.
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
1515
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	/* Legacy (non-enhanced) cache errors always use the 32-bit type 02 overlay */
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	/* VPD of the cache directory card / adapter pair as currently attached */
	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	/* VPD of the pairing the adapter last saw (the expected configuration) */
	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
1547
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	/* Both counters arrive big-endian from the adapter */
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	/*
	 * One variable-count entry per logged device error.
	 * NOTE(review): errors_logged is adapter-supplied and is not clamped
	 * to the dev[] array size here (ipr_log_enhanced_array_error does
	 * clamp with min_t) — confirm the firmware bounds this value.
	 */
	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
1587
1588/**
1589 * ipr_log_sis64_config_error - Log a device error.
1590 * @ioa_cfg:    ioa config struct
1591 * @hostrcb:    hostrcb struct
1592 *
1593 * Return value:
1594 *      none
1595 **/
1596static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1597                                       struct ipr_hostrcb *hostrcb)
1598{
1599        int errors_logged, i;
1600        struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1601        struct ipr_hostrcb_type_23_error *error;
1602        char buffer[IPR_MAX_RES_PATH_LENGTH];
1603
1604        error = &hostrcb->hcam.u.error64.u.type_23_error;
1605        errors_logged = be32_to_cpu(error->errors_logged);
1606
1607        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1608                be32_to_cpu(error->errors_detected), errors_logged);
1609
1610        dev_entry = error->dev;
1611
1612        for (i = 0; i < errors_logged; i++, dev_entry++) {
1613                ipr_err_separator;
1614
1615                ipr_err("Device %d : %s", i + 1,
1616                         ipr_format_res_path(dev_entry->res_path, buffer,
1617                                             sizeof(buffer)));
1618                ipr_log_ext_vpd(&dev_entry->vpd);
1619
1620                ipr_err("-----New Device Information-----\n");
1621                ipr_log_ext_vpd(&dev_entry->new_vpd);
1622
1623                ipr_err("Cache Directory Card Information:\n");
1624                ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1625
1626                ipr_err("Adapter Card Information:\n");
1627                ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1628        }
1629}
1630
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	/* Legacy (non-enhanced) configuration errors use the type 03 overlay */
	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	/* One entry per logged device error; count is adapter supplied */
	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		/* The legacy entry carries 5 extra opaque IOA data words */
		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
1677
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	/* An all-'0' serial number marks an unused array member slot */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	/* Clamp the adapter-supplied count to the array_member[] capacity */
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		/* Skip unused slots */
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}
1728
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	/* An all-'0' serial number marks an unused array member slot */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	/*
	 * The 18 potential members are split across two fixed arrays in the
	 * type 04 overlay: indices 0-9 come from array_member[], indices
	 * 10-17 from array_member2[] — hence the pointer switch at i == 9.
	 */
	for (i = 0; i < 18; i++) {
		/* Skip unused slots */
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		/* Jump to the second array after the 10th entry */
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
1783
1784/**
1785 * ipr_log_hex_data - Log additional hex IOA error data.
1786 * @ioa_cfg:    ioa config struct
1787 * @data:               IOA error data
1788 * @len:                data length
1789 *
1790 * Return value:
1791 *      none
1792 **/
1793static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1794{
1795        int i;
1796
1797        if (len == 0)
1798                return;
1799
1800        if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1801                len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1802
1803        for (i = 0; i < len / 4; i += 4) {
1804                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1805                        be32_to_cpu(data[i]),
1806                        be32_to_cpu(data[i+1]),
1807                        be32_to_cpu(data[i+2]),
1808                        be32_to_cpu(data[i+3]));
1809        }
1810}
1811
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	/* sis64 adapters carry the type 17 overlay inside the 64-bit error union */
	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	/* Force NUL termination, then trim trailing whitespace in place */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	/*
	 * NOTE(review): the PRC and the hex-data length below are taken from
	 * the 32-bit ipr_hostrcb_error layout even on sis64 — presumably the
	 * relevant offsets coincide in both layouts; confirm against ipr.h.
	 */
	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	/* Dump whatever trails the fixed part of the overlay as raw hex */
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
1841
1842/**
1843 * ipr_log_dual_ioa_error - Log a dual adapter error.
1844 * @ioa_cfg:    ioa config struct
1845 * @hostrcb:    hostrcb struct
1846 *
1847 * Return value:
1848 *      none
1849 **/
1850static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1851                                   struct ipr_hostrcb *hostrcb)
1852{
1853        struct ipr_hostrcb_type_07_error *error;
1854
1855        error = &hostrcb->hcam.u.error.u.type_07_error;
1856        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1857        strim(error->failure_reason);
1858
1859        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1860                     be32_to_cpu(hostrcb->hcam.u.error.prc));
1861        ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1862        ipr_log_hex_data(ioa_cfg, error->data,
1863                         be32_to_cpu(hostrcb->hcam.length) -
1864                         (offsetof(struct ipr_hostrcb_error, u) +
1865                          offsetof(struct ipr_hostrcb_type_07_error, data)));
1866}
1867
/* Human-readable names for the path-active bits of a fabric path_state */
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};
1876
/* Human-readable names for the path-state bits of a fabric path_state */
static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
1886
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Looks up human-readable strings for the active and state bits of the
 * descriptor's path_state and logs the path location. Fields whose value
 * is 0xff are omitted from the message. If either lookup fails, a raw
 * fallback line is logged instead.
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			/* Pick the format that includes only the valid (non-0xff) fields */
			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	/* Unknown active/state combination: log the raw values */
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
1935
1936/**
1937 * ipr_log64_fabric_path - Log a fabric path error
1938 * @hostrcb:    hostrcb struct
1939 * @fabric:             fabric descriptor
1940 *
1941 * Return value:
1942 *      none
1943 **/
1944static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1945                                  struct ipr_hostrcb64_fabric_desc *fabric)
1946{
1947        int i, j;
1948        u8 path_state = fabric->path_state;
1949        u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1950        u8 state = path_state & IPR_PATH_STATE_MASK;
1951        char buffer[IPR_MAX_RES_PATH_LENGTH];
1952
1953        for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1954                if (path_active_desc[i].active != active)
1955                        continue;
1956
1957                for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1958                        if (path_state_desc[j].state != state)
1959                                continue;
1960
1961                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1962                                     path_active_desc[i].desc, path_state_desc[j].desc,
1963                                     ipr_format_res_path(fabric->res_path, buffer,
1964                                                         sizeof(buffer)));
1965                        return;
1966                }
1967        }
1968
1969        ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1970                ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1971}
1972
/* Human-readable names for the type bits of a path element's type_status */
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};
1982
/* Human-readable names for the status bits of a path element's type_status */
static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};
1994
/*
 * SAS link rate names, indexed by the 4-bit value extracted with
 * (link_rate & IPR_PHY_LINK_RATE_MASK); 16 entries cover every index.
 */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
2013
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Logs one path element with its type, status, link rate and WWN.
 * Cascade/phy fields whose value is 0xff are omitted from the message.
 * Non-existent elements are skipped; unrecognized type/status values
 * fall through to a raw fallback line.
 *
 * Return value:
 *	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			/* IOA ports have no cascade field; always show the phy */
			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				/* Pick the format that includes only the valid (non-0xff) fields */
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	/* Unknown type/status combination: log the raw type_status byte */
	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
2080
/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Logs one sis64 path element (resource path, link rate, WWN). Elements
 * that do not exist or that do not carry the SIS64 descriptor id are
 * skipped; unrecognized type/status values fall through to a raw
 * fallback line.
 *
 * Return value:
 *	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(cfg->res_path, buffer,
							 sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	/* Unknown type/status combination: log the raw type_status byte */
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
2124
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	/* Force NUL termination before logging the adapter-supplied string */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Bytes remaining after the fixed header: fabric descriptors + extra data */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	/*
	 * Walk the variable-length fabric descriptors; each one carries its
	 * own length field, and its path elements are visited with
	 * for_each_fabric_cfg().
	 */
	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Anything left after the descriptors is dumped as raw hex */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2161
2162/**
2163 * ipr_log_sis64_array_error - Log a sis64 array error.
2164 * @ioa_cfg:    ioa config struct
2165 * @hostrcb:    hostrcb struct
2166 *
2167 * Return value:
2168 *      none
2169 **/
2170static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2171                                      struct ipr_hostrcb *hostrcb)
2172{
2173        int i, num_entries;
2174        struct ipr_hostrcb_type_24_error *error;
2175        struct ipr_hostrcb64_array_data_entry *array_entry;
2176        char buffer[IPR_MAX_RES_PATH_LENGTH];
2177        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2178
2179        error = &hostrcb->hcam.u.error64.u.type_24_error;
2180
2181        ipr_err_separator;
2182
2183        ipr_err("RAID %s Array Configuration: %s\n",
2184                error->protection_level,
2185                ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2186
2187        ipr_err_separator;
2188
2189        array_entry = error->array_member;
2190        num_entries = min_t(u32, error->num_entries,
2191                            ARRAY_SIZE(error->array_member));
2192
2193        for (i = 0; i < num_entries; i++, array_entry++) {
2194
2195                if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2196                        continue;
2197
2198                if (error->exposed_mode_adn == i)
2199                        ipr_err("Exposed Array Member %d:\n", i);
2200                else
2201                        ipr_err("Array Member %d:\n", i);
2202
2203                ipr_err("Array Member %d:\n", i);
2204                ipr_log_ext_vpd(&array_entry->vpd);
2205                ipr_err("Current Location: %s\n",
2206                         ipr_format_res_path(array_entry->res_path, buffer,
2207                                             sizeof(buffer)));
2208                ipr_err("Expected Location: %s\n",
2209                         ipr_format_res_path(array_entry->expected_res_path,
2210                                             buffer, sizeof(buffer)));
2211
2212                ipr_err_separator;
2213        }
2214}
2215
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	/* Force NUL termination before logging the adapter-supplied string */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Bytes remaining after the fixed header: fabric descriptors + extra data */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	/*
	 * Walk the variable-length fabric descriptors; each one carries its
	 * own length field, and its path elements are visited with
	 * for_each_fabric_cfg().
	 */
	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Anything left after the descriptors is dumped as raw hex */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2253
2254/**
2255 * ipr_log_generic_error - Log an adapter error.
2256 * @ioa_cfg:    ioa config struct
2257 * @hostrcb:    hostrcb struct
2258 *
2259 * Return value:
2260 *      none
2261 **/
2262static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2263                                  struct ipr_hostrcb *hostrcb)
2264{
2265        ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2266                         be32_to_cpu(hostrcb->hcam.length));
2267}
2268
2269/**
2270 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2271 * @ioasc:      IOASC
2272 *
2273 * This function will return the index of into the ipr_error_table
2274 * for the specified IOASC. If the IOASC is not in the table,
2275 * 0 will be returned, which points to the entry used for unknown errors.
2276 *
2277 * Return value:
2278 *      index into the ipr_error_table
2279 **/
2280static u32 ipr_get_error(u32 ioasc)
2281{
2282        int i;
2283
2284        for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2285                if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2286                        return i;
2287
2288        return 0;
2289}
2290
2291/**
2292 * ipr_handle_log_data - Log an adapter error.
2293 * @ioa_cfg:    ioa config struct
2294 * @hostrcb:    hostrcb struct
2295 *
2296 * This function logs an adapter error to the system.
2297 *
2298 * Return value:
2299 *      none
2300 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	/* Only error-log notifications are decoded here */
	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	/* sis64 and legacy adapters report the failing device IOASC in
	 * different hostrcb layouts. */
	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	/* log_hcam == 0 means this IOASC should not be logged at all */
	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	/* Below the configured verbosity: the summary line above suffices */
	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	/* Clamp an implausible adapter-reported length so the overlay
	 * decoders below cannot read past the hostrcb buffer. */
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	/* Dispatch to the decoder for this overlay format */
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
2387
2388/**
2389 * ipr_process_error - Op done function for an adapter error log.
2390 * @ipr_cmd:    ipr command struct
2391 *
2392 * This function is the op done function for an error log host
2393 * controlled async from the adapter. It will log the error and
2394 * send the HCAM back to the adapter.
2395 *
2396 * Return value:
2397 *      none
2398 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	/* The failing-device IOASC lives in different hostrcb layouts on
	 * sis64 vs. legacy adapters. */
	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	/* Retire the hostrcb from its pending list and return the command
	 * block to the free queue before doing any logging. */
	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		/* IOA_WAS_RESET is expected during reset/reload; anything
		 * else is a genuine HCAM failure worth reporting. */
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	/* Re-arm: hand the hostrcb back to the adapter for the next entry */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
2425
2426/**
2427 * ipr_timeout -  An internally generated op has timed out.
2428 * @ipr_cmd:    ipr command struct
2429 *
2430 * This function blocks host requests and initiates an
2431 * adapter reset.
2432 *
2433 * Return value:
2434 *      none
2435 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	/* If a dump is pending, have the reset path collect it */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	/* Start a reset only if none is in progress, or if the timed-out
	 * command is itself the one driving the current reset. */
	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
2457
2458/**
2459 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2460 * @ipr_cmd:    ipr command struct
2461 *
2462 * This function blocks host requests and initiates an
2463 * adapter reset.
2464 *
2465 * Return value:
2466 *      none
2467 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	/* If a dump is pending, have the reset path collect it */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	/* Start a reset only if none is in progress, or if the timed-out
	 * command is itself the one driving the current reset. */
	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		/* With fastfail, burn the remaining reload retries so the
		 * adapter is failed quickly instead of retried -- confirm
		 * against ipr_initiate_ioa_reset's retry accounting. */
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
2492
2493/**
2494 * ipr_reset_reload - Reset/Reload the IOA
2495 * @ioa_cfg:            ioa config struct
2496 * @shutdown_type:      shutdown type
2497 *
2498 * This function resets the adapter and re-initializes it.
2499 * This function assumes that all new host commands have been stopped.
2500 * Return value:
2501 *      SUCCESS / FAILED
2502 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	/* Kick off a reset unless one is already underway */
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	/* Caller holds the host lock; drop it while sleeping until the
	 * reset/reload completes, then reacquire. */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* The adapter came out of reset dead -- e.g. a host reset raced
	 * with an already in-progress reset and that reset failed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
2522
2523/**
2524 * ipr_find_ses_entry - Find matching SES in SES table
2525 * @res:        resource entry struct of SES
2526 *
2527 * Return value:
2528 *      pointer to SES table entry / NULL on failure
2529 **/
2530static const struct ipr_ses_table_entry *
2531ipr_find_ses_entry(struct ipr_resource_entry *res)
2532{
2533        int i, j, matches;
2534        struct ipr_std_inq_vpids *vpids;
2535        const struct ipr_ses_table_entry *ste = ipr_ses_table;
2536
2537        for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2538                for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2539                        if (ste->compare_product_id_byte[j] == 'X') {
2540                                vpids = &res->std_inq_data.vpids;
2541                                if (vpids->product_id[j] == ste->product_id[j])
2542                                        matches++;
2543                                else
2544                                        break;
2545                        } else
2546                                matches++;
2547                }
2548
2549                if (matches == IPR_PROD_ID_LEN)
2550                        return ste;
2551        }
2552
2553        return NULL;
2554}
2555
2556/**
2557 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2558 * @ioa_cfg:    ioa config struct
2559 * @bus:                SCSI bus
2560 * @bus_width:  bus width
2561 *
2562 * Return value:
2563 *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2564 *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2565 *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2566 *      max 160MHz = max 320MB/sec).
2567 **/
2568static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2569{
2570        struct ipr_resource_entry *res;
2571        const struct ipr_ses_table_entry *ste;
2572        u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2573
2574        /* Loop through each config table entry in the config table buffer */
2575        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2576                if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2577                        continue;
2578
2579                if (bus != res->bus)
2580                        continue;
2581
2582                if (!(ste = ipr_find_ses_entry(res)))
2583                        continue;
2584
2585                max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2586        }
2587
2588        return max_xfer_rate;
2589}
2590
2591/**
2592 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2593 * @ioa_cfg:            ioa config struct
2594 * @max_delay:          max delay in micro-seconds to wait
2595 *
2596 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2597 *
2598 * Return value:
2599 *      0 on success / other on failure
2600 **/
2601static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2602{
2603        volatile u32 pcii_reg;
2604        int delay = 1;
2605
2606        /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2607        while (delay < max_delay) {
2608                pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2609
2610                if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2611                        return 0;
2612
2613                /* udelay cannot be used if delay is more than a few milliseconds */
2614                if ((delay / 1000) > MAX_UDELAY_MS)
2615                        mdelay(delay / 1000);
2616                else
2617                        udelay(delay);
2618
2619                delay += delay;
2620        }
2621        return -EIO;
2622}
2623
2624/**
2625 * ipr_get_sis64_dump_data_section - Dump IOA memory
2626 * @ioa_cfg:                    ioa config struct
2627 * @start_addr:                 adapter address to dump
2628 * @dest:                       destination kernel buffer
2629 * @length_in_words:            length to dump in 4 byte words
2630 *
2631 * Return value:
2632 *      0 on success
2633 **/
2634static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2635                                           u32 start_addr,
2636                                           __be32 *dest, u32 length_in_words)
2637{
2638        int i;
2639
2640        for (i = 0; i < length_in_words; i++) {
2641                writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2642                *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2643                dest++;
2644        }
2645
2646        return 0;
2647}
2648
2649/**
2650 * ipr_get_ldump_data_section - Dump IOA memory
2651 * @ioa_cfg:                    ioa config struct
2652 * @start_addr:                 adapter address to dump
2653 * @dest:                               destination kernel buffer
2654 * @length_in_words:    length to dump in 4 byte words
2655 *
2656 * Return value:
2657 *      0 on success / -EIO on failure
2658 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* sis64 adapters use a simpler register-based dump mechanism */
	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state  */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Each word is handshaken: wait for ack, read the mailbox, then
	 * (except for the final word) clear the ack to request the next. */
	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	/* Timed out waiting for LDUMP exit; data was still transferred,
	 * so report success regardless. */
	return 0;
}
2739
2740#ifdef CONFIG_SCSI_IPR_DUMP
2741/**
2742 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2743 * @ioa_cfg:            ioa config struct
2744 * @pci_address:        adapter address
2745 * @length:                     length of data to copy
2746 *
2747 * Copy data from PCI adapter to kernel buffer.
2748 * Note: length MUST be a 4 byte multiple
2749 * Return value:
2750 *      0 on success / other on failure
2751 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

	/* Copy until the request is satisfied or the overall dump would
	 * exceed the per-format size cap. */
	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		/* Grab a fresh page when the current one is full (or on
		 * the very first pass, when page_offset is 0). */
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				/* Out of memory: return what we have so far */
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		/* Chunk size = min(bytes left to copy, space left in page) */
		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		/* The ldump handshake must run under the host lock; bail
		 * out if the dump was aborted while we were unlocked. */
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		/* Yield between chunks; dumps can take a while */
		schedule();
	}

	return bytes_copied;
}
2810
2811/**
2812 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2813 * @hdr:        dump entry header struct
2814 *
2815 * Return value:
2816 *      nothing
2817 **/
2818static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2819{
2820        hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2821        hdr->num_elems = 1;
2822        hdr->offset = sizeof(*hdr);
2823        hdr->status = IPR_DUMP_STATUS_SUCCESS;
2824}
2825
2826/**
2827 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2828 * @ioa_cfg:    ioa config struct
2829 * @driver_dump:        driver dump struct
2830 *
2831 * Return value:
2832 *      nothing
2833 **/
2834static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2835                                   struct ipr_driver_dump *driver_dump)
2836{
2837        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2838
2839        ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2840        driver_dump->ioa_type_entry.hdr.len =
2841                sizeof(struct ipr_dump_ioa_type_entry) -
2842                sizeof(struct ipr_dump_entry_header);
2843        driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2844        driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2845        driver_dump->ioa_type_entry.type = ioa_cfg->type;
2846        driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2847                (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2848                ucode_vpd->minor_release[1];
2849        driver_dump->hdr.num_entries++;
2850}
2851
2852/**
2853 * ipr_dump_version_data - Fill in the driver version in the dump.
2854 * @ioa_cfg:    ioa config struct
2855 * @driver_dump:        driver dump struct
2856 *
2857 * Return value:
2858 *      nothing
2859 **/
2860static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2861                                  struct ipr_driver_dump *driver_dump)
2862{
2863        ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2864        driver_dump->version_entry.hdr.len =
2865                sizeof(struct ipr_dump_version_entry) -
2866                sizeof(struct ipr_dump_entry_header);
2867        driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2868        driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2869        strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2870        driver_dump->hdr.num_entries++;
2871}
2872
2873/**
2874 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2875 * @ioa_cfg:    ioa config struct
2876 * @driver_dump:        driver dump struct
2877 *
2878 * Return value:
2879 *      nothing
2880 **/
2881static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2882                                   struct ipr_driver_dump *driver_dump)
2883{
2884        ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2885        driver_dump->trace_entry.hdr.len =
2886                sizeof(struct ipr_dump_trace_entry) -
2887                sizeof(struct ipr_dump_entry_header);
2888        driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2889        driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2890        memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2891        driver_dump->hdr.num_entries++;
2892}
2893
2894/**
2895 * ipr_dump_location_data - Fill in the IOA location in the dump.
2896 * @ioa_cfg:    ioa config struct
2897 * @driver_dump:        driver dump struct
2898 *
2899 * Return value:
2900 *      nothing
2901 **/
2902static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2903                                   struct ipr_driver_dump *driver_dump)
2904{
2905        ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2906        driver_dump->location_entry.hdr.len =
2907                sizeof(struct ipr_dump_location_entry) -
2908                sizeof(struct ipr_dump_entry_header);
2909        driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2910        driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2911        strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2912        driver_dump->hdr.num_entries++;
2913}
2914
2915/**
2916 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2917 * @ioa_cfg:    ioa config struct
2918 * @dump:               dump struct
2919 *
2920 * Return value:
2921 *      nothing
2922 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Only proceed if a dump has actually been requested */
	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	/* sis64: give the adapter time to prepare the dump table before
	 * reading the mailbox (sleep with the lock dropped). */
	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* The mailbox holds the adapter address of the smart dump table */
	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	/* Driver-side entries: version, location, adapter type, trace */
	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data.  sdt represents the pointer
	 to the ioa generated dump table.  Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	/* Table geometry and size cap differ between the sis64 (fmt3)
	 * and legacy (fmt2) dump formats. */
	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	/* Never trust the adapter-reported count beyond the table size */
	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	/* The per-entry copy loop below sleeps, so drop the host lock */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			/* fmt3 end_token is a byte count; fmt2 end_token is
			 * an end offset that must be diffed against start. */
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				/* Skip entries that alone exceed the cap */
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				/* A short copy means the dump is truncated:
				 * mark qualified success and stop. */
				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	/* Ensure dump contents are visible before publishing completion */
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}
3077
3078#else
3079#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3080#endif
3081
3082/**
3083 * ipr_release_dump - Free adapter dump memory
3084 * @kref:       kref struct
3085 *
3086 * Return value:
3087 *      nothing
3088 **/
3089static void ipr_release_dump(struct kref *kref)
3090{
3091        struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3092        struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3093        unsigned long lock_flags = 0;
3094        int i;
3095
3096        ENTER;
3097        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3098        ioa_cfg->dump = NULL;
3099        ioa_cfg->sdt_state = INACTIVE;
3100        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3101
3102        for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3103                free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3104
3105        vfree(dump->ioa_dump.ioa_data);
3106        kfree(dump);
3107        LEAVE;
3108}
3109
3110/**
3111 * ipr_worker_thread - Worker thread
3112 * @work:               ioa config struct
3113 *
3114 * Called at task level from a work thread. This function takes care
3115 * of adding and removing device from the mid-layer as configuration
3116 * changes are detected by the adapter.
3117 *
3118 * Return value:
3119 *      nothing
3120 **/
3121static void ipr_worker_thread(struct work_struct *work)
3122{
3123        unsigned long lock_flags;
3124        struct ipr_resource_entry *res;
3125        struct scsi_device *sdev;
3126        struct ipr_dump *dump;
3127        struct ipr_ioa_cfg *ioa_cfg =
3128                container_of(work, struct ipr_ioa_cfg, work_q);
3129        u8 bus, target, lun;
3130        int did_work;
3131
3132        ENTER;
3133        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3134
3135        if (ioa_cfg->sdt_state == READ_DUMP) {
3136                dump = ioa_cfg->dump;
3137                if (!dump) {
3138                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139                        return;
3140                }
3141                kref_get(&dump->kref);
3142                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143                ipr_get_ioa_dump(ioa_cfg, dump);
3144                kref_put(&dump->kref, ipr_release_dump);
3145
3146                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3147                if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3148                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3149                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150                return;
3151        }
3152
3153restart:
3154        do {
3155                did_work = 0;
3156                if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3157                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158                        return;
3159                }
3160
3161                list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3162                        if (res->del_from_ml && res->sdev) {
3163                                did_work = 1;
3164                                sdev = res->sdev;
3165                                if (!scsi_device_get(sdev)) {
3166                                        if (!res->add_to_ml)
3167                                                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3168                                        else
3169                                                res->del_from_ml = 0;
3170                                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3171                                        scsi_remove_device(sdev);
3172                                        scsi_device_put(sdev);
3173                                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3174                                }
3175                                break;
3176                        }
3177                }
3178        } while (did_work);
3179
3180        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3181                if (res->add_to_ml) {
3182                        bus = res->bus;
3183                        target = res->target;
3184                        lun = res->lun;
3185                        res->add_to_ml = 0;
3186                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3187                        scsi_add_device(ioa_cfg->host, bus, target, lun);
3188                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189                        goto restart;
3190                }
3191        }
3192
3193        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194        kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3195        LEAVE;
3196}
3197
3198#ifdef CONFIG_SCSI_IPR_TRACE
3199/**
3200 * ipr_read_trace - Dump the adapter trace
3201 * @filp:               open sysfs file
3202 * @kobj:               kobject struct
3203 * @bin_attr:           bin_attribute struct
3204 * @buf:                buffer
3205 * @off:                offset
3206 * @count:              buffer size
3207 *
3208 * Return value:
3209 *      number of bytes printed to buffer
3210 **/
3211static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3212                              struct bin_attribute *bin_attr,
3213                              char *buf, loff_t off, size_t count)
3214{
3215        struct device *dev = container_of(kobj, struct device, kobj);
3216        struct Scsi_Host *shost = class_to_shost(dev);
3217        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3218        unsigned long lock_flags = 0;
3219        ssize_t ret;
3220
3221        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222        ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3223                                IPR_TRACE_SIZE);
3224        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3225
3226        return ret;
3227}
3228
/* sysfs binary attribute: read-only adapter trace buffer ("trace") */
static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
3237#endif
3238
3239/**
3240 * ipr_show_fw_version - Show the firmware version
3241 * @dev:        class device struct
3242 * @buf:        buffer
3243 *
3244 * Return value:
3245 *      number of bytes printed to buffer
3246 **/
3247static ssize_t ipr_show_fw_version(struct device *dev,
3248                                   struct device_attribute *attr, char *buf)
3249{
3250        struct Scsi_Host *shost = class_to_shost(dev);
3251        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3252        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3253        unsigned long lock_flags = 0;
3254        int len;
3255
3256        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257        len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3258                       ucode_vpd->major_release, ucode_vpd->card_type,
3259                       ucode_vpd->minor_release[0],
3260                       ucode_vpd->minor_release[1]);
3261        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262        return len;
3263}
3264
/* sysfs attribute: read-only adapter firmware level ("fw_version") */
static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
3272
3273/**
3274 * ipr_show_log_level - Show the adapter's error logging level
3275 * @dev:        class device struct
3276 * @buf:        buffer
3277 *
3278 * Return value:
3279 *      number of bytes printed to buffer
3280 **/
3281static ssize_t ipr_show_log_level(struct device *dev,
3282                                   struct device_attribute *attr, char *buf)
3283{
3284        struct Scsi_Host *shost = class_to_shost(dev);
3285        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3286        unsigned long lock_flags = 0;
3287        int len;
3288
3289        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3290        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3291        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292        return len;
3293}
3294
3295/**
3296 * ipr_store_log_level - Change the adapter's error logging level
3297 * @dev:        class device struct
3298 * @buf:        buffer
3299 *
3300 * Return value:
3301 *      number of bytes printed to buffer
3302 **/
3303static ssize_t ipr_store_log_level(struct device *dev,
3304                                   struct device_attribute *attr,
3305                                   const char *buf, size_t count)
3306{
3307        struct Scsi_Host *shost = class_to_shost(dev);
3308        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3309        unsigned long lock_flags = 0;
3310
3311        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3312        ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3313        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3314        return strlen(buf);
3315}
3316
/* sysfs attribute: read/write error logging level ("log_level") */
static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
3325
3326/**
3327 * ipr_store_diagnostics - IOA Diagnostics interface
3328 * @dev:        device struct
3329 * @buf:        buffer
3330 * @count:      buffer size
3331 *
3332 * This function will reset the adapter and wait a reasonable
3333 * amount of time for any errors that the adapter might log.
3334 *
3335 * Return value:
3336 *      count on success / other on failure
3337 **/
3338static ssize_t ipr_store_diagnostics(struct device *dev,
3339                                     struct device_attribute *attr,
3340                                     const char *buf, size_t count)
3341{
3342        struct Scsi_Host *shost = class_to_shost(dev);
3343        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3344        unsigned long lock_flags = 0;
3345        int rc = count;
3346
3347        if (!capable(CAP_SYS_ADMIN))
3348                return -EACCES;
3349
3350        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3351        while (ioa_cfg->in_reset_reload) {
3352                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3354                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3355        }
3356
3357        ioa_cfg->errors_logged = 0;
3358        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3359
3360        if (ioa_cfg->in_reset_reload) {
3361                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3362                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3363
3364                /* Wait for a second for any errors to be logged */
3365                msleep(1000);
3366        } else {
3367                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3368                return -EIO;
3369        }
3370
3371        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372        if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3373                rc = -EIO;
3374        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375
3376        return rc;
3377}
3378
/* sysfs attribute: write-only diagnostics trigger ("run_diagnostics") */
static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
3386
3387/**
3388 * ipr_show_adapter_state - Show the adapter's state
3389 * @class_dev:  device struct
3390 * @buf:        buffer
3391 *
3392 * Return value:
3393 *      number of bytes printed to buffer
3394 **/
3395static ssize_t ipr_show_adapter_state(struct device *dev,
3396                                      struct device_attribute *attr, char *buf)
3397{
3398        struct Scsi_Host *shost = class_to_shost(dev);
3399        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3400        unsigned long lock_flags = 0;
3401        int len;
3402
3403        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404        if (ioa_cfg->ioa_is_dead)
3405                len = snprintf(buf, PAGE_SIZE, "offline\n");
3406        else
3407                len = snprintf(buf, PAGE_SIZE, "online\n");
3408        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3409        return len;
3410}
3411
3412/**
3413 * ipr_store_adapter_state - Change adapter state
3414 * @dev:        device struct
3415 * @buf:        buffer
3416 * @count:      buffer size
3417 *
3418 * This function will change the adapter's state.
3419 *
3420 * Return value:
3421 *      count on success / other on failure
3422 **/
3423static ssize_t ipr_store_adapter_state(struct device *dev,
3424                                       struct device_attribute *attr,
3425                                       const char *buf, size_t count)
3426{
3427        struct Scsi_Host *shost = class_to_shost(dev);
3428        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3429        unsigned long lock_flags;
3430        int result = count;
3431
3432        if (!capable(CAP_SYS_ADMIN))
3433                return -EACCES;
3434
3435        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3436        if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3437                ioa_cfg->ioa_is_dead = 0;
3438                ioa_cfg->reset_retries = 0;
3439                ioa_cfg->in_ioa_bringdown = 0;
3440                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3441        }
3442        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3443        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3444
3445        return result;
3446}
3447
/* sysfs attribute: read/write adapter online state ("online_state") */
static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
3456
3457/**
3458 * ipr_store_reset_adapter - Reset the adapter
3459 * @dev:        device struct
3460 * @buf:        buffer
3461 * @count:      buffer size
3462 *
3463 * This function will reset the adapter.
3464 *
3465 * Return value:
3466 *      count on success / other on failure
3467 **/
3468static ssize_t ipr_store_reset_adapter(struct device *dev,
3469                                       struct device_attribute *attr,
3470                                       const char *buf, size_t count)
3471{
3472        struct Scsi_Host *shost = class_to_shost(dev);
3473        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3474        unsigned long lock_flags;
3475        int result = count;
3476
3477        if (!capable(CAP_SYS_ADMIN))
3478                return -EACCES;
3479
3480        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3481        if (!ioa_cfg->in_reset_reload)
3482                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3483        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3485
3486        return result;
3487}
3488
/* sysfs attribute: write-only adapter reset trigger ("reset_host") */
static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
3496
3497/**
3498 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3499 * @buf_len:            buffer length
3500 *
3501 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3502 * list to use for microcode download
3503 *
3504 * Return value:
3505 *      pointer to sglist / NULL on failure
3506 **/
3507static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3508{
3509        int sg_size, order, bsize_elem, num_elem, i, j;
3510        struct ipr_sglist *sglist;
3511        struct scatterlist *scatterlist;
3512        struct page *page;
3513
3514        /* Get the minimum size per scatter/gather element */
3515        sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3516
3517        /* Get the actual size per element */
3518        order = get_order(sg_size);
3519
3520        /* Determine the actual number of bytes per element */
3521        bsize_elem = PAGE_SIZE * (1 << order);
3522
3523        /* Determine the actual number of sg entries needed */
3524        if (buf_len % bsize_elem)
3525                num_elem = (buf_len / bsize_elem) + 1;
3526        else
3527                num_elem = buf_len / bsize_elem;
3528
3529        /* Allocate a scatter/gather list for the DMA */
3530        sglist = kzalloc(sizeof(struct ipr_sglist) +
3531                         (sizeof(struct scatterlist) * (num_elem - 1)),
3532                         GFP_KERNEL);
3533
3534        if (sglist == NULL) {
3535                ipr_trace;
3536                return NULL;
3537        }
3538
3539        scatterlist = sglist->scatterlist;
3540        sg_init_table(scatterlist, num_elem);
3541
3542        sglist->order = order;
3543        sglist->num_sg = num_elem;
3544
3545        /* Allocate a bunch of sg elements */
3546        for (i = 0; i < num_elem; i++) {
3547                page = alloc_pages(GFP_KERNEL, order);
3548                if (!page) {
3549                        ipr_trace;
3550
3551                        /* Free up what we already allocated */
3552                        for (j = i - 1; j >= 0; j--)
3553                                __free_pages(sg_page(&scatterlist[j]), order);
3554                        kfree(sglist);
3555                        return NULL;
3556                }
3557
3558                sg_set_page(&scatterlist[i], page, 0, 0);
3559        }
3560
3561        return sglist;
3562}
3563
3564/**
3565 * ipr_free_ucode_buffer - Frees a microcode download buffer
3566 * @p_dnld:             scatter/gather list pointer
3567 *
3568 * Free a DMA'able ucode download buffer previously allocated with
3569 * ipr_alloc_ucode_buffer
3570 *
3571 * Return value:
3572 *      nothing
3573 **/
3574static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3575{
3576        int i;
3577
3578        for (i = 0; i < sglist->num_sg; i++)
3579                __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3580
3581        kfree(sglist);
3582}
3583
3584/**
3585 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3586 * @sglist:             scatter/gather list pointer
3587 * @buffer:             buffer pointer
3588 * @len:                buffer length
3589 *
3590 * Copy a microcode image from a user buffer into a buffer allocated by
3591 * ipr_alloc_ucode_buffer
3592 *
3593 * Return value:
3594 *      0 on success / other on failure
3595 **/
3596static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3597                                 u8 *buffer, u32 len)
3598{
3599        int bsize_elem, i, result = 0;
3600        struct scatterlist *scatterlist;
3601        void *kaddr;
3602
3603        /* Determine the actual number of bytes per element */
3604        bsize_elem = PAGE_SIZE * (1 << sglist->order);
3605
3606        scatterlist = sglist->scatterlist;
3607
3608        for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3609                struct page *page = sg_page(&scatterlist[i]);
3610
3611                kaddr = kmap(page);
3612                memcpy(kaddr, buffer, bsize_elem);
3613                kunmap(page);
3614
3615                scatterlist[i].length = bsize_elem;
3616
3617                if (result != 0) {
3618                        ipr_trace;
3619                        return result;
3620                }
3621        }
3622
3623        if (len % bsize_elem) {
3624                struct page *page = sg_page(&scatterlist[i]);
3625
3626                kaddr = kmap(page);
3627                memcpy(kaddr, buffer, len % bsize_elem);
3628                kunmap(page);
3629
3630                scatterlist[i].length = len % bsize_elem;
3631        }
3632
3633        sglist->buffer_len = len;
3634        return result;
3635}
3636
3637/**
3638 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3639 * @ipr_cmd:            ipr command struct
3640 * @sglist:             scatter/gather list
3641 *
3642 * Builds a microcode download IOA data list (IOADL).
3643 *
3644 **/
3645static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3646                                    struct ipr_sglist *sglist)
3647{
3648        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3649        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3650        struct scatterlist *scatterlist = sglist->scatterlist;
3651        int i;
3652
3653        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3654        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3655        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3656
3657        ioarcb->ioadl_len =
3658                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3659        for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3660                ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3661                ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3662                ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3663        }
3664
3665        ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3666}
3667
3668/**
3669 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3670 * @ipr_cmd:    ipr command struct
3671 * @sglist:             scatter/gather list
3672 *
3673 * Builds a microcode download IOA data list (IOADL).
3674 *
3675 **/
3676static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3677                                  struct ipr_sglist *sglist)
3678{
3679        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3680        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3681        struct scatterlist *scatterlist = sglist->scatterlist;
3682        int i;
3683
3684        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3685        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3686        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3687
3688        ioarcb->ioadl_len =
3689                cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3690
3691        for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3692                ioadl[i].flags_and_data_len =
3693                        cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3694                ioadl[i].address =
3695                        cpu_to_be32(sg_dma_address(&scatterlist[i]));
3696        }
3697
3698        ioadl[i-1].flags_and_data_len |=
3699                cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3700}
3701
3702/**
3703 * ipr_update_ioa_ucode - Update IOA's microcode
3704 * @ioa_cfg:    ioa config struct
3705 * @sglist:             scatter/gather list
3706 *
3707 * Initiate an adapter reset to update the IOA's microcode
3708 *
3709 * Return value:
3710 *      0 on success / -EIO on failure
3711 **/
3712static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3713                                struct ipr_sglist *sglist)
3714{
3715        unsigned long lock_flags;
3716
3717        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3718        while (ioa_cfg->in_reset_reload) {
3719                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3720                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3721                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3722        }
3723
3724        if (ioa_cfg->ucode_sglist) {
3725                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3726                dev_err(&ioa_cfg->pdev->dev,
3727                        "Microcode download already in progress\n");
3728                return -EIO;
3729        }
3730
3731        sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3732                                        sglist->num_sg, DMA_TO_DEVICE);
3733
3734        if (!sglist->num_dma_sg) {
3735                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3736                dev_err(&ioa_cfg->pdev->dev,
3737                        "Failed to map microcode download buffer!\n");
3738                return -EIO;
3739        }
3740
3741        ioa_cfg->ucode_sglist = sglist;
3742        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3743        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3744        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3745
3746        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3747        ioa_cfg->ucode_sglist = NULL;
3748        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3749        return 0;
3750}
3751
3752/**
3753 * ipr_store_update_fw - Update the firmware on the adapter
3754 * @class_dev:  device struct
3755 * @buf:        buffer
3756 * @count:      buffer size
3757 *
3758 * This function will update the firmware on the adapter.
3759 *
3760 * Return value:
3761 *      count on success / other on failure
3762 **/
3763static ssize_t ipr_store_update_fw(struct device *dev,
3764                                   struct device_attribute *attr,
3765                                   const char *buf, size_t count)
3766{
3767        struct Scsi_Host *shost = class_to_shost(dev);
3768        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3769        struct ipr_ucode_image_header *image_hdr;
3770        const struct firmware *fw_entry;
3771        struct ipr_sglist *sglist;
3772        char fname[100];
3773        char *src;
3774        int len, result, dnld_size;
3775
3776        if (!capable(CAP_SYS_ADMIN))
3777                return -EACCES;
3778
3779        len = snprintf(fname, 99, "%s", buf);
3780        fname[len-1] = '\0';
3781
3782        if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3783                dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3784                return -EIO;
3785        }
3786
3787        image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3788
3789        src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3790        dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3791        sglist = ipr_alloc_ucode_buffer(dnld_size);
3792
3793        if (!sglist) {
3794                dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3795                release_firmware(fw_entry);
3796                return -ENOMEM;
3797        }
3798
3799        result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3800
3801        if (result) {
3802                dev_err(&ioa_cfg->pdev->dev,
3803                        "Microcode buffer copy to DMA buffer failed\n");
3804                goto out;
3805        }
3806
3807        ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
3808
3809        result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3810
3811        if (!result)
3812                result = count;
3813out:
3814        ipr_free_ucode_buffer(sglist);
3815        release_firmware(fw_entry);
3816        return result;
3817}
3818
/* sysfs attribute: write-only firmware update trigger ("update_fw") */
static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
3826
3827/**
3828 * ipr_show_fw_type - Show the adapter's firmware type.
3829 * @dev:        class device struct
3830 * @buf:        buffer
3831 *
3832 * Return value:
3833 *      number of bytes printed to buffer
3834 **/
3835static ssize_t ipr_show_fw_type(struct device *dev,
3836                                struct device_attribute *attr, char *buf)
3837{
3838        struct Scsi_Host *shost = class_to_shost(dev);
3839        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3840        unsigned long lock_flags = 0;
3841        int len;
3842
3843        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3844        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3845        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3846        return len;
3847}
3848
/* sysfs attribute: read-only firmware type flag ("fw_type") */
static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};
3856
/* NULL-terminated list of sysfs attributes registered per ipr host */
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	NULL,
};
3867
3868#ifdef CONFIG_SCSI_IPR_DUMP
3869/**
3870 * ipr_read_dump - Dump the adapter
3871 * @filp:               open sysfs file
3872 * @kobj:               kobject struct
3873 * @bin_attr:           bin_attribute struct
3874 * @buf:                buffer
3875 * @off:                offset
3876 * @count:              buffer size
3877 *
3878 * Return value:
3879 *      number of bytes printed to buffer
3880 **/
3881static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3882                             struct bin_attribute *bin_attr,
3883                             char *buf, loff_t off, size_t count)
3884{
3885        struct device *cdev = container_of(kobj, struct device, kobj);
3886        struct Scsi_Host *shost = class_to_shost(cdev);
3887        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3888        struct ipr_dump *dump;
3889        unsigned long lock_flags = 0;
3890        char *src;
3891        int len, sdt_end;
3892        size_t rc = count;
3893
3894        if (!capable(CAP_SYS_ADMIN))
3895                return -EACCES;
3896
3897        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3898        dump = ioa_cfg->dump;
3899
3900        if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3901                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3902                return 0;
3903        }
3904        kref_get(&dump->kref);
3905        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3906
3907        if (off > dump->driver_dump.hdr.len) {
3908                kref_put(&dump->kref, ipr_release_dump);
3909                return 0;
3910        }
3911
3912        if (off + count > dump->driver_dump.hdr.len) {
3913                count = dump->driver_dump.hdr.len - off;
3914                rc = count;
3915        }
3916
3917        if (count && off < sizeof(dump->driver_dump)) {
3918                if (off + count > sizeof(dump->driver_dump))
3919                        len = sizeof(dump->driver_dump) - off;
3920                else
3921                        len = count;
3922                src = (u8 *)&dump->driver_dump + off;
3923                memcpy(buf, src, len);
3924                buf += len;
3925                off += len;
3926                count -= len;
3927        }
3928
3929        off -= sizeof(dump->driver_dump);
3930
3931        if (ioa_cfg->sis64)
3932                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3933                          (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
3934                           sizeof(struct ipr_sdt_entry));
3935        else
3936                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3937                          (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
3938
3939        if (count && off < sdt_end) {
3940                if (off + count > sdt_end)
3941                        len = sdt_end - off;
3942                else
3943                        len = count;
3944                src = (u8 *)&dump->ioa_dump + off;
3945                memcpy(buf, src, len);
3946                buf += len;
3947                off += len;
3948                count -= len;
3949        }
3950
3951        off -= sdt_end;
3952
3953        while (count) {
3954                if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3955                        len = PAGE_ALIGN(off) - off;
3956                else
3957                        len = count;
3958                src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3959                src += off & ~PAGE_MASK;
3960                memcpy(buf, src, len);
3961                buf += len;
3962                off += len;
3963                count -= len;
3964        }
3965
3966        kref_put(&dump->kref, ipr_release_dump);
3967        return rc;
3968}
3969
3970/**
3971 * ipr_alloc_dump - Prepare for adapter dump
3972 * @ioa_cfg:    ioa config struct
3973 *
3974 * Return value:
3975 *      0 on success / other on failure
3976 **/
3977static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3978{
3979        struct ipr_dump *dump;
3980        __be32 **ioa_data;
3981        unsigned long lock_flags = 0;
3982
3983        dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3984
3985        if (!dump) {
3986                ipr_err("Dump memory allocation failed\n");
3987                return -ENOMEM;
3988        }
3989
3990        if (ioa_cfg->sis64)
3991                ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3992        else
3993                ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3994
3995        if (!ioa_data) {
3996                ipr_err("Dump memory allocation failed\n");
3997                kfree(dump);
3998                return -ENOMEM;
3999        }
4000
4001        dump->ioa_dump.ioa_data = ioa_data;
4002
4003        kref_init(&dump->kref);
4004        dump->ioa_cfg = ioa_cfg;
4005
4006        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4007
4008        if (INACTIVE != ioa_cfg->sdt_state) {
4009                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4010                vfree(dump->ioa_dump.ioa_data);
4011                kfree(dump);
4012                return 0;
4013        }
4014
4015        ioa_cfg->dump = dump;
4016        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4017        if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
4018                ioa_cfg->dump_taken = 1;
4019                schedule_work(&ioa_cfg->work_q);
4020        }
4021        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4022
4023        return 0;
4024}
4025
4026/**
4027 * ipr_free_dump - Free adapter dump memory
4028 * @ioa_cfg:    ioa config struct
4029 *
4030 * Return value:
4031 *      0 on success / other on failure
4032 **/
4033static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4034{
4035        struct ipr_dump *dump;
4036        unsigned long lock_flags = 0;
4037
4038        ENTER;
4039
4040        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4041        dump = ioa_cfg->dump;
4042        if (!dump) {
4043                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044                return 0;
4045        }
4046
4047        ioa_cfg->dump = NULL;
4048        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4049
4050        kref_put(&dump->kref, ipr_release_dump);
4051
4052        LEAVE;
4053        return 0;
4054}
4055
4056/**
4057 * ipr_write_dump - Setup dump state of adapter
4058 * @filp:               open sysfs file
4059 * @kobj:               kobject struct
4060 * @bin_attr:           bin_attribute struct
4061 * @buf:                buffer
4062 * @off:                offset
4063 * @count:              buffer size
4064 *
4065 * Return value:
4066 *      number of bytes printed to buffer
4067 **/
4068static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4069                              struct bin_attribute *bin_attr,
4070                              char *buf, loff_t off, size_t count)
4071{
4072        struct device *cdev = container_of(kobj, struct device, kobj);
4073        struct Scsi_Host *shost = class_to_shost(cdev);
4074        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4075        int rc;
4076
4077        if (!capable(CAP_SYS_ADMIN))
4078                return -EACCES;
4079
4080        if (buf[0] == '1')
4081                rc = ipr_alloc_dump(ioa_cfg);
4082        else if (buf[0] == '0')
4083                rc = ipr_free_dump(ioa_cfg);
4084        else
4085                return -EINVAL;
4086
4087        if (rc)
4088                return rc;
4089        else
4090                return count;
4091}
4092
/* Binary sysfs file "dump": reading retrieves the adapter dump image
 * (ipr_read_dump); writing '1'/'0' arms or frees it (ipr_write_dump).
 */
static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
4102#else
/* CONFIG_SCSI_IPR_DUMP disabled: freeing a dump is a no-op.
 * (Dropped the stray trailing semicolon after the function body --
 * an empty external declaration is invalid in strict ISO C.)
 */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4104#endif
4105
4106/**
4107 * ipr_change_queue_depth - Change the device's queue depth
4108 * @sdev:       scsi device struct
4109 * @qdepth:     depth to set
4110 * @reason:     calling context
4111 *
4112 * Return value:
4113 *      actual depth set
4114 **/
4115static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4116                                  int reason)
4117{
4118        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4119        struct ipr_resource_entry *res;
4120        unsigned long lock_flags = 0;
4121
4122        if (reason != SCSI_QDEPTH_DEFAULT)
4123                return -EOPNOTSUPP;
4124
4125        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4126        res = (struct ipr_resource_entry *)sdev->hostdata;
4127
4128        if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4129                qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4130        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4131
4132        scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4133        return sdev->queue_depth;
4134}
4135
4136/**
4137 * ipr_change_queue_type - Change the device's queue type
4138 * @dsev:               scsi device struct
4139 * @tag_type:   type of tags to use
4140 *
4141 * Return value:
4142 *      actual queue type set
4143 **/
4144static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4145{
4146        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4147        struct ipr_resource_entry *res;
4148        unsigned long lock_flags = 0;
4149
4150        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4151        res = (struct ipr_resource_entry *)sdev->hostdata;
4152
4153        if (res) {
4154                if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4155                        /*
4156                         * We don't bother quiescing the device here since the
4157                         * adapter firmware does it for us.
4158                         */
4159                        scsi_set_tag_type(sdev, tag_type);
4160
4161                        if (tag_type)
4162                                scsi_activate_tcq(sdev, sdev->queue_depth);
4163                        else
4164                                scsi_deactivate_tcq(sdev, sdev->queue_depth);
4165                } else
4166                        tag_type = 0;
4167        } else
4168                tag_type = 0;
4169
4170        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4171        return tag_type;
4172}
4173
4174/**
4175 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4176 * @dev:        device struct
4177 * @attr:       device attribute structure
4178 * @buf:        buffer
4179 *
4180 * Return value:
4181 *      number of bytes printed to buffer
4182 **/
4183static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4184{
4185        struct scsi_device *sdev = to_scsi_device(dev);
4186        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4187        struct ipr_resource_entry *res;
4188        unsigned long lock_flags = 0;
4189        ssize_t len = -ENXIO;
4190
4191        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4192        res = (struct ipr_resource_entry *)sdev->hostdata;
4193        if (res)
4194                len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4195        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4196        return len;
4197}
4198
/* Per-device sysfs attribute "adapter_handle", readable by root only */
static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
4206
4207/**
4208 * ipr_show_resource_path - Show the resource path or the resource address for
4209 *                          this device.
4210 * @dev:        device struct
4211 * @attr:       device attribute structure
4212 * @buf:        buffer
4213 *
4214 * Return value:
4215 *      number of bytes printed to buffer
4216 **/
4217static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4218{
4219        struct scsi_device *sdev = to_scsi_device(dev);
4220        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4221        struct ipr_resource_entry *res;
4222        unsigned long lock_flags = 0;
4223        ssize_t len = -ENXIO;
4224        char buffer[IPR_MAX_RES_PATH_LENGTH];
4225
4226        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4227        res = (struct ipr_resource_entry *)sdev->hostdata;
4228        if (res && ioa_cfg->sis64)
4229                len = snprintf(buf, PAGE_SIZE, "%s\n",
4230                               ipr_format_res_path(res->res_path, buffer,
4231                                                   sizeof(buffer)));
4232        else if (res)
4233                len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4234                               res->bus, res->target, res->lun);
4235
4236        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237        return len;
4238}
4239
/* Per-device sysfs attribute "resource_path", world-readable */
static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};
4247
4248/**
4249 * ipr_show_device_id - Show the device_id for this device.
4250 * @dev:        device struct
4251 * @attr:       device attribute structure
4252 * @buf:        buffer
4253 *
4254 * Return value:
4255 *      number of bytes printed to buffer
4256 **/
4257static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4258{
4259        struct scsi_device *sdev = to_scsi_device(dev);
4260        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4261        struct ipr_resource_entry *res;
4262        unsigned long lock_flags = 0;
4263        ssize_t len = -ENXIO;
4264
4265        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4266        res = (struct ipr_resource_entry *)sdev->hostdata;
4267        if (res && ioa_cfg->sis64)
4268                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4269        else if (res)
4270                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4271
4272        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4273        return len;
4274}
4275
/* Per-device sysfs attribute "device_id", world-readable */
static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};
4283
4284/**
4285 * ipr_show_resource_type - Show the resource type for this device.
4286 * @dev:        device struct
4287 * @attr:       device attribute structure
4288 * @buf:        buffer
4289 *
4290 * Return value:
4291 *      number of bytes printed to buffer
4292 **/
4293static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4294{
4295        struct scsi_device *sdev = to_scsi_device(dev);
4296        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4297        struct ipr_resource_entry *res;
4298        unsigned long lock_flags = 0;
4299        ssize_t len = -ENXIO;
4300
4301        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4302        res = (struct ipr_resource_entry *)sdev->hostdata;
4303
4304        if (res)
4305                len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4306
4307        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4308        return len;
4309}
4310
/* Per-device sysfs attribute "resource_type", world-readable */
static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};
4318
/* NULL-terminated list of per-device sysfs attributes */
static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};
4326
4327/**
4328 * ipr_biosparam - Return the HSC mapping
4329 * @sdev:                       scsi device struct
4330 * @block_device:       block device pointer
4331 * @capacity:           capacity of the device
4332 * @parm:                       Array containing returned HSC values.
4333 *
4334 * This function generates the HSC parms that fdisk uses.
4335 * We want to make sure we return something that places partitions
4336 * on 4k boundaries for best performance with the IOA.
4337 *
4338 * Return value:
4339 *      0 on success
4340 **/
4341static int ipr_biosparam(struct scsi_device *sdev,
4342                         struct block_device *block_device,
4343                         sector_t capacity, int *parm)
4344{
4345        int heads, sectors;
4346        sector_t cylinders;
4347
4348        heads = 128;
4349        sectors = 32;
4350
4351        cylinders = capacity;
4352        sector_div(cylinders, (128 * 32));
4353
4354        /* return result */
4355        parm[0] = heads;
4356        parm[1] = sectors;
4357        parm[2] = cylinders;
4358
4359        return 0;
4360}
4361
4362/**
4363 * ipr_find_starget - Find target based on bus/target.
4364 * @starget:    scsi target struct
4365 *
4366 * Return value:
4367 *      resource entry pointer if found / NULL if not found
4368 **/
4369static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4370{
4371        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4372        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4373        struct ipr_resource_entry *res;
4374
4375        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4376                if ((res->bus == starget->channel) &&
4377                    (res->target == starget->id)) {
4378                        return res;
4379                }
4380        }
4381
4382        return NULL;
4383}
4384
/* libata port template passed to ata_sas_port_alloc() in
 * ipr_target_alloc(); presumably initialized later in this file --
 * TODO confirm against the rest of the driver. */
static struct ata_port_info sata_port_info;
4386
4387/**
4388 * ipr_target_alloc - Prepare for commands to a SCSI target
4389 * @starget:    scsi target struct
4390 *
4391 * If the device is a SATA device, this function allocates an
4392 * ATA port with libata, else it does nothing.
4393 *
4394 * Return value:
4395 *      0 on success / non-0 on failure
4396 **/
4397static int ipr_target_alloc(struct scsi_target *starget)
4398{
4399        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4400        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4401        struct ipr_sata_port *sata_port;
4402        struct ata_port *ap;
4403        struct ipr_resource_entry *res;
4404        unsigned long lock_flags;
4405
4406        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4407        res = ipr_find_starget(starget);
4408        starget->hostdata = NULL;
4409
4410        if (res && ipr_is_gata(res)) {
4411                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4412                sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4413                if (!sata_port)
4414                        return -ENOMEM;
4415
4416                ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4417                if (ap) {
4418                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4419                        sata_port->ioa_cfg = ioa_cfg;
4420                        sata_port->ap = ap;
4421                        sata_port->res = res;
4422
4423                        res->sata_port = sata_port;
4424                        ap->private_data = sata_port;
4425                        starget->hostdata = sata_port;
4426                } else {
4427                        kfree(sata_port);
4428                        return -ENOMEM;
4429                }
4430        }
4431        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4432
4433        return 0;
4434}
4435
4436/**
4437 * ipr_target_destroy - Destroy a SCSI target
4438 * @starget:    scsi target struct
4439 *
4440 * If the device was a SATA device, this function frees the libata
4441 * ATA port, else it does nothing.
4442 *
4443 **/
4444static void ipr_target_destroy(struct scsi_target *starget)
4445{
4446        struct ipr_sata_port *sata_port = starget->hostdata;
4447        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4448        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4449
4450        if (ioa_cfg->sis64) {
4451                if (!ipr_find_starget(starget)) {
4452                        if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4453                                clear_bit(starget->id, ioa_cfg->array_ids);
4454                        else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4455                                clear_bit(starget->id, ioa_cfg->vset_ids);
4456                        else if (starget->channel == 0)
4457                                clear_bit(starget->id, ioa_cfg->target_ids);
4458                }
4459        }
4460
4461        if (sata_port) {
4462                starget->hostdata = NULL;
4463                ata_sas_port_destroy(sata_port->ap);
4464                kfree(sata_port);
4465        }
4466}
4467
4468/**
4469 * ipr_find_sdev - Find device based on bus/target/lun.
4470 * @sdev:       scsi device struct
4471 *
4472 * Return value:
4473 *      resource entry pointer if found / NULL if not found
4474 **/
4475static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4476{
4477        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4478        struct ipr_resource_entry *res;
4479
4480        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4481                if ((res->bus == sdev->channel) &&
4482                    (res->target == sdev->id) &&
4483                    (res->lun == sdev->lun))
4484                        return res;
4485        }
4486
4487        return NULL;
4488}
4489
4490/**
4491 * ipr_slave_destroy - Unconfigure a SCSI device
4492 * @sdev:       scsi device struct
4493 *
4494 * Return value:
4495 *      nothing
4496 **/
4497static void ipr_slave_destroy(struct scsi_device *sdev)
4498{
4499        struct ipr_resource_entry *res;
4500        struct ipr_ioa_cfg *ioa_cfg;
4501        unsigned long lock_flags = 0;
4502
4503        ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4504
4505        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4506        res = (struct ipr_resource_entry *) sdev->hostdata;
4507        if (res) {
4508                if (res->sata_port)
4509                        res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4510                sdev->hostdata = NULL;
4511                res->sdev = NULL;
4512                res->sata_port = NULL;
4513        }
4514        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4515}
4516
4517/**
4518 * ipr_slave_configure - Configure a SCSI device
4519 * @sdev:       scsi device struct
4520 *
4521 * This function configures the specified scsi device.
4522 *
4523 * Return value:
4524 *      0 on success
4525 **/
4526static int ipr_slave_configure(struct scsi_device *sdev)
4527{
4528        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4529        struct ipr_resource_entry *res;
4530        struct ata_port *ap = NULL;
4531        unsigned long lock_flags = 0;
4532        char buffer[IPR_MAX_RES_PATH_LENGTH];
4533
4534        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4535        res = sdev->hostdata;
4536        if (res) {
4537                if (ipr_is_af_dasd_device(res))
4538                        sdev->type = TYPE_RAID;
4539                if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4540                        sdev->scsi_level = 4;
4541                        sdev->no_uld_attach = 1;
4542                }
4543                if (ipr_is_vset_device(res)) {
4544                        blk_queue_rq_timeout(sdev->request_queue,
4545                                             IPR_VSET_RW_TIMEOUT);
4546                        blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4547                }
4548                if (ipr_is_gata(res) && res->sata_port)
4549                        ap = res->sata_port->ap;
4550                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4551
4552                if (ap) {
4553                        scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4554                        ata_sas_slave_configure(sdev, ap);
4555                } else
4556                        scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4557                if (ioa_cfg->sis64)
4558                        sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4559                                    ipr_format_res_path(res->res_path, buffer,
4560                                                        sizeof(buffer)));
4561                return 0;
4562        }
4563        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4564        return 0;
4565}
4566
4567/**
4568 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4569 * @sdev:       scsi device struct
4570 *
4571 * This function initializes an ATA port so that future commands
4572 * sent through queuecommand will work.
4573 *
4574 * Return value:
4575 *      0 on success
4576 **/
4577static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4578{
4579        struct ipr_sata_port *sata_port = NULL;
4580        int rc = -ENXIO;
4581
4582        ENTER;
4583        if (sdev->sdev_target)
4584                sata_port = sdev->sdev_target->hostdata;
4585        if (sata_port) {
4586                rc = ata_sas_port_init(sata_port->ap);
4587                if (rc == 0)
4588                        rc = ata_sas_sync_probe(sata_port->ap);
4589        }
4590
4591        if (rc)
4592                ipr_slave_destroy(sdev);
4593
4594        LEAVE;
4595        return rc;
4596}
4597
4598/**
4599 * ipr_slave_alloc - Prepare for commands to a device.
4600 * @sdev:       scsi device struct
4601 *
4602 * This function saves a pointer to the resource entry
4603 * in the scsi device struct if the device exists. We
4604 * can then use this pointer in ipr_queuecommand when
4605 * handling new commands.
4606 *
4607 * Return value:
4608 *      0 on success / -ENXIO if device does not exist
4609 **/
4610static int ipr_slave_alloc(struct scsi_device *sdev)
4611{
4612        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4613        struct ipr_resource_entry *res;
4614        unsigned long lock_flags;
4615        int rc = -ENXIO;
4616
4617        sdev->hostdata = NULL;
4618
4619        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4620
4621        res = ipr_find_sdev(sdev);
4622        if (res) {
4623                res->sdev = sdev;
4624                res->add_to_ml = 0;
4625                res->in_erp = 0;
4626                sdev->hostdata = res;
4627                if (!ipr_is_naca_model(res))
4628                        res->needs_sync_complete = 1;
4629                rc = 0;
4630                if (ipr_is_gata(res)) {
4631                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4632                        return ipr_ata_slave_alloc(sdev);
4633                }
4634        }
4635
4636        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4637
4638        return rc;
4639}
4640
4641/**
4642 * ipr_eh_host_reset - Reset the host adapter
4643 * @scsi_cmd:   scsi command struct
4644 *
4645 * Return value:
4646 *      SUCCESS / FAILED
4647 **/
4648static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
4649{
4650        struct ipr_ioa_cfg *ioa_cfg;
4651        int rc;
4652
4653        ENTER;
4654        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4655
4656        if (!ioa_cfg->in_reset_reload) {
4657                dev_err(&ioa_cfg->pdev->dev,
4658                        "Adapter being reset as a result of error recovery.\n");
4659
4660                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4661                        ioa_cfg->sdt_state = GET_DUMP;
4662        }
4663
4664        rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4665
4666        LEAVE;
4667        return rc;
4668}
4669
4670static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4671{
4672        int rc;
4673
4674        spin_lock_irq(cmd->device->host->host_lock);
4675        rc = __ipr_eh_host_reset(cmd);
4676        spin_unlock_irq(cmd->device->host->host_lock);
4677
4678        return rc;
4679}
4680
4681/**
4682 * ipr_device_reset - Reset the device
4683 * @ioa_cfg:    ioa config struct
4684 * @res:                resource entry struct
4685 *
4686 * This function issues a device reset to the affected device.
4687 * If the device is a SCSI device, a LUN reset will be sent
4688 * to the device first. If that does not work, a target reset
4689 * will be sent. If the device is a SATA device, a PHY reset will
4690 * be sent.
4691 *
4692 * Return value:
4693 *      0 on success / non-zero on failure
4694 **/
4695static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4696                            struct ipr_resource_entry *res)
4697{
4698        struct ipr_cmnd *ipr_cmd;
4699        struct ipr_ioarcb *ioarcb;
4700        struct ipr_cmd_pkt *cmd_pkt;
4701        struct ipr_ioarcb_ata_regs *regs;
4702        u32 ioasc;
4703
4704        ENTER;
4705        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4706        ioarcb = &ipr_cmd->ioarcb;
4707        cmd_pkt = &ioarcb->cmd_pkt;
4708
4709        if (ipr_cmd->ioa_cfg->sis64) {
4710                regs = &ipr_cmd->i.ata_ioadl.regs;
4711                ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4712        } else
4713                regs = &ioarcb->u.add_data.u.regs;
4714
4715        ioarcb->res_handle = res->res_handle;
4716        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4717        cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4718        if (ipr_is_gata(res)) {
4719                cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4720                ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4721                regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4722        }
4723
4724        ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4725        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4726        list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4727        if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4728                if (ipr_cmd->ioa_cfg->sis64)
4729                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4730                               sizeof(struct ipr_ioasa_gata));
4731                else
4732                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4733                               sizeof(struct ipr_ioasa_gata));
4734        }
4735
4736        LEAVE;
4737        return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4738}
4739
4740/**
4741 * ipr_sata_reset - Reset the SATA port
4742 * @link:       SATA link to reset
4743 * @classes:    class of the attached device
4744 *
4745 * This function issues a SATA phy reset to the affected ATA link.
4746 *
4747 * Return value:
4748 *      0 on success / non-zero on failure
4749 **/
4750static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4751                                unsigned long deadline)
4752{
4753        struct ipr_sata_port *sata_port = link->ap->private_data;
4754        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4755        struct ipr_resource_entry *res;
4756        unsigned long lock_flags = 0;
4757        int rc = -ENXIO;
4758
4759        ENTER;
4760        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4761        while (ioa_cfg->in_reset_reload) {
4762                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4763                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4764                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4765        }
4766
4767        res = sata_port->res;
4768        if (res) {
4769                rc = ipr_device_reset(ioa_cfg, res);
4770                *classes = res->ata_class;
4771        }
4772
4773        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4774        LEAVE;
4775        return rc;
4776}
4777
/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Called with the host lock held (see ipr_eh_dev_reset).
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	/* Redirect the completion of every command still outstanding to
	 * this device through the EH done routines, and mark in-flight ATA
	 * commands failed/timed out so libata EH accounts for them. */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		/* SATA device: hand the reset to libata's standard error
		 * handler, which must be invoked without the host lock. */
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		/* Anything for this device still pending means the reset
		 * did not clean up — report failure. */
		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return rc ? FAILED : SUCCESS;
}
4849
4850static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
4851{
4852        int rc;
4853
4854        spin_lock_irq(cmd->device->host->host_lock);
4855        rc = __ipr_eh_dev_reset(cmd);
4856        spin_unlock_irq(cmd->device->host->host_lock);
4857
4858        return rc;
4859}
4860
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	/* On non-SIS64 adapters, tell the SCSI mid-layer which bus was
	 * reset by looking up the resource matching this command's handle. */
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 * (see the sibling linkage set up in ipr_abort_timeout).
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	/* Return this command block to the free pool */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}
4896
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Nothing to do if the abort completed in the meantime, or an
	 * adapter reset is underway (which will clean everything up). */
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	/* Cross-link the abort and the reset so ipr_bus_reset_done() can
	 * tell whether the abort finished first and wake the eh thread. */
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	/* Build a bus-reset IOA command for the same resource */
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
4936
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Called with the host lock held (see ipr_eh_abort).
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	/* Only generic SCSI resources support cancel */
	if (!ipr_is_gscsi(res))
		return FAILED;

	/* If the op is still pending, route its completion through the EH
	 * done routine so the mid-layer is notified correctly. */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	/* Op already completed — nothing to cancel */
	if (!op_found)
		return SUCCESS;

	/* Build and send a Cancel All Requests for this device */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
5017
5018/**
5019 * ipr_eh_abort - Abort a single op
5020 * @scsi_cmd:   scsi command struct
5021 *
5022 * Return value:
5023 *      SUCCESS / FAILED
5024 **/
5025static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5026{
5027        unsigned long flags;
5028        int rc;
5029
5030        ENTER;
5031
5032        spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5033        rc = ipr_cancel_op(scsi_cmd);
5034        spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5035
5036        LEAVE;
5037        return rc;
5038}
5039
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Handles interrupts that are not command completions: SIS64 IPL stage
 * changes, the transition to operational state, spurious HRRQ updates,
 * and unit checks / permanent failures (which trigger an adapter reset).
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	/* Ignore interrupt sources that are currently masked */
	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				/* Advance the in-progress adapter reset job */
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		/* IOA is now operational: resume the reset job */
		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		/* HRRQ-updated was the only bit set but the caller found no
		 * queue entries — treat as spurious when clear_isr is set. */
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		/* Unrecoverable adapter error: mask everything and reset */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
5115
5116/**
5117 * ipr_isr_eh - Interrupt service routine error handler
5118 * @ioa_cfg:    ioa config struct
5119 * @msg:        message to log
5120 *
5121 * Return value:
5122 *      none
5123 **/
5124static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5125{
5126        ioa_cfg->errors_logged++;
5127        dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5128
5129        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5130                ioa_cfg->sdt_state = GET_DUMP;
5131
5132        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5133}
5134
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Drains completed commands from the host request response queue (HRRQ)
 * under the host lock, then invokes their fast done routines after the
 * lock is dropped. Non-completion interrupts are delegated to
 * ipr_handle_other_interrupt().
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	u32 int_reg = 0;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);	/* commands reaped, completed after unlock */

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		/* An HRRQ entry is valid while its toggle bit matches ours;
		 * the bit flips each time the adapter wraps the queue. */
		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			/* Out-of-range handle: adapter is confused, reset it */
			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				rc = IRQ_HANDLED;
				goto unlock_out;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			/* Defer completion until the host lock is released */
			list_move_tail(&ipr_cmd->queue, &doneq);

			rc = IRQ_HANDLED;

			/* Advance the queue pointer, wrapping and flipping the
			 * toggle bit at the end. */
			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		/* With clear_isr disabled, no PCI-interrupt clearing is
		 * needed — stop as soon as something was reaped. */
		if (ipr_cmd && !ioa_cfg->clear_isr)
			break;

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			/* Nothing found yet: sample the interrupt register once
			 * and loop again in case an entry just arrived. */
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			/* HRRQ-updated bit refused to clear: reset the IOA */
			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
			rc = IRQ_HANDLED;
			goto unlock_out;
		} else
			break;
	}

	/* No completions found — see if this was a non-command interrupt */
	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

unlock_out:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Complete reaped commands without holding the host lock */
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return rc;
}
5234
5235/**
5236 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5237 * @ioa_cfg:    ioa config struct
5238 * @ipr_cmd:    ipr command struct
5239 *
5240 * Return value:
5241 *      0 on success / -1 on failure
5242 **/
5243static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5244                             struct ipr_cmnd *ipr_cmd)
5245{
5246        int i, nseg;
5247        struct scatterlist *sg;
5248        u32 length;
5249        u32 ioadl_flags = 0;
5250        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5251        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5252        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5253
5254        length = scsi_bufflen(scsi_cmd);
5255        if (!length)
5256                return 0;
5257