linux/drivers/scsi/arcmsr/arcmsr_hba.c
   1/*
   2*******************************************************************************
   3**        O.S   : Linux
   4**   FILE NAME  : arcmsr_hba.c
   5**        BY    : Nick Cheng, C.L. Huang
   6**   Description: SCSI RAID Device Driver for Areca RAID Controller
   7*******************************************************************************
   8** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
   9**
  10**     Web site: www.areca.com.tw
  11**       E-mail: support@areca.com.tw
  12**
  13** This program is free software; you can redistribute it and/or modify
  14** it under the terms of the GNU General Public License version 2 as
  15** published by the Free Software Foundation.
  16** This program is distributed in the hope that it will be useful,
  17** but WITHOUT ANY WARRANTY; without even the implied warranty of
  18** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  19** GNU General Public License for more details.
  20*******************************************************************************
  21** Redistribution and use in source and binary forms, with or without
  22** modification, are permitted provided that the following conditions
  23** are met:
  24** 1. Redistributions of source code must retain the above copyright
  25**    notice, this list of conditions and the following disclaimer.
  26** 2. Redistributions in binary form must reproduce the above copyright
  27**    notice, this list of conditions and the following disclaimer in the
  28**    documentation and/or other materials provided with the distribution.
  29** 3. The name of the author may not be used to endorse or promote products
  30**    derived from this software without specific prior written permission.
  31**
  32** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  33** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  34** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  35** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  36** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
  37** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  38** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
  39** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  40** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
  41** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  42*******************************************************************************
  43** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
  44**     Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
  45*******************************************************************************
  46*/
  47#include <linux/module.h>
  48#include <linux/reboot.h>
  49#include <linux/spinlock.h>
  50#include <linux/pci_ids.h>
  51#include <linux/interrupt.h>
  52#include <linux/moduleparam.h>
  53#include <linux/errno.h>
  54#include <linux/types.h>
  55#include <linux/delay.h>
  56#include <linux/dma-mapping.h>
  57#include <linux/timer.h>
  58#include <linux/slab.h>
  59#include <linux/pci.h>
  60#include <linux/aer.h>
  61#include <linux/circ_buf.h>
  62#include <asm/dma.h>
  63#include <asm/io.h>
  64#include <linux/uaccess.h>
  65#include <scsi/scsi_host.h>
  66#include <scsi/scsi.h>
  67#include <scsi/scsi_cmnd.h>
  68#include <scsi/scsi_tcq.h>
  69#include <scsi/scsi_device.h>
  70#include <scsi/scsi_transport.h>
  71#include <scsi/scsicam.h>
  72#include "arcmsr.h"
  73MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
  74MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
  75MODULE_LICENSE("Dual BSD/GPL");
  76MODULE_VERSION(ARCMSR_DRIVER_VERSION);
  77
  78static int msix_enable = 1;
  79module_param(msix_enable, int, S_IRUGO);
  80MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
  81
  82static int msi_enable = 1;
  83module_param(msi_enable, int, S_IRUGO);
  84MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
  85
  86static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
  87module_param(host_can_queue, int, S_IRUGO);
  88MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
  89
  90static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
  91module_param(cmd_per_lun, int, S_IRUGO);
  92MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
  93
  94static int dma_mask_64 = 0;
  95module_param(dma_mask_64, int, S_IRUGO);
  96MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");
  97
  98static int set_date_time = 0;
  99module_param(set_date_time, int, S_IRUGO);
 100MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
 101
 102static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
 103module_param(cmd_timeout, int, S_IRUGO);
 104MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");
 105
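/*
 * Illustrative load-time usage of the parameters above (values are only
 * examples, not recommendations):
 *
 *     modprobe arcmsr host_can_queue=256 cmd_per_lun=16 msi_enable=0
 *
 * All parameters are declared S_IRUGO, so they are readable under
 * /sys/module/arcmsr/parameters/ but can only be set at load time.
 */
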
 106#define ARCMSR_SLEEPTIME        10
 107#define ARCMSR_RETRYCOUNT       12
 108
 109static wait_queue_head_t wait_q;
 110static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 111                                        struct scsi_cmnd *cmd);
 112static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
 113static int arcmsr_abort(struct scsi_cmnd *);
 114static int arcmsr_bus_reset(struct scsi_cmnd *);
 115static int arcmsr_bios_param(struct scsi_device *sdev,
 116                struct block_device *bdev, sector_t capacity, int *info);
 117static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 118static int arcmsr_probe(struct pci_dev *pdev,
 119                                const struct pci_device_id *id);
 120static int __maybe_unused arcmsr_suspend(struct device *dev);
 121static int __maybe_unused arcmsr_resume(struct device *dev);
 122static void arcmsr_remove(struct pci_dev *pdev);
 123static void arcmsr_shutdown(struct pci_dev *pdev);
 124static void arcmsr_iop_init(struct AdapterControlBlock *acb);
 125static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
 126static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
 127static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
 128        u32 intmask_org);
 129static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
 130static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
 131static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
 132static void arcmsr_request_device_map(struct timer_list *t);
 133static void arcmsr_message_isr_bh_fn(struct work_struct *work);
 134static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
 135static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
 136static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
 137static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
 138static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
 139static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
 140static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
 141static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
 142static const char *arcmsr_info(struct Scsi_Host *);
 143static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
 144static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
 145static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
 146static void arcmsr_set_iop_datetime(struct timer_list *);
 147static int arcmsr_slave_config(struct scsi_device *sdev);
 148static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
 149{
 150        if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
 151                queue_depth = ARCMSR_MAX_CMD_PERLUN;
 152        return scsi_change_queue_depth(sdev, queue_depth);
 153}
 154
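/*
 * The queue-depth and transfer-size values in this template are only
 * defaults: arcmsr_probe() applies the host_can_queue/cmd_per_lun module
 * parameters, and arcmsr_alloc_ccb_pool() re-derives max_sectors and
 * sg_tablesize from the firmware-reported configuration.
 */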
 155static struct scsi_host_template arcmsr_scsi_host_template = {
 156        .module                 = THIS_MODULE,
 157        .name                   = "Areca SAS/SATA RAID driver",
 158        .info                   = arcmsr_info,
 159        .queuecommand           = arcmsr_queue_command,
 160        .eh_abort_handler       = arcmsr_abort,
 161        .eh_bus_reset_handler   = arcmsr_bus_reset,
 162        .bios_param             = arcmsr_bios_param,
 163        .slave_configure        = arcmsr_slave_config,
 164        .change_queue_depth     = arcmsr_adjust_disk_queue_depth,
 165        .can_queue              = ARCMSR_DEFAULT_OUTSTANDING_CMD,
 166        .this_id                = ARCMSR_SCSI_INITIATOR_ID,
 167        .sg_tablesize           = ARCMSR_DEFAULT_SG_ENTRIES,
 168        .max_sectors            = ARCMSR_MAX_XFER_SECTORS_C,
 169        .cmd_per_lun            = ARCMSR_DEFAULT_CMD_PERLUN,
 170        .shost_attrs            = arcmsr_host_attrs,
 171        .no_write_same          = 1,
 172};
 173
 174static struct pci_device_id arcmsr_device_id_table[] = {
 175        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
 176                .driver_data = ACB_ADAPTER_TYPE_A},
 177        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
 178                .driver_data = ACB_ADAPTER_TYPE_A},
 179        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
 180                .driver_data = ACB_ADAPTER_TYPE_A},
 181        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
 182                .driver_data = ACB_ADAPTER_TYPE_A},
 183        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
 184                .driver_data = ACB_ADAPTER_TYPE_A},
 185        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
 186                .driver_data = ACB_ADAPTER_TYPE_B},
 187        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
 188                .driver_data = ACB_ADAPTER_TYPE_B},
 189        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
 190                .driver_data = ACB_ADAPTER_TYPE_B},
 191        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
 192                .driver_data = ACB_ADAPTER_TYPE_B},
 193        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
 194                .driver_data = ACB_ADAPTER_TYPE_A},
 195        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
 196                .driver_data = ACB_ADAPTER_TYPE_D},
 197        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
 198                .driver_data = ACB_ADAPTER_TYPE_A},
 199        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
 200                .driver_data = ACB_ADAPTER_TYPE_A},
 201        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
 202                .driver_data = ACB_ADAPTER_TYPE_A},
 203        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
 204                .driver_data = ACB_ADAPTER_TYPE_A},
 205        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
 206                .driver_data = ACB_ADAPTER_TYPE_A},
 207        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
 208                .driver_data = ACB_ADAPTER_TYPE_A},
 209        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
 210                .driver_data = ACB_ADAPTER_TYPE_A},
 211        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
 212                .driver_data = ACB_ADAPTER_TYPE_A},
 213        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
 214                .driver_data = ACB_ADAPTER_TYPE_A},
 215        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
 216                .driver_data = ACB_ADAPTER_TYPE_C},
 217        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
 218                .driver_data = ACB_ADAPTER_TYPE_E},
 219        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
 220                .driver_data = ACB_ADAPTER_TYPE_F},
 221        {0, 0}, /* Terminating entry */
 222};
 223MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
 224
 225static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);
 226
 227static struct pci_driver arcmsr_pci_driver = {
 228        .name                   = "arcmsr",
 229        .id_table               = arcmsr_device_id_table,
 230        .probe                  = arcmsr_probe,
 231        .remove                 = arcmsr_remove,
 232        .driver.pm              = &arcmsr_pm_ops,
 233        .shutdown               = arcmsr_shutdown,
 234};
 235/*
 236****************************************************************************
 237****************************************************************************
 238*/
 239
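/*
 * Only adapter types B, D, E and F allocate a host-side DMA-coherent
 * message unit / completion queue (see arcmsr_alloc_io_queue()); types A
 * and C talk to the IOP purely through BAR-mapped registers, so there is
 * nothing to free for them here.
 */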
 240static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
 241{
 242        switch (acb->adapter_type) {
 243        case ACB_ADAPTER_TYPE_B:
 244        case ACB_ADAPTER_TYPE_D:
 245        case ACB_ADAPTER_TYPE_E:
 246        case ACB_ADAPTER_TYPE_F:
 247                dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
 248                        acb->dma_coherent2, acb->dma_coherent_handle2);
 249                break;
 250        }
 251}
 252
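/*
 * Map the controller register window(s): types A, D and F use PCI
 * resource 0, types C and E use resource 1, and type B needs both
 * resource 0 (doorbells) and resource 2 (message buffers).  Types E and F
 * additionally clear stale interrupt state and resynchronize the doorbell
 * bookkeeping before use.
 */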
 253static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
 254{
 255        struct pci_dev *pdev = acb->pdev;
 256        switch (acb->adapter_type){
 257        case ACB_ADAPTER_TYPE_A:{
 258                acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
 259                if (!acb->pmuA) {
 260                        printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
 261                        return false;
 262                }
 263                break;
 264        }
 265        case ACB_ADAPTER_TYPE_B:{
 266                void __iomem *mem_base0, *mem_base1;
 267                mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
 268                if (!mem_base0) {
 269                        printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
 270                        return false;
 271                }
 272                mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
 273                if (!mem_base1) {
 274                        iounmap(mem_base0);
 275                        printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
 276                        return false;
 277                }
 278                acb->mem_base0 = mem_base0;
 279                acb->mem_base1 = mem_base1;
 280                break;
 281        }
 282        case ACB_ADAPTER_TYPE_C:{
 283                acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
 284                if (!acb->pmuC) {
 285                        printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
 286                        return false;
 287                }
 288                if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
 289                        writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
 290                        return true;
 291                }
 292                break;
 293        }
 294        case ACB_ADAPTER_TYPE_D: {
 295                void __iomem *mem_base0;
 296                unsigned long addr, range;
 297
 298                addr = (unsigned long)pci_resource_start(pdev, 0);
 299                range = pci_resource_len(pdev, 0);
 300                mem_base0 = ioremap(addr, range);
 301                if (!mem_base0) {
 302                        pr_notice("arcmsr%d: memory mapping region fail\n",
 303                                acb->host->host_no);
 304                        return false;
 305                }
 306                acb->mem_base0 = mem_base0;
 307                break;
 308                }
 309        case ACB_ADAPTER_TYPE_E: {
 310                acb->pmuE = ioremap(pci_resource_start(pdev, 1),
 311                        pci_resource_len(pdev, 1));
 312                if (!acb->pmuE) {
 313                        pr_notice("arcmsr%d: memory mapping region fail \n",
 314                                acb->host->host_no);
 315                        return false;
 316                }
 317                writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
 318                writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);       /* synchronize doorbell to 0 */
 319                acb->in_doorbell = 0;
 320                acb->out_doorbell = 0;
 321                break;
 322                }
 323        case ACB_ADAPTER_TYPE_F: {
 324                acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
 325                if (!acb->pmuF) {
 326                        pr_notice("arcmsr%d: memory mapping region fail\n",
 327                                acb->host->host_no);
 328                        return false;
 329                }
 330                writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
 331                writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
 332                acb->in_doorbell = 0;
 333                acb->out_doorbell = 0;
 334                break;
 335                }
 336        }
 337        return true;
 338}
 339
 340static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
 341{
 342        switch (acb->adapter_type) {
 343        case ACB_ADAPTER_TYPE_A:
 344                iounmap(acb->pmuA);
 345                break;
 346        case ACB_ADAPTER_TYPE_B:
 347                iounmap(acb->mem_base0);
 348                iounmap(acb->mem_base1);
 349                break;
 350        case ACB_ADAPTER_TYPE_C:
 351                iounmap(acb->pmuC);
 352                break;
 353        case ACB_ADAPTER_TYPE_D:
 354                iounmap(acb->mem_base0);
 355                break;
 356        case ACB_ADAPTER_TYPE_E:
 357                iounmap(acb->pmuE);
 358                break;
 359        case ACB_ADAPTER_TYPE_F:
 360                iounmap(acb->pmuF);
 361                break;
 362        }
 363}
 364
 365static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
 366{
 367        irqreturn_t handle_state;
 368        struct AdapterControlBlock *acb = dev_id;
 369
 370        handle_state = arcmsr_interrupt(acb);
 371        return handle_state;
 372}
 373
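/*
 * Report a legacy CHS geometry for the BIOS: prefer whatever an existing
 * partition table implies, otherwise assume 64 heads x 32 sectors and
 * fall back to 255 x 63 once that would exceed 1024 cylinders.
 */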
 374static int arcmsr_bios_param(struct scsi_device *sdev,
 375                struct block_device *bdev, sector_t capacity, int *geom)
 376{
 377        int heads, sectors, cylinders, total_capacity;
 378
 379        if (scsi_partsize(bdev, capacity, geom))
 380                return 0;
 381
 382        total_capacity = capacity;
 383        heads = 64;
 384        sectors = 32;
 385        cylinders = total_capacity / (heads * sectors);
 386        if (cylinders > 1024) {
 387                heads = 255;
 388                sectors = 63;
 389                cylinders = total_capacity / (heads * sectors);
 390        }
 391        geom[0] = heads;
 392        geom[1] = sectors;
 393        geom[2] = cylinders;
 394        return 0;
 395}
 396
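/*
 * The arcmsr_hbaX_wait_msgint_ready() helpers below poll for the IOP's
 * "message command done" indication and acknowledge it.  Each polls up to
 * 2000 times with a 10 ms sleep, i.e. roughly 20 seconds, before giving
 * up and returning false.
 */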
 397static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
 398{
 399        struct MessageUnit_A __iomem *reg = acb->pmuA;
 400        int i;
 401
 402        for (i = 0; i < 2000; i++) {
 403                if (readl(&reg->outbound_intstatus) &
 404                                ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
 405                        writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
 406                                &reg->outbound_intstatus);
 407                        return true;
 408                }
 409                msleep(10);
 410        } /* max 20 seconds */
 411
 412        return false;
 413}
 414
 415static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
 416{
 417        struct MessageUnit_B *reg = acb->pmuB;
 418        int i;
 419
 420        for (i = 0; i < 2000; i++) {
 421                if (readl(reg->iop2drv_doorbell)
 422                        & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
 423                        writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
 424                                        reg->iop2drv_doorbell);
 425                        writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
 426                                        reg->drv2iop_doorbell);
 427                        return true;
 428                }
 429                msleep(10);
 430        } /* max 20 seconds */
 431
 432        return false;
 433}
 434
 435static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
 436{
 437        struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
 438        int i;
 439
 440        for (i = 0; i < 2000; i++) {
 441                if (readl(&phbcmu->outbound_doorbell)
 442                                & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
 443                        writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
 444                                &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
 445                        return true;
 446                }
 447                msleep(10);
 448        } /* max 20 seconds */
 449
 450        return false;
 451}
 452
 453static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
 454{
 455        struct MessageUnit_D *reg = pACB->pmuD;
 456        int i;
 457
 458        for (i = 0; i < 2000; i++) {
 459                if (readl(reg->outbound_doorbell)
 460                        & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
 461                        writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
 462                                reg->outbound_doorbell);
 463                        return true;
 464                }
 465                msleep(10);
 466        } /* max 20 seconds */
 467        return false;
 468}
 469
 470static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
 471{
 472        int i;
 473        uint32_t read_doorbell;
 474        struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
 475
 476        for (i = 0; i < 2000; i++) {
 477                read_doorbell = readl(&phbcmu->iobound_doorbell);
 478                if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
 479                        writel(0, &phbcmu->host_int_status); /*clear interrupt*/
 480                        pACB->in_doorbell = read_doorbell;
 481                        return true;
 482                }
 483                msleep(10);
 484        } /* max 20 seconds */
 485        return false;
 486}
 487
 488static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
 489{
 490        struct MessageUnit_A __iomem *reg = acb->pmuA;
 491        int retry_count = 30;
 492        writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 493        do {
 494                if (arcmsr_hbaA_wait_msgint_ready(acb))
 495                        break;
 496                else {
 497                        retry_count--;
  498                        printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
  499                                "timeout, retry count down = %d\n", acb->host->host_no, retry_count);
 500                }
 501        } while (retry_count != 0);
 502}
 503
 504static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
 505{
 506        struct MessageUnit_B *reg = acb->pmuB;
 507        int retry_count = 30;
 508        writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
 509        do {
 510                if (arcmsr_hbaB_wait_msgint_ready(acb))
 511                        break;
 512                else {
 513                        retry_count--;
  514                        printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
  515                                "timeout, retry count down = %d\n", acb->host->host_no, retry_count);
 516                }
 517        } while (retry_count != 0);
 518}
 519
 520static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
 521{
 522        struct MessageUnit_C __iomem *reg = pACB->pmuC;
 523        int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
 524        writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 525        writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 526        do {
 527                if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
 528                        break;
 529                } else {
 530                        retry_count--;
  531                        printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
  532                                "timeout, retry count down = %d\n", pACB->host->host_no, retry_count);
 533                }
 534        } while (retry_count != 0);
 535        return;
 536}
 537
 538static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
 539{
 540        int retry_count = 15;
 541        struct MessageUnit_D *reg = pACB->pmuD;
 542
 543        writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
 544        do {
 545                if (arcmsr_hbaD_wait_msgint_ready(pACB))
 546                        break;
 547
 548                retry_count--;
 549                pr_notice("arcmsr%d: wait 'flush adapter "
 550                        "cache' timeout, retry count down = %d\n",
 551                        pACB->host->host_no, retry_count);
 552        } while (retry_count != 0);
 553}
 554
 555static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
 556{
 557        int retry_count = 30;
 558        struct MessageUnit_E __iomem *reg = pACB->pmuE;
 559
 560        writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 561        pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
 562        writel(pACB->out_doorbell, &reg->iobound_doorbell);
 563        do {
 564                if (arcmsr_hbaE_wait_msgint_ready(pACB))
 565                        break;
 566                retry_count--;
 567                pr_notice("arcmsr%d: wait 'flush adapter "
 568                        "cache' timeout, retry count down = %d\n",
 569                        pACB->host->host_no, retry_count);
 570        } while (retry_count != 0);
 571}
 572
 573static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
 574{
 575        switch (acb->adapter_type) {
 576
 577        case ACB_ADAPTER_TYPE_A:
 578                arcmsr_hbaA_flush_cache(acb);
 579                break;
 580        case ACB_ADAPTER_TYPE_B:
 581                arcmsr_hbaB_flush_cache(acb);
 582                break;
 583        case ACB_ADAPTER_TYPE_C:
 584                arcmsr_hbaC_flush_cache(acb);
 585                break;
 586        case ACB_ADAPTER_TYPE_D:
 587                arcmsr_hbaD_flush_cache(acb);
 588                break;
 589        case ACB_ADAPTER_TYPE_E:
 590        case ACB_ADAPTER_TYPE_F:
 591                arcmsr_hbaE_flush_cache(acb);
 592                break;
 593        }
 594}
 595
 596static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
 597{
 598        struct MessageUnit_B *reg = acb->pmuB;
 599
 600        if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
 601                reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
 602                reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
 603                reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
 604                reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
 605        } else {
 606                reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
 607                reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
 608                reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
 609                reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
 610        }
 611        reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
 612        reg->message_rbuffer =  MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
 613        reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
 614}
 615
 616static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
 617{
 618        struct MessageUnit_D *reg = acb->pmuD;
 619
 620        reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
 621        reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
 622        reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
 623        reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
 624        reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
 625        reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
 626        reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
 627        reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
 628        reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
 629        reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
 630        reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
 631        reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
 632        reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
 633        reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
 634        reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
 635        reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
 636        reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
 637        reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
 638        reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
 639        reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
 640        reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
 641        reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
 642        reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
 643        reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
 644        reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
 645        reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
 646}
 647
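/*
 * On type F controllers the message wbuffer/rbuffer/rwbuffer are not MMIO
 * registers; they live in host DMA memory directly after the completion
 * queue.  The 4-byte-aligned physical address is handed to the IOP via
 * inbound_msgaddr0/1 with bit 0 set to mark the buffers active.
 */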
 648static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
 649{
 650        dma_addr_t host_buffer_dma;
 651        struct MessageUnit_F __iomem *pmuF;
 652
 653        memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
 654        acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
 655                acb->completeQ_size, 4);
 656        acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
 657        acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
 658        memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
 659        host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
 660        pmuF = acb->pmuF;
 661        /* host buffer low address, bit0:1 all buffer active */
 662        writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
 663        /* host buffer high address */
 664        writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
 665        /* set host buffer physical address */
 666        writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
 667}
 668
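/*
 * Host-side queue memory differs per adapter type: B and D place their
 * whole MessageUnit in coherent memory, E uses a fixed-size completion
 * queue of ARCMSR_MAX_HBE_DONEQUEUE entries, and F first waits for the
 * firmware, reads the queue depth it advertises in outbound_msgaddr1
 * (low three bits index depthTbl[]) and appends the message R/W buffers
 * to the same allocation.
 */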
 669static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
 670{
 671        bool rtn = true;
 672        void *dma_coherent;
 673        dma_addr_t dma_coherent_handle;
 674        struct pci_dev *pdev = acb->pdev;
 675
 676        switch (acb->adapter_type) {
 677        case ACB_ADAPTER_TYPE_B: {
 678                acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
 679                dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
 680                        &dma_coherent_handle, GFP_KERNEL);
 681                if (!dma_coherent) {
 682                        pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
 683                        return false;
 684                }
 685                acb->dma_coherent_handle2 = dma_coherent_handle;
 686                acb->dma_coherent2 = dma_coherent;
 687                acb->pmuB = (struct MessageUnit_B *)dma_coherent;
 688                arcmsr_hbaB_assign_regAddr(acb);
 689                }
 690                break;
 691        case ACB_ADAPTER_TYPE_D: {
 692                acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
 693                dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
 694                        &dma_coherent_handle, GFP_KERNEL);
 695                if (!dma_coherent) {
 696                        pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
 697                        return false;
 698                }
 699                acb->dma_coherent_handle2 = dma_coherent_handle;
 700                acb->dma_coherent2 = dma_coherent;
 701                acb->pmuD = (struct MessageUnit_D *)dma_coherent;
 702                arcmsr_hbaD_assign_regAddr(acb);
 703                }
 704                break;
 705        case ACB_ADAPTER_TYPE_E: {
 706                uint32_t completeQ_size;
 707                completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
 708                acb->ioqueue_size = roundup(completeQ_size, 32);
 709                dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
 710                        &dma_coherent_handle, GFP_KERNEL);
 711                if (!dma_coherent){
 712                        pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
 713                        return false;
 714                }
 715                acb->dma_coherent_handle2 = dma_coherent_handle;
 716                acb->dma_coherent2 = dma_coherent;
 717                acb->pCompletionQ = dma_coherent;
 718                acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
 719                acb->doneq_index = 0;
 720                }
 721                break;
 722        case ACB_ADAPTER_TYPE_F: {
 723                uint32_t QueueDepth;
 724                uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
 725
 726                arcmsr_wait_firmware_ready(acb);
 727                QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
 728                acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
 729                acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
 730                dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
 731                        &dma_coherent_handle, GFP_KERNEL);
 732                if (!dma_coherent) {
 733                        pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
 734                        return false;
 735                }
 736                acb->dma_coherent_handle2 = dma_coherent_handle;
 737                acb->dma_coherent2 = dma_coherent;
 738                acb->pCompletionQ = dma_coherent;
 739                acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
 740                acb->doneq_index = 0;
 741                arcmsr_hbaF_assign_regAddr(acb);
 742                }
 743                break;
 744        default:
 745                break;
 746        }
 747        return rtn;
 748}
 749
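/*
 * All CommandControlBlocks are carved out of a single DMA-coherent
 * region, each rounded up to a 32-byte boundary.  If the next CCB would
 * cross a 4 GiB boundary (the upper 32 address bits change), the pool is
 * truncated at that point and can_queue shrunk to match.  For every type
 * except F, the host-side message unit area follows the CCBs and is
 * assigned in the switch at the end of this function.
 */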
 750static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 751{
 752        struct pci_dev *pdev = acb->pdev;
 753        void *dma_coherent;
 754        dma_addr_t dma_coherent_handle;
 755        struct CommandControlBlock *ccb_tmp;
 756        int i = 0, j = 0;
 757        unsigned long cdb_phyaddr, next_ccb_phy;
 758        unsigned long roundup_ccbsize;
 759        unsigned long max_xfer_len;
 760        unsigned long max_sg_entrys;
 761        uint32_t  firm_config_version, curr_phy_upper32;
 762
 763        for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 764                for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
 765                        acb->devstate[i][j] = ARECA_RAID_GONE;
 766
 767        max_xfer_len = ARCMSR_MAX_XFER_LEN;
 768        max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
 769        firm_config_version = acb->firm_cfg_version;
 770        if((firm_config_version & 0xFF) >= 3){
 771                max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
 772                max_sg_entrys = (max_xfer_len/4096);
 773        }
 774        acb->host->max_sectors = max_xfer_len/512;
 775        acb->host->sg_tablesize = max_sg_entrys;
 776        roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
 777        acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
 778        if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
 779                acb->uncache_size += acb->ioqueue_size;
 780        dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
 781        if(!dma_coherent){
 782                printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
 783                return -ENOMEM;
 784        }
 785        acb->dma_coherent = dma_coherent;
 786        acb->dma_coherent_handle = dma_coherent_handle;
 787        memset(dma_coherent, 0, acb->uncache_size);
 788        acb->ccbsize = roundup_ccbsize;
 789        ccb_tmp = dma_coherent;
 790        curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
 791        acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
 792        for(i = 0; i < acb->maxFreeCCB; i++){
 793                cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
 794                switch (acb->adapter_type) {
 795                case ACB_ADAPTER_TYPE_A:
 796                case ACB_ADAPTER_TYPE_B:
 797                        ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
 798                        break;
 799                case ACB_ADAPTER_TYPE_C:
 800                case ACB_ADAPTER_TYPE_D:
 801                case ACB_ADAPTER_TYPE_E:
 802                case ACB_ADAPTER_TYPE_F:
 803                        ccb_tmp->cdb_phyaddr = cdb_phyaddr;
 804                        break;
 805                }
 806                acb->pccb_pool[i] = ccb_tmp;
 807                ccb_tmp->acb = acb;
 808                ccb_tmp->smid = (u32)i << 16;
 809                INIT_LIST_HEAD(&ccb_tmp->list);
 810                next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
 811                if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
 812                        acb->maxFreeCCB = i;
 813                        acb->host->can_queue = i;
 814                        break;
 815                }
 816                else
 817                        list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
 818                ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
 819                dma_coherent_handle = next_ccb_phy;
 820        }
 821        if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
 822                acb->dma_coherent_handle2 = dma_coherent_handle;
 823                acb->dma_coherent2 = ccb_tmp;
 824        }
 825        switch (acb->adapter_type) {
 826        case ACB_ADAPTER_TYPE_B:
 827                acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
 828                arcmsr_hbaB_assign_regAddr(acb);
 829                break;
 830        case ACB_ADAPTER_TYPE_D:
 831                acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
 832                arcmsr_hbaD_assign_regAddr(acb);
 833                break;
 834        case ACB_ADAPTER_TYPE_E:
 835                acb->pCompletionQ = acb->dma_coherent2;
 836                acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
 837                acb->doneq_index = 0;
 838                break;
  839        }
 840        return 0;
 841}
 842
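/*
 * Bottom half for the "get config" message interrupt: re-read the device
 * map the firmware exposes at offset 21 of the message rwbuffer (one byte
 * of LUN-presence bits per target ID), diff it against the cached copy
 * and hot-add or remove SCSI devices for every bit that changed.
 */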
  843static void arcmsr_message_isr_bh_fn(struct work_struct *work)
 844{
 845        struct AdapterControlBlock *acb = container_of(work,
 846                struct AdapterControlBlock, arcmsr_do_message_isr_bh);
 847        char *acb_dev_map = (char *)acb->device_map;
 848        uint32_t __iomem *signature = NULL;
 849        char __iomem *devicemap = NULL;
 850        int target, lun;
 851        struct scsi_device *psdev;
 852        char diff, temp;
 853
 854        switch (acb->adapter_type) {
 855        case ACB_ADAPTER_TYPE_A: {
 856                struct MessageUnit_A __iomem *reg  = acb->pmuA;
 857
 858                signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
 859                devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
 860                break;
 861        }
 862        case ACB_ADAPTER_TYPE_B: {
 863                struct MessageUnit_B *reg  = acb->pmuB;
 864
 865                signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
 866                devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
 867                break;
 868        }
 869        case ACB_ADAPTER_TYPE_C: {
 870                struct MessageUnit_C __iomem *reg  = acb->pmuC;
 871
 872                signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
 873                devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
 874                break;
 875        }
 876        case ACB_ADAPTER_TYPE_D: {
 877                struct MessageUnit_D *reg  = acb->pmuD;
 878
 879                signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
 880                devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
 881                break;
 882        }
 883        case ACB_ADAPTER_TYPE_E: {
 884                struct MessageUnit_E __iomem *reg  = acb->pmuE;
 885
 886                signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
 887                devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
 888                break;
 889                }
 890        case ACB_ADAPTER_TYPE_F: {
 891                signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
 892                devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
 893                break;
 894                }
 895        }
 896        if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
 897                return;
 898        for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
 899                target++) {
 900                temp = readb(devicemap);
 901                diff = (*acb_dev_map) ^ temp;
 902                if (diff != 0) {
 903                        *acb_dev_map = temp;
 904                        for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
 905                                lun++) {
 906                                if ((diff & 0x01) == 1 &&
 907                                        (temp & 0x01) == 1) {
 908                                        scsi_add_device(acb->host,
 909                                                0, target, lun);
 910                                } else if ((diff & 0x01) == 1
 911                                        && (temp & 0x01) == 0) {
 912                                        psdev = scsi_device_lookup(acb->host,
 913                                                0, target, lun);
 914                                        if (psdev != NULL) {
 915                                                scsi_remove_device(psdev);
 916                                                scsi_device_put(psdev);
 917                                        }
 918                                }
 919                                temp >>= 1;
 920                                diff >>= 1;
 921                        }
 922                }
 923                devicemap++;
 924                acb_dev_map++;
 925        }
 926        acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
 927}
 928
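/*
 * Interrupt setup: try MSI-X (up to ARCMST_NUM_MSIX_VECTORS vectors)
 * unless msix_enable=0, then a single MSI vector unless msi_enable=0,
 * and finally a shared legacy INTx line.  Every allocated vector is
 * wired to arcmsr_do_interrupt().
 */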
 929static int
 930arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
 931{
 932        unsigned long flags;
 933        int nvec, i;
 934
 935        if (msix_enable == 0)
 936                goto msi_int0;
 937        nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
 938                        PCI_IRQ_MSIX);
 939        if (nvec > 0) {
 940                pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
 941                flags = 0;
 942        } else {
 943msi_int0:
 944                if (msi_enable == 1) {
 945                        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
 946                        if (nvec == 1) {
 947                                dev_info(&pdev->dev, "msi enabled\n");
 948                                goto msi_int1;
 949                        }
 950                }
 951                nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
 952                if (nvec < 1)
 953                        return FAILED;
 954msi_int1:
 955                flags = IRQF_SHARED;
 956        }
 957
 958        acb->vector_count = nvec;
 959        for (i = 0; i < nvec; i++) {
 960                if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
 961                                flags, "arcmsr", acb)) {
 962                        pr_warn("arcmsr%d: request_irq =%d failed!\n",
 963                                acb->host->host_no, pci_irq_vector(pdev, i));
 964                        goto out_free_irq;
 965                }
 966        }
 967
 968        return SUCCESS;
 969out_free_irq:
 970        while (--i >= 0)
 971                free_irq(pci_irq_vector(pdev, i), acb);
 972        pci_free_irq_vectors(pdev);
 973        return FAILED;
 974}
 975
 976static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
 977{
 978        INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
 979        pacb->fw_flag = FW_NORMAL;
 980        timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
 981        pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
 982        add_timer(&pacb->eternal_timer);
 983}
 984
 985static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
 986{
 987        timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
 988        pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
 989        add_timer(&pacb->refresh_timer);
 990}
 991
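/*
 * Prefer a 64-bit DMA mask when the platform supports it, except on
 * type A adapters unless the user opted in with dma_mask_64=1; anything
 * else, or a failed 64-bit request, falls back to a 32-bit mask.
 */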
 992static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
 993{
 994        struct pci_dev *pcidev = acb->pdev;
 995
 996        if (IS_DMA64) {
 997                if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
 998                    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
 999                        goto    dma32;
1000                if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
1001                    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
1002                        printk("arcmsr: set DMA 64 mask failed\n");
1003                        return -ENXIO;
1004                }
1005        } else {
1006dma32:
1007                if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1008                    dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1009                    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
1010                        printk("arcmsr: set DMA 32-bit mask failed\n");
1011                        return -ENXIO;
1012                }
1013        }
1014        return 0;
1015}
1016
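/*
 * Probe ordering matters: the PCI BARs must be mapped and, for types
 * B/D/E/F, the host-side queue memory allocated before
 * arcmsr_get_firmware_spec() can query the IOP; only then is the CCB
 * pool sized from the reported firmware configuration, the SCSI host
 * registered, IRQs requested and the periodic device-map timer started.
 */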
1017static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1018{
1019        struct Scsi_Host *host;
1020        struct AdapterControlBlock *acb;
1021        uint8_t bus,dev_fun;
1022        int error;
1023        error = pci_enable_device(pdev);
1024        if(error){
1025                return -ENODEV;
1026        }
1027        host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
1028        if(!host){
1029                goto pci_disable_dev;
1030        }
1031        init_waitqueue_head(&wait_q);
1032        bus = pdev->bus->number;
1033        dev_fun = pdev->devfn;
1034        acb = (struct AdapterControlBlock *) host->hostdata;
1035        memset(acb,0,sizeof(struct AdapterControlBlock));
1036        acb->pdev = pdev;
1037        acb->adapter_type = id->driver_data;
1038        if (arcmsr_set_dma_mask(acb))
1039                goto scsi_host_release;
1040        acb->host = host;
1041        host->max_lun = ARCMSR_MAX_TARGETLUN;
1042        host->max_id = ARCMSR_MAX_TARGETID;             /*16:8*/
 1043        host->max_cmd_len = 16;                 /* 16-byte CDBs for 64-bit LBA, i.e. volumes larger than 2 TB */
1044        if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
1045                host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
1046        host->can_queue = host_can_queue;       /* max simultaneous cmds */
1047        if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
1048                cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
1049        host->cmd_per_lun = cmd_per_lun;
1050        host->this_id = ARCMSR_SCSI_INITIATOR_ID;
1051        host->unique_id = (bus << 8) | dev_fun;
1052        pci_set_drvdata(pdev, host);
1053        pci_set_master(pdev);
1054        error = pci_request_regions(pdev, "arcmsr");
1055        if(error){
1056                goto scsi_host_release;
1057        }
1058        spin_lock_init(&acb->eh_lock);
1059        spin_lock_init(&acb->ccblist_lock);
1060        spin_lock_init(&acb->postq_lock);
1061        spin_lock_init(&acb->doneq_lock);
1062        spin_lock_init(&acb->rqbuffer_lock);
1063        spin_lock_init(&acb->wqbuffer_lock);
1064        acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1065                        ACB_F_MESSAGE_RQBUFFER_CLEARED |
1066                        ACB_F_MESSAGE_WQBUFFER_READED);
1067        acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
1068        INIT_LIST_HEAD(&acb->ccb_free_list);
1069        error = arcmsr_remap_pciregion(acb);
1070        if(!error){
1071                goto pci_release_regs;
1072        }
1073        error = arcmsr_alloc_io_queue(acb);
1074        if (!error)
1075                goto unmap_pci_region;
1076        error = arcmsr_get_firmware_spec(acb);
1077        if(!error){
1078                goto free_hbb_mu;
1079        }
1080        if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
1081                arcmsr_free_io_queue(acb);
1082        error = arcmsr_alloc_ccb_pool(acb);
1083        if(error){
1084                goto unmap_pci_region;
1085        }
1086        error = scsi_add_host(host, &pdev->dev);
1087        if(error){
1088                goto free_ccb_pool;
1089        }
1090        if (arcmsr_request_irq(pdev, acb) == FAILED)
1091                goto scsi_host_remove;
1092        arcmsr_iop_init(acb);
1093        arcmsr_init_get_devmap_timer(acb);
1094        if (set_date_time)
1095                arcmsr_init_set_datetime_timer(acb);
1096        if(arcmsr_alloc_sysfs_attr(acb))
1097                goto out_free_sysfs;
1098        scsi_scan_host(host);
1099        return 0;
1100out_free_sysfs:
1101        if (set_date_time)
1102                del_timer_sync(&acb->refresh_timer);
1103        del_timer_sync(&acb->eternal_timer);
1104        flush_work(&acb->arcmsr_do_message_isr_bh);
1105        arcmsr_stop_adapter_bgrb(acb);
1106        arcmsr_flush_adapter_cache(acb);
1107        arcmsr_free_irq(pdev, acb);
1108scsi_host_remove:
1109        scsi_remove_host(host);
1110free_ccb_pool:
1111        arcmsr_free_ccb_pool(acb);
1112        goto unmap_pci_region;
1113free_hbb_mu:
1114        arcmsr_free_io_queue(acb);
1115unmap_pci_region:
1116        arcmsr_unmap_pciregion(acb);
1117pci_release_regs:
1118        pci_release_regions(pdev);
1119scsi_host_release:
1120        scsi_host_put(host);
1121pci_disable_dev:
1122        pci_disable_device(pdev);
1123        return -ENODEV;
1124}
1125
1126static void arcmsr_free_irq(struct pci_dev *pdev,
1127                struct AdapterControlBlock *acb)
1128{
1129        int i;
1130
1131        for (i = 0; i < acb->vector_count; i++)
1132                free_irq(pci_irq_vector(pdev, i), acb);
1133        pci_free_irq_vectors(pdev);
1134}
1135
1136static int __maybe_unused arcmsr_suspend(struct device *dev)
1137{
1138        struct pci_dev *pdev = to_pci_dev(dev);
1139        struct Scsi_Host *host = pci_get_drvdata(pdev);
1140        struct AdapterControlBlock *acb =
1141                (struct AdapterControlBlock *)host->hostdata;
1142
1143        arcmsr_disable_outbound_ints(acb);
1144        arcmsr_free_irq(pdev, acb);
1145        del_timer_sync(&acb->eternal_timer);
1146        if (set_date_time)
1147                del_timer_sync(&acb->refresh_timer);
1148        flush_work(&acb->arcmsr_do_message_isr_bh);
1149        arcmsr_stop_adapter_bgrb(acb);
1150        arcmsr_flush_adapter_cache(acb);
1151        return 0;
1152}
1153
1154static int __maybe_unused arcmsr_resume(struct device *dev)
1155{
1156        struct pci_dev *pdev = to_pci_dev(dev);
1157        struct Scsi_Host *host = pci_get_drvdata(pdev);
1158        struct AdapterControlBlock *acb =
1159                (struct AdapterControlBlock *)host->hostdata;
1160
1161        if (arcmsr_set_dma_mask(acb))
1162                goto controller_unregister;
1163        if (arcmsr_request_irq(pdev, acb) == FAILED)
1164                goto controller_stop;
1165        switch (acb->adapter_type) {
1166        case ACB_ADAPTER_TYPE_B: {
1167                struct MessageUnit_B *reg = acb->pmuB;
1168                uint32_t i;
1169                for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1170                        reg->post_qbuffer[i] = 0;
1171                        reg->done_qbuffer[i] = 0;
1172                }
1173                reg->postq_index = 0;
1174                reg->doneq_index = 0;
1175                break;
1176                }
1177        case ACB_ADAPTER_TYPE_E:
1178                writel(0, &acb->pmuE->host_int_status);
1179                writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
1180                acb->in_doorbell = 0;
1181                acb->out_doorbell = 0;
1182                acb->doneq_index = 0;
1183                break;
1184        case ACB_ADAPTER_TYPE_F:
1185                writel(0, &acb->pmuF->host_int_status);
1186                writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
1187                acb->in_doorbell = 0;
1188                acb->out_doorbell = 0;
1189                acb->doneq_index = 0;
1190                arcmsr_hbaF_assign_regAddr(acb);
1191                break;
1192        }
1193        arcmsr_iop_init(acb);
1194        arcmsr_init_get_devmap_timer(acb);
1195        if (set_date_time)
1196                arcmsr_init_set_datetime_timer(acb);
1197        return 0;
1198controller_stop:
1199        arcmsr_stop_adapter_bgrb(acb);
1200        arcmsr_flush_adapter_cache(acb);
1201controller_unregister:
1202        scsi_remove_host(host);
1203        arcmsr_free_ccb_pool(acb);
1204        if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1205                arcmsr_free_io_queue(acb);
1206        arcmsr_unmap_pciregion(acb);
1207        scsi_host_put(host);
1208        return -ENODEV;
1209}
1210
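/*
 * The abort_allcmd helpers below ask the IOP to drop every outstanding
 * command and wait (up to ~20 seconds) for the acknowledgement; types E
 * and F share one implementation, as they do for cache flushing above.
 */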
1211static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
1212{
1213        struct MessageUnit_A __iomem *reg = acb->pmuA;
1214        writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
1215        if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1216                printk(KERN_NOTICE
1217                        "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1218                        , acb->host->host_no);
1219                return false;
1220        }
1221        return true;
1222}
1223
1224static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
1225{
1226        struct MessageUnit_B *reg = acb->pmuB;
1227
1228        writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
1229        if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1230                printk(KERN_NOTICE
1231                        "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1232                        , acb->host->host_no);
1233                return false;
1234        }
1235        return true;
1236}

1237static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
1238{
1239        struct MessageUnit_C __iomem *reg = pACB->pmuC;
1240        writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
1241        writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
1242        if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1243                printk(KERN_NOTICE
1244                        "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1245                        , pACB->host->host_no);
1246                return false;
1247        }
1248        return true;
1249}
1250
1251static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
1252{
1253        struct MessageUnit_D *reg = pACB->pmuD;
1254
1255        writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
1256        if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
1257                pr_notice("arcmsr%d: wait 'abort all outstanding "
1258                        "command' timeout\n", pACB->host->host_no);
1259                return false;
1260        }
1261        return true;
1262}
1263
1264static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
1265{
1266        struct MessageUnit_E __iomem *reg = pACB->pmuE;
1267
1268        writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
1269        pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1270        writel(pACB->out_doorbell, &reg->iobound_doorbell);
1271        if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1272                pr_notice("arcmsr%d: wait 'abort all outstanding "
1273                        "command' timeout\n", pACB->host->host_no);
1274                return false;
1275        }
1276        return true;
1277}
1278
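/*
** Dispatch "abort all outstanding commands" to the adapter-type specific
** helper; type F controllers share the type E message path.
*/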
1279static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1280{
1281        uint8_t rtnval = 0;
1282        switch (acb->adapter_type) {
1283        case ACB_ADAPTER_TYPE_A:
1284                rtnval = arcmsr_hbaA_abort_allcmd(acb);
1285                break;
1286        case ACB_ADAPTER_TYPE_B:
1287                rtnval = arcmsr_hbaB_abort_allcmd(acb);
1288                break;
1289        case ACB_ADAPTER_TYPE_C:
1290                rtnval = arcmsr_hbaC_abort_allcmd(acb);
1291                break;
1292        case ACB_ADAPTER_TYPE_D:
1293                rtnval = arcmsr_hbaD_abort_allcmd(acb);
1294                break;
1295        case ACB_ADAPTER_TYPE_E:
1296        case ACB_ADAPTER_TYPE_F:
1297                rtnval = arcmsr_hbaE_abort_allcmd(acb);
1298                break;
1299        }
1300        return rtnval;
1301}
1302
1303static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
1304{
1305        struct scsi_cmnd *pcmd = ccb->pcmd;
1306
1307        scsi_dma_unmap(pcmd);
1308}
1309
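/*
** Complete a CCB back to the SCSI midlayer: drop the outstanding count,
** unmap its scatter-gather list, return the CCB to the free list and call
** the midlayer completion callback.
*/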
1310static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
1311{
1312        struct AdapterControlBlock *acb = ccb->acb;
1313        struct scsi_cmnd *pcmd = ccb->pcmd;
1314        unsigned long flags;
1315        atomic_dec(&acb->ccboutstandingcount);
1316        arcmsr_pci_unmap_dma(ccb);
1317        ccb->startdone = ARCMSR_CCB_DONE;
1318        spin_lock_irqsave(&acb->ccblist_lock, flags);
1319        list_add_tail(&ccb->list, &acb->ccb_free_list);
1320        spin_unlock_irqrestore(&acb->ccblist_lock, flags);
1321        pcmd->scsi_done(pcmd);
1322}
1323
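/*
** Report a CHECK CONDITION: copy the sense data returned in the ARCMSR_CDB
** into the command's sense buffer and mark it valid.
*/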
1324static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1325{
1326
1327        struct scsi_cmnd *pcmd = ccb->pcmd;
1328        struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
1329        pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1330        if (sensebuffer) {
1331                int sense_data_length =
1332                        sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
1333                        ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
1334                memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
1335                memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
1336                sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1337                sensebuffer->Valid = 1;
1338                pcmd->result |= (DRIVER_SENSE << 24);
1339        }
1340}
1341
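/*
** Mask all outbound interrupt sources for the adapter type and return the
** previous mask (zero on adapters that do not expose one) so it can be
** restored later by arcmsr_enable_outbound_ints().
*/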
1342static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1343{
1344        u32 orig_mask = 0;
1345        switch (acb->adapter_type) {
1346        case ACB_ADAPTER_TYPE_A: {
1347                struct MessageUnit_A __iomem *reg = acb->pmuA;
1348                orig_mask = readl(&reg->outbound_intmask);
1349                writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
1350                                                &reg->outbound_intmask);
1351                }
1352                break;
1353        case ACB_ADAPTER_TYPE_B: {
1354                struct MessageUnit_B *reg = acb->pmuB;
1355                orig_mask = readl(reg->iop2drv_doorbell_mask);
1356                writel(0, reg->iop2drv_doorbell_mask);
1357                }
1358                break;
1359        case ACB_ADAPTER_TYPE_C: {
1360                struct MessageUnit_C __iomem *reg = acb->pmuC;
1361                /* disable all outbound interrupt */
1362                orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
1363                writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
1364                }
1365                break;
1366        case ACB_ADAPTER_TYPE_D: {
1367                struct MessageUnit_D *reg = acb->pmuD;
1368                /* disable all outbound interrupt */
1369                writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1370                }
1371                break;
1372        case ACB_ADAPTER_TYPE_E:
1373        case ACB_ADAPTER_TYPE_F: {
1374                struct MessageUnit_E __iomem *reg = acb->pmuE;
1375                orig_mask = readl(&reg->host_int_mask);
1376                writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
1377                readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
1378                }
1379                break;
1380        }
1381        return orig_mask;
1382}
1383
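/*
** Translate the firmware DeviceStatus of a completed CCB into a SCSI result,
** update the per-target/LUN RAID state and complete the command.
*/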
1384static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
1385                        struct CommandControlBlock *ccb, bool error)
1386{
1387        uint8_t id, lun;
1388        id = ccb->pcmd->device->id;
1389        lun = ccb->pcmd->device->lun;
1390        if (!error) {
1391                if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1392                        acb->devstate[id][lun] = ARECA_RAID_GOOD;
1393                ccb->pcmd->result = DID_OK << 16;
1394                arcmsr_ccb_complete(ccb);
1395        } else {
1396                switch (ccb->arcmsr_cdb.DeviceStatus) {
1397                case ARCMSR_DEV_SELECT_TIMEOUT: {
1398                        acb->devstate[id][lun] = ARECA_RAID_GONE;
1399                        ccb->pcmd->result = DID_NO_CONNECT << 16;
1400                        arcmsr_ccb_complete(ccb);
1401                        }
1402                        break;
1403
1404                case ARCMSR_DEV_ABORTED:
1405
1406                case ARCMSR_DEV_INIT_FAIL: {
1407                        acb->devstate[id][lun] = ARECA_RAID_GONE;
1408                        ccb->pcmd->result = DID_BAD_TARGET << 16;
1409                        arcmsr_ccb_complete(ccb);
1410                        }
1411                        break;
1412
1413                case ARCMSR_DEV_CHECK_CONDITION: {
1414                        acb->devstate[id][lun] = ARECA_RAID_GOOD;
1415                        arcmsr_report_sense_info(ccb);
1416                        arcmsr_ccb_complete(ccb);
1417                        }
1418                        break;
1419
1420                default:
1421                        printk(KERN_NOTICE
1422                                "arcmsr%d: scsi id = %d lun = %d isr got command error done, "
1423                                "but got unknown DeviceStatus = 0x%x\n"
1424                                , acb->host->host_no
1425                                , id
1426                                , lun
1427                                , ccb->arcmsr_cdb.DeviceStatus);
1428                        acb->devstate[id][lun] = ARECA_RAID_GONE;
1429                        ccb->pcmd->result = DID_NO_CONNECT << 16;
1430                        arcmsr_ccb_complete(ccb);
1431                        break;
1432                }
1433        }
1434}
1435
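/*
** Validate a CCB reported done by the IOP before completing it: aborted CCBs
** are finished with DID_ABORT, CCBs that do not belong to this adapter or
** were never started are only logged, everything else is passed on to
** arcmsr_report_ccb_state().
*/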
1436static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
1437{
1438        if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
1439                if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
1440                        struct scsi_cmnd *abortcmd = pCCB->pcmd;
1441                        if (abortcmd) {
1442                                abortcmd->result |= DID_ABORT << 16;
1443                                arcmsr_ccb_complete(pCCB);
1444                                printk(KERN_NOTICE "arcmsr%d: pCCB = '0x%p' isr got aborted command\n",
1445                                        acb->host->host_no, pCCB);
1446                        }
1447                        return;
1448                }
1449                printk(KERN_NOTICE "arcmsr%d: isr got an illegal ccb command "
1450                                "done acb = '0x%p' "
1451                                "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
1452                                " ccboutstandingcount = %d\n"
1453                                , acb->host->host_no
1454                                , acb
1455                                , pCCB
1456                                , pCCB->acb
1457                                , pCCB->startdone
1458                                , atomic_read(&acb->ccboutstandingcount));
1459                return;
1460        }
1461        arcmsr_report_ccb_state(acb, pCCB, error);
1462}
1463
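/*
** Drain whatever is still sitting on the controller's outbound/done queue,
** using the adapter-type specific queue layout, so the corresponding CCBs
** can be completed after an abort or removal.
*/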
1464static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1465{
1466        int i = 0;
1467        uint32_t flag_ccb;
1468        struct ARCMSR_CDB *pARCMSR_CDB;
1469        bool error;
1470        struct CommandControlBlock *pCCB;
1471        unsigned long ccb_cdb_phy;
1472
1473        switch (acb->adapter_type) {
1474
1475        case ACB_ADAPTER_TYPE_A: {
1476                struct MessageUnit_A __iomem *reg = acb->pmuA;
1477                uint32_t outbound_intstatus;
1478                outbound_intstatus = readl(&reg->outbound_intstatus) &
1479                                        acb->outbound_int_enable;
1480                /*clear and abort all outbound posted Q*/
1481                writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1482                while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
1483                                && (i++ < acb->maxOutstanding)) {
1484                        ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1485                        if (acb->cdb_phyadd_hipart)
1486                                ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1487                        pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1488                        pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1489                        error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1490                        arcmsr_drain_donequeue(acb, pCCB, error);
1491                }
1492                }
1493                break;
1494
1495        case ACB_ADAPTER_TYPE_B: {
1496                struct MessageUnit_B *reg = acb->pmuB;
1497                /*clear all outbound posted Q*/
1498                writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
1499                for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1500                        flag_ccb = reg->done_qbuffer[i];
1501                        if (flag_ccb != 0) {
1502                                reg->done_qbuffer[i] = 0;
1503                                ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1504                                if (acb->cdb_phyadd_hipart)
1505                                        ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1506                                pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1507                                pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1508                                error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1509                                arcmsr_drain_donequeue(acb, pCCB, error);
1510                        }
1511                        reg->post_qbuffer[i] = 0;
1512                }
1513                reg->doneq_index = 0;
1514                reg->postq_index = 0;
1515                }
1516                break;
1517        case ACB_ADAPTER_TYPE_C: {
1518                struct MessageUnit_C __iomem *reg = acb->pmuC;
1519                while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
1520                        /*need to do*/
1521                        flag_ccb = readl(&reg->outbound_queueport_low);
1522                        ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1523                        if (acb->cdb_phyadd_hipart)
1524                                ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1525                        pARCMSR_CDB = (struct  ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1526                        pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1527                        error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1528                        arcmsr_drain_donequeue(acb, pCCB, error);
1529                }
1530                }
1531                break;
1532        case ACB_ADAPTER_TYPE_D: {
1533                struct MessageUnit_D  *pmu = acb->pmuD;
1534                uint32_t outbound_write_pointer;
1535                uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
1536                unsigned long flags;
1537
1538                residual = atomic_read(&acb->ccboutstandingcount);
1539                for (i = 0; i < residual; i++) {
1540                        spin_lock_irqsave(&acb->doneq_lock, flags);
1541                        outbound_write_pointer =
1542                                pmu->done_qbuffer[0].addressLow + 1;
1543                        doneq_index = pmu->doneq_index;
1544                        if ((doneq_index & 0xFFF) !=
1545                                (outbound_write_pointer & 0xFFF)) {
1546                                toggle = doneq_index & 0x4000;
1547                                index_stripped = (doneq_index & 0xFFF) + 1;
1548                                index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1549                                pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1550                                        ((toggle ^ 0x4000) + 1);
1551                                doneq_index = pmu->doneq_index;
1552                                spin_unlock_irqrestore(&acb->doneq_lock, flags);
1553                                addressLow = pmu->done_qbuffer[doneq_index &
1554                                        0xFFF].addressLow;
1555                                ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1556                                if (acb->cdb_phyadd_hipart)
1557                                        ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1558                                pARCMSR_CDB = (struct  ARCMSR_CDB *)
1559                                        (acb->vir2phy_offset + ccb_cdb_phy);
1560                                pCCB = container_of(pARCMSR_CDB,
1561                                        struct CommandControlBlock, arcmsr_cdb);
1562                                error = (addressLow &
1563                                        ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
1564                                        true : false;
1565                                arcmsr_drain_donequeue(acb, pCCB, error);
1566                                writel(doneq_index,
1567                                        pmu->outboundlist_read_pointer);
1568                        } else {
1569                                spin_unlock_irqrestore(&acb->doneq_lock, flags);
1570                                mdelay(10);
1571                        }
1572                }
1573                pmu->postq_index = 0;
1574                pmu->doneq_index = 0x40FF;
1575                }
1576                break;
1577        case ACB_ADAPTER_TYPE_E:
1578                arcmsr_hbaE_postqueue_isr(acb);
1579                break;
1580        case ACB_ADAPTER_TYPE_F:
1581                arcmsr_hbaF_postqueue_isr(acb);
1582                break;
1583        }
1584}
1585
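/*
** Surprise-removal path: fail every CCB still in flight with DID_NO_CONNECT
** and remove all SCSI devices recorded in the device map.
*/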
1586static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
1587{
1588        char *acb_dev_map = (char *)acb->device_map;
1589        int target, lun, i;
1590        struct scsi_device *psdev;
1591        struct CommandControlBlock *ccb;
1592        char temp;
1593
1594        for (i = 0; i < acb->maxFreeCCB; i++) {
1595                ccb = acb->pccb_pool[i];
1596                if (ccb->startdone == ARCMSR_CCB_START) {
1597                        ccb->pcmd->result = DID_NO_CONNECT << 16;
1598                        arcmsr_pci_unmap_dma(ccb);
1599                        ccb->pcmd->scsi_done(ccb->pcmd);
1600                }
1601        }
1602        for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
1603                temp = *acb_dev_map;
1604                if (temp) {
1605                        for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
1606                                if (temp & 1) {
1607                                        psdev = scsi_device_lookup(acb->host,
1608                                                0, target, lun);
1609                                        if (psdev != NULL) {
1610                                                scsi_remove_device(psdev);
1611                                                scsi_device_put(psdev);
1612                                        }
1613                                }
1614                                temp >>= 1;
1615                        }
1616                        *acb_dev_map = 0;
1617                }
1618                acb_dev_map++;
1619        }
1620}
1621
1622static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
1623{
1624        struct pci_dev *pdev;
1625        struct Scsi_Host *host;
1626
1627        host = acb->host;
1628        arcmsr_free_sysfs_attr(acb);
1629        scsi_remove_host(host);
1630        flush_work(&acb->arcmsr_do_message_isr_bh);
1631        del_timer_sync(&acb->eternal_timer);
1632        if (set_date_time)
1633                del_timer_sync(&acb->refresh_timer);
1634        pdev = acb->pdev;
1635        arcmsr_free_irq(pdev, acb);
1636        arcmsr_free_ccb_pool(acb);
1637        if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1638                arcmsr_free_io_queue(acb);
1639        arcmsr_unmap_pciregion(acb);
1640        pci_release_regions(pdev);
1641        scsi_host_put(host);
1642        pci_disable_device(pdev);
1643}
1644
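/*
** PCI remove callback. A config-space read of 0xffff means the adapter is
** already gone (surprise removal); otherwise the adapter is quiesced, any
** outstanding commands are allowed to drain and finally aborted, and all
** driver resources are released.
*/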
1645static void arcmsr_remove(struct pci_dev *pdev)
1646{
1647        struct Scsi_Host *host = pci_get_drvdata(pdev);
1648        struct AdapterControlBlock *acb =
1649                (struct AdapterControlBlock *) host->hostdata;
1650        int poll_count = 0;
1651        uint16_t dev_id;
1652
1653        pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
1654        if (dev_id == 0xffff) {
1655                acb->acb_flags &= ~ACB_F_IOP_INITED;
1656                acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
1657                arcmsr_remove_scsi_devices(acb);
1658                arcmsr_free_pcidev(acb);
1659                return;
1660        }
1661        arcmsr_free_sysfs_attr(acb);
1662        scsi_remove_host(host);
1663        flush_work(&acb->arcmsr_do_message_isr_bh);
1664        del_timer_sync(&acb->eternal_timer);
1665        if (set_date_time)
1666                del_timer_sync(&acb->refresh_timer);
1667        arcmsr_disable_outbound_ints(acb);
1668        arcmsr_stop_adapter_bgrb(acb);
1669        arcmsr_flush_adapter_cache(acb);
1670        acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
1671        acb->acb_flags &= ~ACB_F_IOP_INITED;
1672
1673        for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++) {
1674                if (!atomic_read(&acb->ccboutstandingcount))
1675                        break;
1676                arcmsr_interrupt(acb);/* FIXME: need spinlock */
1677                msleep(25);
1678        }
1679
1680        if (atomic_read(&acb->ccboutstandingcount)) {
1681                int i;
1682
1683                arcmsr_abort_allcmd(acb);
1684                arcmsr_done4abort_postqueue(acb);
1685                for (i = 0; i < acb->maxFreeCCB; i++) {
1686                        struct CommandControlBlock *ccb = acb->pccb_pool[i];
1687                        if (ccb->startdone == ARCMSR_CCB_START) {
1688                                ccb->startdone = ARCMSR_CCB_ABORTED;
1689                                ccb->pcmd->result = DID_ABORT << 16;
1690                                arcmsr_ccb_complete(ccb);
1691                        }
1692                }
1693        }
1694        arcmsr_free_irq(pdev, acb);
1695        arcmsr_free_ccb_pool(acb);
1696        if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1697                arcmsr_free_io_queue(acb);
1698        arcmsr_unmap_pciregion(acb);
1699        pci_release_regions(pdev);
1700        scsi_host_put(host);
1701        pci_disable_device(pdev);
1702}
1703
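/*
** Shutdown callback (reboot/kexec): stop the timers, mask interrupts, free
** the IRQs, flush pending message work and tell the IOP to stop background
** rebuild and flush its cache.
*/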
1704static void arcmsr_shutdown(struct pci_dev *pdev)
1705{
1706        struct Scsi_Host *host = pci_get_drvdata(pdev);
1707        struct AdapterControlBlock *acb =
1708                (struct AdapterControlBlock *)host->hostdata;
1709        if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
1710                return;
1711        del_timer_sync(&acb->eternal_timer);
1712        if (set_date_time)
1713                del_timer_sync(&acb->refresh_timer);
1714        arcmsr_disable_outbound_ints(acb);
1715        arcmsr_free_irq(pdev, acb);
1716        flush_work(&acb->arcmsr_do_message_isr_bh);
1717        arcmsr_stop_adapter_bgrb(acb);
1718        arcmsr_flush_adapter_cache(acb);
1719}
1720
1721static int arcmsr_module_init(void)
1722{
1723        int error = 0;
1724        error = pci_register_driver(&arcmsr_pci_driver);
1725        return error;
1726}
1727
1728static void arcmsr_module_exit(void)
1729{
1730        pci_unregister_driver(&arcmsr_pci_driver);
1731}
1732module_init(arcmsr_module_init);
1733module_exit(arcmsr_module_exit);
1734
1735static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
1736                                                u32 intmask_org)
1737{
1738        u32 mask;
1739        switch (acb->adapter_type) {
1740
1741        case ACB_ADAPTER_TYPE_A: {
1742                struct MessageUnit_A __iomem *reg = acb->pmuA;
1743                mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
1744                             ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
1745                             ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
1746                writel(mask, &reg->outbound_intmask);
1747                acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1748                }
1749                break;
1750
1751        case ACB_ADAPTER_TYPE_B: {
1752                struct MessageUnit_B *reg = acb->pmuB;
1753                mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
1754                        ARCMSR_IOP2DRV_DATA_READ_OK |
1755                        ARCMSR_IOP2DRV_CDB_DONE |
1756                        ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
1757                writel(mask, reg->iop2drv_doorbell_mask);
1758                acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
1759                }
1760                break;
1761        case ACB_ADAPTER_TYPE_C: {
1762                struct MessageUnit_C __iomem *reg = acb->pmuC;
1763                mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
1764                writel(intmask_org & mask, &reg->host_int_mask);
1765                acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
1766                }
1767                break;
1768        case ACB_ADAPTER_TYPE_D: {
1769                struct MessageUnit_D *reg = acb->pmuD;
1770
1771                mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
1772                writel(intmask_org | mask, reg->pcief0_int_enable);
1773                break;
1774                }
1775        case ACB_ADAPTER_TYPE_E:
1776        case ACB_ADAPTER_TYPE_F: {
1777                struct MessageUnit_E __iomem *reg = acb->pmuE;
1778
1779                mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
1780                writel(intmask_org & mask, &reg->host_int_mask);
1781                break;
1782                }
1783        }
1784}
1785
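/*
** Build an ARCMSR_CDB from a scsi_cmnd: copy the CDB, map the scatter-gather
** list and emit 32-bit or 64-bit SG entries depending on each segment's DMA
** address. Returns FAILED if the mapping fails or exceeds the host's
** sg_tablesize.
*/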
1786static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1787        struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
1788{
1789        struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1790        int8_t *psge = (int8_t *)&arcmsr_cdb->u;
1791        __le32 address_lo, address_hi;
1792        int arccdbsize = 0x30;
1793        __le32 length = 0;
1794        int i;
1795        struct scatterlist *sg;
1796        int nseg;
1797        ccb->pcmd = pcmd;
1798        memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
1799        arcmsr_cdb->TargetID = pcmd->device->id;
1800        arcmsr_cdb->LUN = pcmd->device->lun;
1801        arcmsr_cdb->Function = 1;
1802        arcmsr_cdb->msgContext = 0;
1803        memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
1804
1805        nseg = scsi_dma_map(pcmd);
1806        if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
1807                return FAILED;
1808        scsi_for_each_sg(pcmd, sg, nseg, i) {
1809                /* Get the physical address of the current data pointer */
1810                length = cpu_to_le32(sg_dma_len(sg));
1811                address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
1812                address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
1813                if (address_hi == 0) {
1814                        struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1815
1816                        pdma_sg->address = address_lo;
1817                        pdma_sg->length = length;
1818                        psge += sizeof (struct SG32ENTRY);
1819                        arccdbsize += sizeof (struct SG32ENTRY);
1820                } else {
1821                        struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1822
1823                        pdma_sg->addresshigh = address_hi;
1824                        pdma_sg->address = address_lo;
1825                        pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
1826                        psge += sizeof (struct SG64ENTRY);
1827                        arccdbsize += sizeof (struct SG64ENTRY);
1828                }
1829        }
1830        arcmsr_cdb->sgcount = (uint8_t)nseg;
1831        arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
1832        arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
1833        if (arccdbsize > 256)
1834                arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1835        if (pcmd->sc_data_direction == DMA_TO_DEVICE)
1836                arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1837        ccb->arc_cdb_size = arccdbsize;
1838        return SUCCESS;
1839}
1840
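/*
** Post a built CCB to the IOP using the adapter-type specific mechanism:
** the inbound queueport registers (types A, C, E, F), a circular post queue
** plus doorbell (type B), or an inbound SRB list (type D).
*/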
1841static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1842{
1843        uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
1844        struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1845        atomic_inc(&acb->ccboutstandingcount);
1846        ccb->startdone = ARCMSR_CCB_START;
1847        switch (acb->adapter_type) {
1848        case ACB_ADAPTER_TYPE_A: {
1849                struct MessageUnit_A __iomem *reg = acb->pmuA;
1850
1851                if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
1852                        writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
1853                        &reg->inbound_queueport);
1854                else
1855                        writel(cdb_phyaddr, &reg->inbound_queueport);
1856                break;
1857        }
1858
1859        case ACB_ADAPTER_TYPE_B: {
1860                struct MessageUnit_B *reg = acb->pmuB;
1861                uint32_t ending_index, index = reg->postq_index;
1862
1863                ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1864                reg->post_qbuffer[ending_index] = 0;
1865                if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1866                        reg->post_qbuffer[index] =
1867                                cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
1868                } else {
1869                        reg->post_qbuffer[index] = cdb_phyaddr;
1870                }
1871                index++;
1872                index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
1873                reg->postq_index = index;
1874                writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
1875                }
1876                break;
1877        case ACB_ADAPTER_TYPE_C: {
1878                struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1879                uint32_t ccb_post_stamp, arc_cdb_size;
1880
1881                arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1882                ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
1883                writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
1884                writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1885                }
1886                break;
1887        case ACB_ADAPTER_TYPE_D: {
1888                struct MessageUnit_D  *pmu = acb->pmuD;
1889                u16 index_stripped;
1890                u16 postq_index, toggle;
1891                unsigned long flags;
1892                struct InBound_SRB *pinbound_srb;
1893
1894                spin_lock_irqsave(&acb->postq_lock, flags);
1895                postq_index = pmu->postq_index;
1896                pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
1897                pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
1898                pinbound_srb->addressLow = cdb_phyaddr;
1899                pinbound_srb->length = ccb->arc_cdb_size >> 2;
1900                arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
1901                toggle = postq_index & 0x4000;
1902                index_stripped = postq_index + 1;
1903                index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
1904                pmu->postq_index = index_stripped ? (index_stripped | toggle) :
1905                        (toggle ^ 0x4000);
1906                writel(postq_index, pmu->inboundlist_write_pointer);
1907                spin_unlock_irqrestore(&acb->postq_lock, flags);
1908                break;
1909                }
1910        case ACB_ADAPTER_TYPE_E: {
1911                struct MessageUnit_E __iomem *pmu = acb->pmuE;
1912                u32 ccb_post_stamp, arc_cdb_size;
1913
1914                arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1915                ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
1916                writel(0, &pmu->inbound_queueport_high);
1917                writel(ccb_post_stamp, &pmu->inbound_queueport_low);
1918                break;
1919                }
1920        case ACB_ADAPTER_TYPE_F: {
1921                struct MessageUnit_F __iomem *pmu = acb->pmuF;
1922                u32 ccb_post_stamp, arc_cdb_size;
1923
1924                if (ccb->arc_cdb_size <= 0x300)
1925                        arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
1926                else {
1927                        arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
1928                        if (arc_cdb_size > 0xF)
1929                                arc_cdb_size = 0xF;
1930                        arc_cdb_size = (arc_cdb_size << 1) | 1;
1931                }
1932                ccb_post_stamp = (ccb->smid | arc_cdb_size);
1933                writel(0, &pmu->inbound_queueport_high);
1934                writel(ccb_post_stamp, &pmu->inbound_queueport_low);
1935                break;
1936                }
1937        }
1938}
1939
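/*
** arcmsr_hbaX_stop_bgrb: post the STOP_BGRB message to stop background
** rebuild on the respective adapter type and wait for the acknowledgment;
** a timeout is only logged.
*/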
1940static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1941{
1942        struct MessageUnit_A __iomem *reg = acb->pmuA;
1943        acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1944        writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1945        if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1946                printk(KERN_NOTICE
1947                        "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1948                        , acb->host->host_no);
1949        }
1950}
1951
1952static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
1953{
1954        struct MessageUnit_B *reg = acb->pmuB;
1955        acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1956        writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
1957
1958        if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1959                printk(KERN_NOTICE
1960                        "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1961                        , acb->host->host_no);
1962        }
1963}
1964
1965static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1966{
1967        struct MessageUnit_C __iomem *reg = pACB->pmuC;
1968        pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1969        writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1970        writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
1971        if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1972                printk(KERN_NOTICE
1973                        "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1974                        , pACB->host->host_no);
1975        }
1976        return;
1977}
1978
1979static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1980{
1981        struct MessageUnit_D *reg = pACB->pmuD;
1982
1983        pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1984        writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
1985        if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1986                pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1987                        "timeout\n", pACB->host->host_no);
1988}
1989
1990static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
1991{
1992        struct MessageUnit_E __iomem *reg = pACB->pmuE;
1993
1994        pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1995        writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1996        pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1997        writel(pACB->out_doorbell, &reg->iobound_doorbell);
1998        if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1999                pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
2000                        "timeout\n", pACB->host->host_no);
2001        }
2002}
2003
2004static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
2005{
2006        switch (acb->adapter_type) {
2007        case ACB_ADAPTER_TYPE_A:
2008                arcmsr_hbaA_stop_bgrb(acb);
2009                break;
2010        case ACB_ADAPTER_TYPE_B:
2011                arcmsr_hbaB_stop_bgrb(acb);
2012                break;
2013        case ACB_ADAPTER_TYPE_C:
2014                arcmsr_hbaC_stop_bgrb(acb);
2015                break;
2016        case ACB_ADAPTER_TYPE_D:
2017                arcmsr_hbaD_stop_bgrb(acb);
2018                break;
2019        case ACB_ADAPTER_TYPE_E:
2020        case ACB_ADAPTER_TYPE_F:
2021                arcmsr_hbaE_stop_bgrb(acb);
2022                break;
2023        }
2024}
2025
2026static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
2027{
2028        dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
2029}
2030
2031static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
2032{
2033        switch (acb->adapter_type) {
2034        case ACB_ADAPTER_TYPE_A: {
2035                struct MessageUnit_A __iomem *reg = acb->pmuA;
2036                writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2037                }
2038                break;
2039        case ACB_ADAPTER_TYPE_B: {
2040                struct MessageUnit_B *reg = acb->pmuB;
2041                writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
2042                }
2043                break;
2044        case ACB_ADAPTER_TYPE_C: {
2045                struct MessageUnit_C __iomem *reg = acb->pmuC;
2046
2047                writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
2048                }
2049                break;
2050        case ACB_ADAPTER_TYPE_D: {
2051                struct MessageUnit_D *reg = acb->pmuD;
2052                writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
2053                        reg->inbound_doorbell);
2054                }
2055                break;
2056        case ACB_ADAPTER_TYPE_E:
2057        case ACB_ADAPTER_TYPE_F: {
2058                struct MessageUnit_E __iomem *reg = acb->pmuE;
2059                acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
2060                writel(acb->out_doorbell, &reg->iobound_doorbell);
2061                }
2062                break;
2063        }
2064}
2065
2066static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
2067{
2068        switch (acb->adapter_type) {
2069        case ACB_ADAPTER_TYPE_A: {
2070                struct MessageUnit_A __iomem *reg = acb->pmuA;
2071                /*
2072                ** push inbound doorbell tell iop, driver data write ok
2073                ** and wait reply on next hwinterrupt for next Qbuffer post
2074                */
2075                writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
2076                }
2077                break;
2078
2079        case ACB_ADAPTER_TYPE_B: {
2080                struct MessageUnit_B *reg = acb->pmuB;
2081                /*
2082                ** push inbound doorbell tell iop, driver data write ok
2083                ** and wait reply on next hwinterrupt for next Qbuffer post
2084                */
2085                writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
2086                }
2087                break;
2088        case ACB_ADAPTER_TYPE_C: {
2089                struct MessageUnit_C __iomem *reg = acb->pmuC;
2090                /*
2091                ** push inbound doorbell tell iop, driver data write ok
2092                ** and wait reply on next hwinterrupt for next Qbuffer post
2093                */
2094                writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
2095                }
2096                break;
2097        case ACB_ADAPTER_TYPE_D: {
2098                struct MessageUnit_D *reg = acb->pmuD;
2099                writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
2100                        reg->inbound_doorbell);
2101                }
2102                break;
2103        case ACB_ADAPTER_TYPE_E:
2104        case ACB_ADAPTER_TYPE_F: {
2105                struct MessageUnit_E __iomem *reg = acb->pmuE;
2106                acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
2107                writel(acb->out_doorbell, &reg->iobound_doorbell);
2108                }
2109                break;
2110        }
2111}
2112
2113struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
2114{
2115        struct QBUFFER __iomem *qbuffer = NULL;
2116        switch (acb->adapter_type) {
2117
2118        case ACB_ADAPTER_TYPE_A: {
2119                struct MessageUnit_A __iomem *reg = acb->pmuA;
2120                qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
2121                }
2122                break;
2123        case ACB_ADAPTER_TYPE_B: {
2124                struct MessageUnit_B *reg = acb->pmuB;
2125                qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2126                }
2127                break;
2128        case ACB_ADAPTER_TYPE_C: {
2129                struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
2130                qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
2131                }
2132                break;
2133        case ACB_ADAPTER_TYPE_D: {
2134                struct MessageUnit_D *reg = acb->pmuD;
2135                qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2136                }
2137                break;
2138        case ACB_ADAPTER_TYPE_E: {
2139                struct MessageUnit_E __iomem *reg = acb->pmuE;
2140                qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
2141                }
2142                break;
2143        case ACB_ADAPTER_TYPE_F: {
2144                qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
2145                }
2146                break;
2147        }
2148        return qbuffer;
2149}
2150
2151static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
2152{
2153        struct QBUFFER __iomem *pqbuffer = NULL;
2154        switch (acb->adapter_type) {
2155
2156        case ACB_ADAPTER_TYPE_A: {
2157                struct MessageUnit_A __iomem *reg = acb->pmuA;
2158                pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
2159                }
2160                break;
2161        case ACB_ADAPTER_TYPE_B: {
2162                struct MessageUnit_B  *reg = acb->pmuB;
2163                pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2164                }
2165                break;
2166        case ACB_ADAPTER_TYPE_C: {
2167                struct MessageUnit_C __iomem *reg = acb->pmuC;
2168                pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
2169                }
2170                break;
2171        case ACB_ADAPTER_TYPE_D: {
2172                struct MessageUnit_D *reg = acb->pmuD;
2173                pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2174                }
2175                break;
2176        case ACB_ADAPTER_TYPE_E: {
2177                struct MessageUnit_E __iomem *reg = acb->pmuE;
2178                pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
2179                }
2180                break;
2181        case ACB_ADAPTER_TYPE_F:
2182                pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
2183                break;
2184        }
2185        return pqbuffer;
2186}
2187
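/*
** Copy the IOP-to-driver message buffer into the driver's circular rqbuffer
** using 32-bit MMIO reads through a small bounce buffer; used for adapter
** types newer than B. Returns 0 if the bounce buffer cannot be allocated,
** 1 otherwise.
*/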
2188static uint32_t
2189arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
2190                struct QBUFFER __iomem *prbuffer)
2191{
2192        uint8_t *pQbuffer;
2193        uint8_t *buf1 = NULL;
2194        uint32_t __iomem *iop_data;
2195        uint32_t iop_len, data_len, *buf2 = NULL;
2196
2197        iop_data = (uint32_t __iomem *)prbuffer->data;
2198        iop_len = readl(&prbuffer->data_len);
2199        if (iop_len > 0) {
2200                buf1 = kmalloc(128, GFP_ATOMIC);
2201                buf2 = (uint32_t *)buf1;
2202                if (buf1 == NULL)
2203                        return 0;
2204                data_len = iop_len;
2205                while (data_len >= 4) {
2206                        *buf2++ = readl(iop_data);
2207                        iop_data++;
2208                        data_len -= 4;
2209                }
2210                if (data_len)
2211                        *buf2 = readl(iop_data);
2212                buf2 = (uint32_t *)buf1;
2213        }
2214        while (iop_len > 0) {
2215                pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2216                *pQbuffer = *buf1;
2217                acb->rqbuf_putIndex++;
2218                /* if last, index number set it to 0 */
2219                acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2220                buf1++;
2221                iop_len--;
2222        }
2223        kfree(buf2);
2224        /* let IOP know data has been read */
2225        arcmsr_iop_message_read(acb);
2226        return 1;
2227}
2228
2229uint32_t
2230arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
2231        struct QBUFFER __iomem *prbuffer)
2232{
2233        uint8_t *pQbuffer;
2234        uint8_t __iomem *iop_data;
2235        uint32_t iop_len;
2236
2237        if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
2238                return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
2239        iop_data = (uint8_t __iomem *)prbuffer->data;
2240        iop_len = readl(&prbuffer->data_len);
2241        while (iop_len > 0) {
2242                pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2243                *pQbuffer = readb(iop_data);
2244                acb->rqbuf_putIndex++;
2245                acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2246                iop_data++;
2247                iop_len--;
2248        }
2249        arcmsr_iop_message_read(acb);
2250        return 1;
2251}
2252
2253static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
2254{
2255        unsigned long flags;
2256        struct QBUFFER __iomem  *prbuffer;
2257        int32_t buf_empty_len;
2258
2259        spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2260        prbuffer = arcmsr_get_iop_rqbuffer(acb);
2261        buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
2262                (ARCMSR_MAX_QBUFFER - 1);
2263        if (buf_empty_len >= readl(&prbuffer->data_len)) {
2264                if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2265                        acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2266        } else
2267                acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2268        spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2269}
2270
2271static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
2272{
2273        uint8_t *pQbuffer;
2274        struct QBUFFER __iomem *pwbuffer;
2275        uint8_t *buf1 = NULL;
2276        uint32_t __iomem *iop_data;
2277        uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
2278
2279        if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2280                buf1 = kmalloc(128, GFP_ATOMIC);
2281                buf2 = (uint32_t *)buf1;
2282                if (buf1 == NULL)
2283                        return;
2284
2285                acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2286                pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2287                iop_data = (uint32_t __iomem *)pwbuffer->data;
2288                while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2289                        && (allxfer_len < 124)) {
2290                        pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2291                        *buf1 = *pQbuffer;
2292                        acb->wqbuf_getIndex++;
2293                        acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2294                        buf1++;
2295                        allxfer_len++;
2296                }
2297                data_len = allxfer_len;
2298                buf1 = (uint8_t *)buf2;
2299                while (data_len >= 4) {
2300                        data = *buf2++;
2301                        writel(data, iop_data);
2302                        iop_data++;
2303                        data_len -= 4;
2304                }
2305                if (data_len) {
2306                        data = *buf2;
2307                        writel(data, iop_data);
2308                }
2309                writel(allxfer_len, &pwbuffer->data_len);
2310                kfree(buf1);
2311                arcmsr_iop_message_wrote(acb);
2312        }
2313}
2314
2315void
2316arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
2317{
2318        uint8_t *pQbuffer;
2319        struct QBUFFER __iomem *pwbuffer;
2320        uint8_t __iomem *iop_data;
2321        int32_t allxfer_len = 0;
2322
2323        if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
2324                arcmsr_write_ioctldata2iop_in_DWORD(acb);
2325                return;
2326        }
2327        if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2328                acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2329                pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2330                iop_data = (uint8_t __iomem *)pwbuffer->data;
2331                while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2332                        && (allxfer_len < 124)) {
2333                        pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2334                        writeb(*pQbuffer, iop_data);
2335                        acb->wqbuf_getIndex++;
2336                        acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2337                        iop_data++;
2338                        allxfer_len++;
2339                }
2340                writel(allxfer_len, &pwbuffer->data_len);
2341                arcmsr_iop_message_wrote(acb);
2342        }
2343}
2344
2345static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
2346{
2347        unsigned long flags;
2348
2349        spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2350        acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
2351        if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2352                arcmsr_write_ioctldata2iop(acb);
2353        if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
2354                acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
2355        spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2356}
2357
2358static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
2359{
2360        uint32_t outbound_doorbell;
2361        struct MessageUnit_A __iomem *reg = acb->pmuA;
2362        outbound_doorbell = readl(&reg->outbound_doorbell);
2363        do {
2364                writel(outbound_doorbell, &reg->outbound_doorbell);
2365                if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
2366                        arcmsr_iop2drv_data_wrote_handle(acb);
2367                if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
2368                        arcmsr_iop2drv_data_read_handle(acb);
2369                outbound_doorbell = readl(&reg->outbound_doorbell);
2370        } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
2371                | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
2372}

2373static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
2374{
2375        uint32_t outbound_doorbell;
2376        struct MessageUnit_C __iomem *reg = pACB->pmuC;
2377        /*
2378        *******************************************************************
2379        **  Maybe here we need to check wrqbuffer_lock is lock or not
2380        **  DOORBELL: din! don!
2381        **  check if there are any mail need to pack from firmware
2382        *******************************************************************
2383        */
2384        outbound_doorbell = readl(&reg->outbound_doorbell);
2385        do {
2386                writel(outbound_doorbell, &reg->outbound_doorbell_clear);
2387                readl(&reg->outbound_doorbell_clear);
2388                if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
2389                        arcmsr_iop2drv_data_wrote_handle(pACB);
2390                if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
2391                        arcmsr_iop2drv_data_read_handle(pACB);
2392                if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
2393                        arcmsr_hbaC_message_isr(pACB);
2394                outbound_doorbell = readl(&reg->outbound_doorbell);
2395        } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
2396                | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
2397                | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
2398}
2399
2400static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
2401{
2402        uint32_t outbound_doorbell;
2403        struct MessageUnit_D  *pmu = pACB->pmuD;
2404
2405        outbound_doorbell = readl(pmu->outbound_doorbell);
2406        do {
2407                writel(outbound_doorbell, pmu->outbound_doorbell);
2408                if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
2409                        arcmsr_hbaD_message_isr(pACB);
2410                if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
2411                        arcmsr_iop2drv_data_wrote_handle(pACB);
2412                if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
2413                        arcmsr_iop2drv_data_read_handle(pACB);
2414                outbound_doorbell = readl(pmu->outbound_doorbell);
2415        } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
2416                | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
2417                | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
2418}
2419
2420static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
2421{
2422        uint32_t outbound_doorbell, in_doorbell, tmp, i;
2423        struct MessageUnit_E __iomem *reg = pACB->pmuE;
2424
2425        if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
2426                for (i = 0; i < 5; i++) {
2427                        in_doorbell = readl(&reg->iobound_doorbell);
2428                        if (in_doorbell != 0)
2429                                break;
2430                }
2431        } else
2432                in_doorbell = readl(&reg->iobound_doorbell);
2433        outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
2434        do {
2435                writel(0, &reg->host_int_status); /* clear interrupt */
2436                if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
2437                        arcmsr_iop2drv_data_wrote_handle(pACB);
2438                }
2439                if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
2440                        arcmsr_iop2drv_data_read_handle(pACB);
2441                }
2442                if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
2443                        arcmsr_hbaE_message_isr(pACB);
2444                }
2445                tmp = in_doorbell;
2446                in_doorbell = readl(&reg->iobound_doorbell);
2447                outbound_doorbell = tmp ^ in_doorbell;
2448        } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
2449                | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
2450                | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
2451        pACB->in_doorbell = in_doorbell;
2452}
2453
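/*
** arcmsr_hbaX_postqueue_isr: walk the adapter-type specific outbound/done
** queue, convert each reported entry back into its CommandControlBlock and
** complete it through arcmsr_drain_donequeue().
*/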
2454static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
2455{
2456        uint32_t flag_ccb;
2457        struct MessageUnit_A __iomem *reg = acb->pmuA;
2458        struct ARCMSR_CDB *pARCMSR_CDB;
2459        struct CommandControlBlock *pCCB;
2460        bool error;
2461        unsigned long cdb_phy_addr;
2462
2463        while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
2464                cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2465                if (acb->cdb_phyadd_hipart)
2466                        cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2467                pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2468                pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
2469                error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2470                arcmsr_drain_donequeue(acb, pCCB, error);
2471        }
2472}
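
/*
** Type B post queue ISR: completed CCBs are fetched from the done_qbuffer ring
** in host memory at doneq_index; each consumed slot is cleared and the index
** wraps at ARCMSR_MAX_HBB_POSTQUEUE.
*/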
2473static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
2474{
2475        uint32_t index;
2476        uint32_t flag_ccb;
2477        struct MessageUnit_B *reg = acb->pmuB;
2478        struct ARCMSR_CDB *pARCMSR_CDB;
2479        struct CommandControlBlock *pCCB;
2480        bool error;
2481        unsigned long cdb_phy_addr;
2482
2483        index = reg->doneq_index;
2484        while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
2485                cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2486                if (acb->cdb_phyadd_hipart)
2487                        cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2488                pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2489                pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
2490                error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2491                arcmsr_drain_donequeue(acb, pCCB, error);
2492                reg->done_qbuffer[index] = 0;
2493                index++;
2494                index %= ARCMSR_MAX_HBB_POSTQUEUE;
2495                reg->doneq_index = index;
2496        }
2497}
2498
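/*
** Type C post queue ISR: flag_ccb from outbound_queueport_low holds the CDB
** physical address in its upper bits plus an error flag.  After every
** ARCMSR_HBC_ISR_THROTTLING_LEVEL completions the driver rings the
** ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING doorbell.
*/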
2499static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
2500{
2501        struct MessageUnit_C __iomem *phbcmu;
2502        struct ARCMSR_CDB *arcmsr_cdb;
2503        struct CommandControlBlock *ccb;
2504        uint32_t flag_ccb, throttling = 0;
2505        unsigned long ccb_cdb_phy;
2506        int error;
2507
2508        phbcmu = acb->pmuC;
2509        /* areca cdb command done */
2510        /* Use correct offset and size for syncing */
2511
2512        while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
2513                        0xFFFFFFFF) {
2514                ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
2515                if (acb->cdb_phyadd_hipart)
2516                        ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2517                arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2518                        + ccb_cdb_phy);
2519                ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
2520                        arcmsr_cdb);
2521                error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
2522                        ? true : false;
2523                /* check if command done with no error */
2524                arcmsr_drain_donequeue(acb, ccb, error);
2525                throttling++;
2526                if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
2527                        writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
2528                                &phbcmu->inbound_doorbell);
2529                        throttling = 0;
2530                }
2531        }
2532}
2533
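/*
** Type D post queue ISR: done_qbuffer[0].addressLow holds the IOP's current
** write pointer, while doneq_index keeps a 0x4000 toggle bit on top of the
** 12-bit ring index.  Entries are drained under doneq_lock and the updated
** read pointer is written back to outboundlist_read_pointer after each one.
*/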
2534static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
2535{
2536        u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
2537        uint32_t addressLow;
2538        int error;
2539        struct MessageUnit_D  *pmu;
2540        struct ARCMSR_CDB *arcmsr_cdb;
2541        struct CommandControlBlock *ccb;
2542        unsigned long flags, ccb_cdb_phy;
2543
2544        spin_lock_irqsave(&acb->doneq_lock, flags);
2545        pmu = acb->pmuD;
2546        outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
2547        doneq_index = pmu->doneq_index;
2548        if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
2549                do {
2550                        toggle = doneq_index & 0x4000;
2551                        index_stripped = (doneq_index & 0xFFF) + 1;
2552                        index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
2553                        pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
2554                                ((toggle ^ 0x4000) + 1);
2555                        doneq_index = pmu->doneq_index;
2556                        addressLow = pmu->done_qbuffer[doneq_index &
2557                                0xFFF].addressLow;
2558                        ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
2559                        if (acb->cdb_phyadd_hipart)
2560                                ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2561                        arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2562                                + ccb_cdb_phy);
2563                        ccb = container_of(arcmsr_cdb,
2564                                struct CommandControlBlock, arcmsr_cdb);
2565                        error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
2566                                ? true : false;
2567                        arcmsr_drain_donequeue(acb, ccb, error);
2568                        writel(doneq_index, pmu->outboundlist_read_pointer);
2569                } while ((doneq_index & 0xFFF) !=
2570                        (outbound_write_pointer & 0xFFF));
2571        }
2572        writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
2573                pmu->outboundlist_interrupt_cause);
2574        readl(pmu->outboundlist_interrupt_cause);
2575        spin_unlock_irqrestore(&acb->doneq_lock, flags);
2576}
2577
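/*
** Type E post queue ISR: completions are consumed from the host-resident
** pCompletionQ until doneq_index catches up with the IOP's
** reply_post_producer_index; each entry's cmdSMID indexes pccb_pool directly,
** and the consumer index is written back to the IOP when the loop finishes.
*/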
2578static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
2579{
2580        uint32_t doneq_index;
2581        uint16_t cmdSMID;
2582        int error;
2583        struct MessageUnit_E __iomem *pmu;
2584        struct CommandControlBlock *ccb;
2585        unsigned long flags;
2586
2587        spin_lock_irqsave(&acb->doneq_lock, flags);
2588        doneq_index = acb->doneq_index;
2589        pmu = acb->pmuE;
2590        while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
2591                cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2592                ccb = acb->pccb_pool[cmdSMID];
2593                error = (acb->pCompletionQ[doneq_index].cmdFlag
2594                        & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
2595                arcmsr_drain_donequeue(acb, ccb, error);
2596                doneq_index++;
2597                if (doneq_index >= acb->completionQ_entry)
2598                        doneq_index = 0;
2599        }
2600        acb->doneq_index = doneq_index;
2601        writel(doneq_index, &pmu->reply_post_consumer_index);
2602        spin_unlock_irqrestore(&acb->doneq_lock, flags);
2603}
2604
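/*
** Type F post queue ISR: like type E, but completion slots are recycled in
** place.  A cmdSMID of 0xffff marks an empty slot, and each consumed entry is
** reset to 0xffff before doneq_index advances.
*/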
2605static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
2606{
2607        uint32_t doneq_index;
2608        uint16_t cmdSMID;
2609        int error;
2610        struct MessageUnit_F __iomem *phbcmu;
2611        struct CommandControlBlock *ccb;
2612        unsigned long flags;
2613
2614        spin_lock_irqsave(&acb->doneq_lock, flags);
2615        doneq_index = acb->doneq_index;
2616        phbcmu = acb->pmuF;
2617        while (1) {
2618                cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2619                if (cmdSMID == 0xffff)
2620                        break;
2621                ccb = acb->pccb_pool[cmdSMID];
2622                error = (acb->pCompletionQ[doneq_index].cmdFlag &
2623                        ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
2624                arcmsr_drain_donequeue(acb, ccb, error);
2625                acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
2626                doneq_index++;
2627                if (doneq_index >= acb->completionQ_entry)
2628                        doneq_index = 0;
2629        }
2630        acb->doneq_index = doneq_index;
2631        writel(doneq_index, &phbcmu->reply_post_consumer_index);
2632        spin_unlock_irqrestore(&acb->doneq_lock, flags);
2633}
2634
2635/*
2636**********************************************************************************
2637** Handle a message interrupt
2638**
2639** The only message interrupt we expect is in response to a query for the current adapter config.
2640** We compare the returned drive map against the cached one to detect newly-attached drives.
2641**********************************************************************************
2642*/
2643static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2644{
2645        struct MessageUnit_A __iomem *reg  = acb->pmuA;
2646        /*clear interrupt and message state*/
2647        writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
2648        if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2649                schedule_work(&acb->arcmsr_do_message_isr_bh);
2650}
2651static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2652{
2653        struct MessageUnit_B *reg  = acb->pmuB;
2654
2655        /*clear interrupt and message state*/
2656        writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2657        if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2658                schedule_work(&acb->arcmsr_do_message_isr_bh);
2659}
2660/*
2661**********************************************************************************
2662** Handle a message interrupt
2663**
2664** The only message interrupt we expect is in response to a query for the
2665** current adapter config.
2666** We compare the returned drive map against the cached one to detect newly-attached drives.
2667**********************************************************************************
2668*/
2669static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2670{
2671        struct MessageUnit_C __iomem *reg  = acb->pmuC;
2672        /*clear interrupt and message state*/
2673        writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
2674        if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2675                schedule_work(&acb->arcmsr_do_message_isr_bh);
2676}
2677
2678static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2679{
2680        struct MessageUnit_D *reg  = acb->pmuD;
2681
2682        writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
2683        readl(reg->outbound_doorbell);
2684        if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2685                schedule_work(&acb->arcmsr_do_message_isr_bh);
2686}
2687
2688static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2689{
2690        struct MessageUnit_E __iomem *reg  = acb->pmuE;
2691
2692        writel(0, &reg->host_int_status);
2693        if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2694                schedule_work(&acb->arcmsr_do_message_isr_bh);
2695}
2696
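/*
** Per-type interrupt top halves: read the (masked) interrupt status, then loop
** acknowledging and servicing doorbell, post queue and message events until
** the status clears, returning IRQ_HANDLED or IRQ_NONE accordingly.
*/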
2697static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2698{
2699        uint32_t outbound_intstatus;
2700        struct MessageUnit_A __iomem *reg = acb->pmuA;
2701        outbound_intstatus = readl(&reg->outbound_intstatus) &
2702                acb->outbound_int_enable;
2703        if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
2704                return IRQ_NONE;
2705        do {
2706                writel(outbound_intstatus, &reg->outbound_intstatus);
2707                if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
2708                        arcmsr_hbaA_doorbell_isr(acb);
2709                if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
2710                        arcmsr_hbaA_postqueue_isr(acb);
2711                if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
2712                        arcmsr_hbaA_message_isr(acb);
2713                outbound_intstatus = readl(&reg->outbound_intstatus) &
2714                        acb->outbound_int_enable;
2715        } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
2716                | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
2717                | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
2718        return IRQ_HANDLED;
2719}
2720
2721static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
2722{
2723        uint32_t outbound_doorbell;
2724        struct MessageUnit_B *reg = acb->pmuB;
2725        outbound_doorbell = readl(reg->iop2drv_doorbell) &
2726                                acb->outbound_int_enable;
2727        if (!outbound_doorbell)
2728                return IRQ_NONE;
2729        do {
2730                writel(~outbound_doorbell, reg->iop2drv_doorbell);
2731                writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
2732                if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
2733                        arcmsr_iop2drv_data_wrote_handle(acb);
2734                if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
2735                        arcmsr_iop2drv_data_read_handle(acb);
2736                if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
2737                        arcmsr_hbaB_postqueue_isr(acb);
2738                if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
2739                        arcmsr_hbaB_message_isr(acb);
2740                outbound_doorbell = readl(reg->iop2drv_doorbell) &
2741                        acb->outbound_int_enable;
2742        } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
2743                | ARCMSR_IOP2DRV_DATA_READ_OK
2744                | ARCMSR_IOP2DRV_CDB_DONE
2745                | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
2746        return IRQ_HANDLED;
2747}
2748
2749static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
2750{
2751        uint32_t host_interrupt_status;
2752        struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
2753        /*
2754        *********************************************
2755        **   check outbound intstatus
2756        *********************************************
2757        */
2758        host_interrupt_status = readl(&phbcmu->host_int_status) &
2759                (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2760                ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2761        if (!host_interrupt_status)
2762                return IRQ_NONE;
2763        do {
2764                if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
2765                        arcmsr_hbaC_doorbell_isr(pACB);
2766                /* MU post queue interrupts*/
2767                if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
2768                        arcmsr_hbaC_postqueue_isr(pACB);
2769                host_interrupt_status = readl(&phbcmu->host_int_status);
2770        } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2771                ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2772        return IRQ_HANDLED;
2773}
2774
2775static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
2776{
2777        u32 host_interrupt_status;
2778        struct MessageUnit_D  *pmu = pACB->pmuD;
2779
2780        host_interrupt_status = readl(pmu->host_int_status) &
2781                (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2782                ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
2783        if (!host_interrupt_status)
2784                return IRQ_NONE;
2785        do {
2786                /* MU post queue interrupts*/
2787                if (host_interrupt_status &
2788                        ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
2789                        arcmsr_hbaD_postqueue_isr(pACB);
2790                if (host_interrupt_status &
2791                        ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
2792                        arcmsr_hbaD_doorbell_isr(pACB);
2793                host_interrupt_status = readl(pmu->host_int_status);
2794        } while (host_interrupt_status &
2795                (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2796                ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
2797        return IRQ_HANDLED;
2798}
2799
2800static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
2801{
2802        uint32_t host_interrupt_status;
2803        struct MessageUnit_E __iomem *pmu = pACB->pmuE;
2804
2805        host_interrupt_status = readl(&pmu->host_int_status) &
2806                (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2807                ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
2808        if (!host_interrupt_status)
2809                return IRQ_NONE;
2810        do {
2811                /* MU ioctl transfer doorbell interrupts*/
2812                if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
2813                        arcmsr_hbaE_doorbell_isr(pACB);
2814                }
2815                /* MU post queue interrupts*/
2816                if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
2817                        arcmsr_hbaE_postqueue_isr(pACB);
2818                }
2819                host_interrupt_status = readl(&pmu->host_int_status);
2820        } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2821                ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
2822        return IRQ_HANDLED;
2823}
2824
2825static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
2826{
2827        uint32_t host_interrupt_status;
2828        struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;
2829
2830        host_interrupt_status = readl(&phbcmu->host_int_status) &
2831                (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2832                ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
2833        if (!host_interrupt_status)
2834                return IRQ_NONE;
2835        do {
2836                /* MU post queue interrupts*/
2837                if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
2838                        arcmsr_hbaF_postqueue_isr(pACB);
2839
2840                /* MU ioctl transfer doorbell interrupts*/
2841                if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
2842                        arcmsr_hbaE_doorbell_isr(pACB);
2843
2844                host_interrupt_status = readl(&phbcmu->host_int_status);
2845        } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2846                ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
2847        return IRQ_HANDLED;
2848}
2849
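/* Route interrupt handling to the adapter-type specific top half. */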
2850static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2851{
2852        switch (acb->adapter_type) {
2853        case ACB_ADAPTER_TYPE_A:
2854                return arcmsr_hbaA_handle_isr(acb);
2855        case ACB_ADAPTER_TYPE_B:
2856                return arcmsr_hbaB_handle_isr(acb);
2857        case ACB_ADAPTER_TYPE_C:
2858                return arcmsr_hbaC_handle_isr(acb);
2859        case ACB_ADAPTER_TYPE_D:
2860                return arcmsr_hbaD_handle_isr(acb);
2861        case ACB_ADAPTER_TYPE_E:
2862                return arcmsr_hbaE_handle_isr(acb);
2863        case ACB_ADAPTER_TYPE_F:
2864                return arcmsr_hbaF_handle_isr(acb);
2865        default:
2866                return IRQ_NONE;
2867        }
2868}
2869
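/*
** Park the IOP: if background rebuild was started, stop it and flush the
** adapter cache, with outbound interrupts masked for the duration.
*/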
2870static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2871{
2872        if (acb) {
2873                /* stop adapter background rebuild */
2874                if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2875                        uint32_t intmask_org;
2876                        acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2877                        intmask_org = arcmsr_disable_outbound_ints(acb);
2878                        arcmsr_stop_adapter_bgrb(acb);
2879                        arcmsr_flush_adapter_cache(acb);
2880                        arcmsr_enable_outbound_ints(acb, intmask_org);
2881                }
2882        }
2883}
2884
2885
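/*
** Drain pending IOP-to-driver message data: while the overflow flag is set,
** reset the rqbuffer indexes and pull another chunk from the IOP, allowing up
** to 15 passes with a 30 ms pause between them.
*/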
2886void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
2887{
2888        uint32_t        i;
2889
2890        if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2891                for (i = 0; i < 15; i++) {
2892                        if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2893                                acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2894                                acb->rqbuf_getIndex = 0;
2895                                acb->rqbuf_putIndex = 0;
2896                                arcmsr_iop_message_read(acb);
2897                                mdelay(30);
2898                        } else if (acb->rqbuf_getIndex !=
2899                                   acb->rqbuf_putIndex) {
2900                                acb->rqbuf_getIndex = 0;
2901                                acb->rqbuf_putIndex = 0;
2902                                mdelay(30);
2903                        } else
2904                                break;
2905                }
2906        }
2907}
2908
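/*
** Service an ioctl-style message carried inside a READ_BUFFER/WRITE_BUFFER
** SCSI command: the control code comes from CDB bytes 5-8 and the payload,
** which must fit in a single scatter-gather segment, is interpreted as a
** struct CMD_MESSAGE_FIELD.
*/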
2909static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
2910                struct scsi_cmnd *cmd)
2911{
2912        char *buffer;
2913        unsigned short use_sg;
2914        int retvalue = 0, transfer_len = 0;
2915        unsigned long flags;
2916        struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2917        uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
2918                (uint32_t)cmd->cmnd[6] << 16 |
2919                (uint32_t)cmd->cmnd[7] << 8 |
2920                (uint32_t)cmd->cmnd[8];
2921        struct scatterlist *sg;
2922
2923        use_sg = scsi_sg_count(cmd);
2924        sg = scsi_sglist(cmd);
2925        buffer = kmap_atomic(sg_page(sg)) + sg->offset;
2926        if (use_sg > 1) {
2927                retvalue = ARCMSR_MESSAGE_FAIL;
2928                goto message_out;
2929        }
2930        transfer_len += sg->length;
2931        if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2932                retvalue = ARCMSR_MESSAGE_FAIL;
2933                pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
2934                goto message_out;
2935        }
2936        pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
2937        switch (controlcode) {
2938        case ARCMSR_MESSAGE_READ_RQBUFFER: {
2939                unsigned char *ver_addr;
2940                uint8_t *ptmpQbuffer;
2941                uint32_t allxfer_len = 0;
2942                ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
2943                if (!ver_addr) {
2944                        retvalue = ARCMSR_MESSAGE_FAIL;
2945                        pr_info("%s: not enough memory!\n", __func__);
2946                        goto message_out;
2947                }
2948                ptmpQbuffer = ver_addr;
2949                spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2950                if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
2951                        unsigned int tail = acb->rqbuf_getIndex;
2952                        unsigned int head = acb->rqbuf_putIndex;
2953                        unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
2954
2955                        allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
2956                        if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
2957                                allxfer_len = ARCMSR_API_DATA_BUFLEN;
2958
2959                        if (allxfer_len <= cnt_to_end)
2960                                memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
2961                        else {
2962                                memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
2963                                memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
2964                        }
2965                        acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
2966                }
2967                memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
2968                        allxfer_len);
2969                if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2970                        struct QBUFFER __iomem *prbuffer;
2971                        acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2972                        prbuffer = arcmsr_get_iop_rqbuffer(acb);
2973                        if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2974                                acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2975                }
2976                spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2977                kfree(ver_addr);
2978                pcmdmessagefld->cmdmessage.Length = allxfer_len;
2979                if (acb->fw_flag == FW_DEADLOCK)
2980                        pcmdmessagefld->cmdmessage.ReturnCode =
2981                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2982                else
2983                        pcmdmessagefld->cmdmessage.ReturnCode =
2984                                ARCMSR_MESSAGE_RETURNCODE_OK;
2985                break;
2986        }
2987        case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2988                unsigned char *ver_addr;
2989                uint32_t user_len;
2990                int32_t cnt2end;
2991                uint8_t *pQbuffer, *ptmpuserbuffer;
2992
2993                user_len = pcmdmessagefld->cmdmessage.Length;
2994                if (user_len > ARCMSR_API_DATA_BUFLEN) {
2995                        retvalue = ARCMSR_MESSAGE_FAIL;
2996                        goto message_out;
2997                }
2998
2999                ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
3000                if (!ver_addr) {
3001                        retvalue = ARCMSR_MESSAGE_FAIL;
3002                        goto message_out;
3003                }
3004                ptmpuserbuffer = ver_addr;
3005
3006                memcpy(ptmpuserbuffer,
3007                        pcmdmessagefld->messagedatabuffer, user_len);
3008                spin_lock_irqsave(&acb->wqbuffer_lock, flags);
3009                if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
3010                        struct SENSE_DATA *sensebuffer =
3011                                (struct SENSE_DATA *)cmd->sense_buffer;
3012                        arcmsr_write_ioctldata2iop(acb);
3013                        /* has error report sensedata */
3014                        sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
3015                        sensebuffer->SenseKey = ILLEGAL_REQUEST;
3016                        sensebuffer->AdditionalSenseLength = 0x0A;
3017                        sensebuffer->AdditionalSenseCode = 0x20;
3018                        sensebuffer->Valid = 1;
3019                        retvalue = ARCMSR_MESSAGE_FAIL;
3020                } else {
3021                        pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
3022                        cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
3023                        if (user_len > cnt2end) {
3024                                memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
3025                                ptmpuserbuffer += cnt2end;
3026                                user_len -= cnt2end;
3027                                acb->wqbuf_putIndex = 0;
3028                                pQbuffer = acb->wqbuffer;
3029                        }
3030                        memcpy(pQbuffer, ptmpuserbuffer, user_len);
3031                        acb->wqbuf_putIndex += user_len;
3032                        acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
3033                        if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
3034                                acb->acb_flags &=
3035                                                ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
3036                                arcmsr_write_ioctldata2iop(acb);
3037                        }
3038                }
3039                spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
3040                kfree(ver_addr);
3041                if (acb->fw_flag == FW_DEADLOCK)
3042                        pcmdmessagefld->cmdmessage.ReturnCode =
3043                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3044                else
3045                        pcmdmessagefld->cmdmessage.ReturnCode =
3046                                ARCMSR_MESSAGE_RETURNCODE_OK;
3047                break;
3048        }
3049        case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
3050                uint8_t *pQbuffer = acb->rqbuffer;
3051
3052                arcmsr_clear_iop2drv_rqueue_buffer(acb);
3053                spin_lock_irqsave(&acb->rqbuffer_lock, flags);
3054                acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3055                acb->rqbuf_getIndex = 0;
3056                acb->rqbuf_putIndex = 0;
3057                memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
3058                spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
3059                if (acb->fw_flag == FW_DEADLOCK)
3060                        pcmdmessagefld->cmdmessage.ReturnCode =
3061                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3062                else
3063                        pcmdmessagefld->cmdmessage.ReturnCode =
3064                                ARCMSR_MESSAGE_RETURNCODE_OK;
3065                break;
3066        }
3067        case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
3068                uint8_t *pQbuffer = acb->wqbuffer;
3069                spin_lock_irqsave(&acb->wqbuffer_lock, flags);
3070                acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3071                        ACB_F_MESSAGE_WQBUFFER_READED);
3072                acb->wqbuf_getIndex = 0;
3073                acb->wqbuf_putIndex = 0;
3074                memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
3075                spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
3076                if (acb->fw_flag == FW_DEADLOCK)
3077                        pcmdmessagefld->cmdmessage.ReturnCode =
3078                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3079                else
3080                        pcmdmessagefld->cmdmessage.ReturnCode =
3081                                ARCMSR_MESSAGE_RETURNCODE_OK;
3082                break;
3083        }
3084        case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
3085                uint8_t *pQbuffer;
3086                arcmsr_clear_iop2drv_rqueue_buffer(acb);
3087                spin_lock_irqsave(&acb->rqbuffer_lock, flags);
3088                acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3089                acb->rqbuf_getIndex = 0;
3090                acb->rqbuf_putIndex = 0;
3091                pQbuffer = acb->rqbuffer;
3092                memset(pQbuffer, 0, sizeof(struct QBUFFER));
3093                spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
3094                spin_lock_irqsave(&acb->wqbuffer_lock, flags);
3095                acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3096                        ACB_F_MESSAGE_WQBUFFER_READED);
3097                acb->wqbuf_getIndex = 0;
3098                acb->wqbuf_putIndex = 0;
3099                pQbuffer = acb->wqbuffer;
3100                memset(pQbuffer, 0, sizeof(struct QBUFFER));
3101                spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
3102                if (acb->fw_flag == FW_DEADLOCK)
3103                        pcmdmessagefld->cmdmessage.ReturnCode =
3104                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3105                else
3106                        pcmdmessagefld->cmdmessage.ReturnCode =
3107                                ARCMSR_MESSAGE_RETURNCODE_OK;
3108                break;
3109        }
3110        case ARCMSR_MESSAGE_RETURN_CODE_3F: {
3111                if (acb->fw_flag == FW_DEADLOCK)
3112                        pcmdmessagefld->cmdmessage.ReturnCode =
3113                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3114                else
3115                        pcmdmessagefld->cmdmessage.ReturnCode =
3116                                ARCMSR_MESSAGE_RETURNCODE_3F;
3117                break;
3118        }
3119        case ARCMSR_MESSAGE_SAY_HELLO: {
3120                static const char hello_string[] = "Hello! I am ARCMSR";
3121                if (acb->fw_flag == FW_DEADLOCK)
3122                        pcmdmessagefld->cmdmessage.ReturnCode =
3123                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3124                else
3125                        pcmdmessagefld->cmdmessage.ReturnCode =
3126                                ARCMSR_MESSAGE_RETURNCODE_OK;
3127                memcpy(pcmdmessagefld->messagedatabuffer,
3128                        hello_string, (int16_t)strlen(hello_string));
3129                break;
3130        }
3131        case ARCMSR_MESSAGE_SAY_GOODBYE: {
3132                if (acb->fw_flag == FW_DEADLOCK)
3133                        pcmdmessagefld->cmdmessage.ReturnCode =
3134                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3135                else
3136                        pcmdmessagefld->cmdmessage.ReturnCode =
3137                                ARCMSR_MESSAGE_RETURNCODE_OK;
3138                arcmsr_iop_parking(acb);
3139                break;
3140        }
3141        case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
3142                if (acb->fw_flag == FW_DEADLOCK)
3143                        pcmdmessagefld->cmdmessage.ReturnCode =
3144                                ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
3145                else
3146                        pcmdmessagefld->cmdmessage.ReturnCode =
3147                                ARCMSR_MESSAGE_RETURNCODE_OK;
3148                arcmsr_flush_adapter_cache(acb);
3149                break;
3150        }
3151        default:
3152                retvalue = ARCMSR_MESSAGE_FAIL;
3153                pr_info("%s: unknown controlcode!\n", __func__);
3154        }
3155message_out:
3156        if (use_sg) {
3157                struct scatterlist *sg = scsi_sglist(cmd);
3158                kunmap_atomic(buffer - sg->offset);
3159        }
3160        return retvalue;
3161}
3162
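/*
** Pop a free CCB from ccb_free_list under ccblist_lock; returns NULL when the
** pool is empty so the caller can ask the midlayer to retry.
*/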
3163static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
3164{
3165        struct list_head *head;
3166        struct CommandControlBlock *ccb = NULL;
3167        unsigned long flags;
3168
3169        spin_lock_irqsave(&acb->ccblist_lock, flags);
3170        head = &acb->ccb_free_list;
3171        if (!list_empty(head)) {
3172                ccb = list_entry(head->next, struct CommandControlBlock, list);
3173                list_del_init(&ccb->list);
3174        } else {
3175                spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3176                return NULL;
3177        }
3178        spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3179        return ccb;
3180}
3181
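/*
** Commands addressed to the virtual device (SCSI target 16, used for IOP
** message transfer) land here: INQUIRY is answered with a fabricated
** processor-type device, and READ_BUFFER/WRITE_BUFFER are funnelled through
** arcmsr_iop_message_xfer().
*/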
3182static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
3183                struct scsi_cmnd *cmd)
3184{
3185        switch (cmd->cmnd[0]) {
3186        case INQUIRY: {
3187                unsigned char inqdata[36];
3188                char *buffer;
3189                struct scatterlist *sg;
3190
3191                if (cmd->device->lun) {
3192                        cmd->result = (DID_TIME_OUT << 16);
3193                        cmd->scsi_done(cmd);
3194                        return;
3195                }
3196                inqdata[0] = TYPE_PROCESSOR;
3197                /* Periph Qualifier & Periph Dev Type */
3198                inqdata[1] = 0;
3199                /* rem media bit & Dev Type Modifier */
3200                inqdata[2] = 0;
3201                /* ISO, ECMA, & ANSI versions */
3202                inqdata[4] = 31;
3203                /* length of additional data */
3204                memcpy(&inqdata[8], "Areca   ", 8);
3205                /* Vendor Identification */
3206                memcpy(&inqdata[16], "RAID controller ", 16);
3207                /* Product Identification */
3208                memcpy(&inqdata[32], "R001", 4); /* Product Revision */
3209
3210                sg = scsi_sglist(cmd);
3211                buffer = kmap_atomic(sg_page(sg)) + sg->offset;
3212
3213                memcpy(buffer, inqdata, sizeof(inqdata));
3214                sg = scsi_sglist(cmd);
3215                kunmap_atomic(buffer - sg->offset);
3216
3217                cmd->scsi_done(cmd);
3218        }
3219        break;
3220        case WRITE_BUFFER:
3221        case READ_BUFFER: {
3222                if (arcmsr_iop_message_xfer(acb, cmd))
3223                        cmd->result = (DID_ERROR << 16);
3224                cmd->scsi_done(cmd);
3225        }
3226        break;
3227        default:
3228                cmd->scsi_done(cmd);
3229        }
3230}
3231
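/*
** queuecommand entry point (locked variant, wrapped by DEF_SCSI_QCMD below):
** commands for target 16 are handled as virtual-device messages; everything
** else gets a free CCB, is translated into an ARCMSR_CDB and posted to the
** IOP.
*/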
3232static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
3233        void (* done)(struct scsi_cmnd *))
3234{
3235        struct Scsi_Host *host = cmd->device->host;
3236        struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
3237        struct CommandControlBlock *ccb;
3238        int target = cmd->device->id;
3239
3240        if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
3241                cmd->result = (DID_NO_CONNECT << 16);
3242                cmd->scsi_done(cmd);
3243                return 0;
3244        }
3245        cmd->scsi_done = done;
3246        cmd->host_scribble = NULL;
3247        cmd->result = 0;
3248        if (target == 16) {
3249                /* virtual device for iop message transfer */
3250                arcmsr_handle_virtual_command(acb, cmd);
3251                return 0;
3252        }
3253        ccb = arcmsr_get_freeccb(acb);
3254        if (!ccb)
3255                return SCSI_MLQUEUE_HOST_BUSY;
3256        if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
3257                cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
3258                cmd->scsi_done(cmd);
3259                return 0;
3260        }
3261        arcmsr_post_ccb(acb, ccb);
3262        return 0;
3263}
3264
3265static DEF_SCSI_QCMD(arcmsr_queue_command)
3266
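/*
** Apply the driver's cmd_timeout setting (in seconds), when non-zero, as the
** per-device request timeout if it exceeds the block layer's current value.
*/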
3267static int arcmsr_slave_config(struct scsi_device *sdev)
3268{
3269        unsigned int    dev_timeout;
3270
3271        dev_timeout = sdev->request_queue->rq_timeout;
3272        if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
3273                blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
3274        return 0;
3275}
3276
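/*
** Parse the firmware's "get config" reply: copy the model string, firmware
** version and device map out of the message rwbuffer 32 bits at a time, then
** pick up the remaining capability words (request length, queue depth, SDRAM
** size, channel count, config version).
*/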
3277static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
3278{
3279        int count;
3280        uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
3281        uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
3282        uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
3283        uint32_t *firm_model = &rwbuffer[15];
3284        uint32_t *firm_version = &rwbuffer[17];
3285        uint32_t *device_map = &rwbuffer[21];
3286
3287        count = 2;
3288        while (count) {
3289                *acb_firm_model = readl(firm_model);
3290                acb_firm_model++;
3291                firm_model++;
3292                count--;
3293        }
3294        count = 4;
3295        while (count) {
3296                *acb_firm_version = readl(firm_version);
3297                acb_firm_version++;
3298                firm_version++;
3299                count--;
3300        }
3301        count = 4;
3302        while (count) {
3303                *acb_device_map = readl(device_map);
3304                acb_device_map++;
3305                device_map++;
3306                count--;
3307        }
3308        pACB->signature = readl(&rwbuffer[0]);
3309        pACB->firm_request_len = readl(&rwbuffer[1]);
3310        pACB->firm_numbers_queue = readl(&rwbuffer[2]);
3311        pACB->firm_sdram_size = readl(&rwbuffer[3]);
3312        pACB->firm_hd_channels = readl(&rwbuffer[4]);
3313        pACB->firm_cfg_version = readl(&rwbuffer[25]);
3314        pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
3315                pACB->host->host_no,
3316                pACB->firm_model,
3317                pACB->firm_version);
3318}
3319
3320static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
3321{
3322        struct MessageUnit_A __iomem *reg = acb->pmuA;
3323
3324        arcmsr_wait_firmware_ready(acb);
3325        writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3326        if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3327                printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
3328                        "miscellaneous data' timeout\n", acb->host->host_no);
3329                return false;
3330        }
3331        arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3332        return true;
3333}
3334static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
3335{
3336        struct MessageUnit_B *reg = acb->pmuB;
3337
3338        arcmsr_wait_firmware_ready(acb);
3339        writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
3340        if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3341                printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
3342                return false;
3343        }
3344        writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
3345        if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3346                printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
3347                        "miscellaneous data' timeout\n", acb->host->host_no);
3348                return false;
3349        }
3350        arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3351        return true;
3352}
3353
3354static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
3355{
3356        uint32_t intmask_org;
3357        struct MessageUnit_C __iomem *reg = pACB->pmuC;
3358
3359        /* disable all outbound interrupt */
3360        intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
3361        writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
3362        /* wait firmware ready */
3363        arcmsr_wait_firmware_ready(pACB);
3364        /* post "get config" instruction */
3365        writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3366        writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
3367        /* wait message ready */
3368        if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3369                printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
3370                        "miscellaneous data' timeout\n", pACB->host->host_no);
3371                return false;
3372        }
3373        arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3374        return true;
3375}
3376
3377static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
3378{
3379        struct MessageUnit_D *reg = acb->pmuD;
3380
3381        if (readl(acb->pmuD->outbound_doorbell) &
3382                ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
3383                writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
3384                        acb->pmuD->outbound_doorbell);/*clear interrupt*/
3385        }
3386        arcmsr_wait_firmware_ready(acb);
3387        /* post "get config" instruction */
3388        writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
3389        /* wait message ready */
3390        if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3391                pr_notice("arcmsr%d: wait get adapter firmware "
3392                        "miscellaneous data timeout\n", acb->host->host_no);
3393                return false;
3394        }
3395        arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
3396        return true;
3397}
3398
3399static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
3400{
3401        struct MessageUnit_E __iomem *reg = pACB->pmuE;
3402        uint32_t intmask_org;
3403
3404        /* disable all outbound interrupt */
3405        intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
3406        writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
3407        /* wait firmware ready */
3408        arcmsr_wait_firmware_ready(pACB);
3409        mdelay(20);
3410        /* post "get config" instruction */
3411        writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3412
3413        pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3414        writel(pACB->out_doorbell, &reg->iobound_doorbell);
3415        /* wait message ready */
3416        if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3417                pr_notice("arcmsr%d: wait get adapter firmware "
3418                        "miscellaneous data timeout\n", pACB->host->host_no);
3419                return false;
3420        }
3421        arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3422        return true;
3423}
3424
3425static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
3426{
3427        struct MessageUnit_F __iomem *reg = pACB->pmuF;
3428        uint32_t intmask_org;
3429
3430        /* disable all outbound interrupt */
3431        intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
3432        writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
3433        /* wait firmware ready */
3434        arcmsr_wait_firmware_ready(pACB);
3435        /* post "get config" instruction */
3436        writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3437
3438        pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3439        writel(pACB->out_doorbell, &reg->iobound_doorbell);
3440        /* wait message ready */
3441        if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3442                pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
3443                          pACB->host->host_no);
3444                return false;
3445        }
3446        arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
3447        return true;
3448}
3449
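/*
** Fetch the firmware configuration for the detected adapter type, then size
** the host queue depth and the free-CCB pool from firm_numbers_queue.
*/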
3450static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3451{
3452        bool rtn = false;
3453
3454        switch (acb->adapter_type) {
3455        case ACB_ADAPTER_TYPE_A:
3456                rtn = arcmsr_hbaA_get_config(acb);
3457                break;
3458        case ACB_ADAPTER_TYPE_B:
3459                rtn = arcmsr_hbaB_get_config(acb);
3460                break;
3461        case ACB_ADAPTER_TYPE_C:
3462                rtn = arcmsr_hbaC_get_config(acb);
3463                break;
3464        case ACB_ADAPTER_TYPE_D:
3465                rtn = arcmsr_hbaD_get_config(acb);
3466                break;
3467        case ACB_ADAPTER_TYPE_E:
3468                rtn = arcmsr_hbaE_get_config(acb);
3469                break;
3470        case ACB_ADAPTER_TYPE_F:
3471                rtn = arcmsr_hbaF_get_config(acb);
3472                break;
3473        default:
3474                break;
3475        }
3476        acb->maxOutstanding = acb->firm_numbers_queue - 1;
3477        if (acb->host->can_queue >= acb->firm_numbers_queue)
3478                acb->host->can_queue = acb->maxOutstanding;
3479        else
3480                acb->maxOutstanding = acb->host->can_queue;
3481        acb->maxFreeCCB = acb->host->can_queue;
3482        if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
3483                acb->maxFreeCCB += 64;
3484        return rtn;
3485}
3486
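/*
** Poll the type A post queue for a specific CCB: drain completed CCBs while
** waiting for poll_ccb to show up, giving up after roughly 100 retries of
** 25 ms each.  The B and C variants below follow the same pattern on their
** respective done queues.
*/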
3487static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
3488        struct CommandControlBlock *poll_ccb)
3489{
3490        struct MessageUnit_A __iomem *reg = acb->pmuA;
3491        struct CommandControlBlock *ccb;
3492        struct ARCMSR_CDB *arcmsr_cdb;
3493        uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
3494        int rtn;
3495        bool error;
3496        unsigned long ccb_cdb_phy;
3497
3498polling_hba_ccb_retry:
3499        poll_count++;
3500        outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
3501        writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
3502        while (1) {
3503                if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
3504                        if (poll_ccb_done) {
3505                                rtn = SUCCESS;
3506                                break;
3507                        } else {
3508                                msleep(25);
3509                                if (poll_count > 100) {
3510                                        rtn = FAILED;
3511                                        break;
3512                                }
3513                                goto polling_hba_ccb_retry;
3514                        }
3515                }
3516                ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3517                if (acb->cdb_phyadd_hipart)
3518                        ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3519                arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3520                ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3521                poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3522                if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3523                        if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3524                                printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3525                                        " poll command aborted successfully\n"
3526                                        , acb->host->host_no
3527                                        , ccb->pcmd->device->id
3528                                        , (u32)ccb->pcmd->device->lun
3529                                        , ccb);
3530                                ccb->pcmd->result = DID_ABORT << 16;
3531                                arcmsr_ccb_complete(ccb);
3532                                continue;
3533                        }
3534                        printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
3535                                " command done ccb = '0x%p'"
3536                                " ccboutstandingcount = %d\n"
3537                                , acb->host->host_no
3538                                , ccb
3539                                , atomic_read(&acb->ccboutstandingcount));
3540                        continue;
3541                }
3542                error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3543                arcmsr_report_ccb_state(acb, ccb, error);
3544        }
3545        return rtn;
3546}
3547
3548static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
3549                                        struct CommandControlBlock *poll_ccb)
3550{
3551        struct MessageUnit_B *reg = acb->pmuB;
3552        struct ARCMSR_CDB *arcmsr_cdb;
3553        struct CommandControlBlock *ccb;
3554        uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
3555        int index, rtn;
3556        bool error;
3557        unsigned long ccb_cdb_phy;
3558
3559polling_hbb_ccb_retry:
3560        poll_count++;
3561        /* clear doorbell interrupt */
3562        writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3563        while (1) {
3564                index = reg->doneq_index;
3565                flag_ccb = reg->done_qbuffer[index];
3566                if (flag_ccb == 0) {
3567                        if (poll_ccb_done) {
3568                                rtn = SUCCESS;
3569                                break;
3570                        } else {
3571                                msleep(25);
3572                                if (poll_count > 100) {
3573                                        rtn = FAILED;
3574                                        break;
3575                                }
3576                                goto polling_hbb_ccb_retry;
3577                        }
3578                }
3579                reg->done_qbuffer[index] = 0;
3580                index++;
3581                /*if last index number set it to 0 */
3582                index %= ARCMSR_MAX_HBB_POSTQUEUE;
3583                reg->doneq_index = index;
3584                /* check if command done with no error*/
3585                ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3586                if (acb->cdb_phyadd_hipart)
3587                        ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3588                arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3589                ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3590                poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3591                if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3592                        if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3593                                printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3594                                        " poll command aborted successfully\n"
3595                                        ,acb->host->host_no
3596                                        ,ccb->pcmd->device->id
3597                                        ,(u32)ccb->pcmd->device->lun
3598                                        ,ccb);
3599                                ccb->pcmd->result = DID_ABORT << 16;
3600                                arcmsr_ccb_complete(ccb);
3601                                continue;
3602                        }
3603                        printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
3604                                " command done ccb = '0x%p'"
3605                                " ccboutstandingcount = %d\n"
3606                                , acb->host->host_no
3607                                , ccb
3608                                , atomic_read(&acb->ccboutstandingcount));
3609                        continue;
3610                } 
3611                error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3612                arcmsr_report_ccb_state(acb, ccb, error);
3613        }
3614        return rtn;
3615}
3616
3617static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
3618                struct CommandControlBlock *poll_ccb)
3619{
3620        struct MessageUnit_C __iomem *reg = acb->pmuC;
3621        uint32_t flag_ccb;
3622        struct ARCMSR_CDB *arcmsr_cdb;
3623        bool error;
3624        struct CommandControlBlock *pCCB;
3625        uint32_t poll_ccb_done = 0, poll_count = 0;
3626        int rtn;
3627        unsigned long ccb_cdb_phy;
3628
3629polling_hbc_ccb_retry:
3630        poll_count++;
3631        while (1) {
3632                if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
3633                        if (poll_ccb_done) {
3634                                rtn = SUCCESS;
3635                                break;
3636                        } else {
3637                                msleep(25);
3638                                if (poll_count > 100) {
3639                                        rtn = FAILED;
3640                                        break;
3641                                }
3642                                goto polling_hbc_ccb_retry;
3643                        }
3644                }
3645                flag_ccb = readl(&reg->outbound_queueport_low);
3646                ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3647                if (acb->cdb_phyadd_hipart)
3648                        ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3649                arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3650                pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);