linux/drivers/ata/sata_mv.c
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "1.28"

/*
 * module options
 */

static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
                 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
                 "IRQ coalescing time threshold in usecs");

enum {
        /* BARs are enumerated in terms of pci_resource_start() */
        MV_PRIMARY_BAR          = 0,    /* offset 0x10: memory space */
        MV_IO_BAR               = 2,    /* offset 0x18: IO space */
        MV_MISC_BAR             = 3,    /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ    = 0x10000,      /* 64KB */
        MV_MINOR_REG_AREA_SZ    = 0x2000,       /* 8KB */

        /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
        COAL_CLOCKS_PER_USEC    = 150,          /* for calculating COAL_TIMEs */
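        /* e.g. a 100 usec time threshold = 100 * 150 = 15000 internal clocks */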
        MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
        MAX_COAL_IO_COUNT       = 255,          /* completed I/O count */

        MV_PCI_REG_BASE         = 0,

        /*
         * Per-chip ("all ports") interrupt coalescing feature.
         * This is only for GEN_II / GEN_IIE hardware.
         *
         * Coalescing defers the interrupt until either the IO_THRESHOLD
         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
         */
        COAL_REG_BASE           = 0x18000,
        IRQ_COAL_CAUSE          = (COAL_REG_BASE + 0x08),
        ALL_PORTS_COAL_IRQ      = (1 << 4),     /* all ports irq event */

        IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
        IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),

        /*
         * Registers for the (unused here) transaction coalescing feature:
         */
        TRAN_COAL_CAUSE_LO      = (COAL_REG_BASE + 0x88),
        TRAN_COAL_CAUSE_HI      = (COAL_REG_BASE + 0x8c),

        SATAHC0_REG_BASE        = 0x20000,
        FLASH_CTL               = 0x1046c,
        GPIO_PORT_CTL           = 0x104f0,
        RESET_CFG               = 0x180d8,

        MV_PCI_REG_SZ           = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ        = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,         /* arbiter */
        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,

        MV_MAX_Q_DEPTH          = 32,
        MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT            = 256,
        MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),

        /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
        MV_PORT_HC_SHIFT        = 2,
        MV_PORTS_PER_HC         = (1 << MV_PORT_HC_SHIFT), /* 4 */
        /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
        MV_PORT_MASK            = (MV_PORTS_PER_HC - 1),   /* 3 */

        /* Host Flags */
        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */

        MV_COMMON_FLAGS         = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

        MV_GEN_I_FLAGS          = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

        MV_GEN_II_FLAGS         = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
                                  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

        MV_GEN_IIE_FLAGS        = MV_GEN_II_FLAGS | ATA_FLAG_AN,

        CRQB_FLAG_READ          = (1 << 0),
        CRQB_TAG_SHIFT          = 1,
        CRQB_IOID_SHIFT         = 6,    /* CRQB Gen-II/IIE IO Id shift */
        CRQB_PMP_SHIFT          = 12,   /* CRQB Gen-II/IIE PMP shift */
        CRQB_HOSTQ_SHIFT        = 17,   /* CRQB Gen-II/IIE HostQueTag shift */
        CRQB_CMD_ADDR_SHIFT     = 8,
        CRQB_CMD_CS             = (0x2 << 11),
        CRQB_CMD_LAST           = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT  = 8,
        CRPB_IOID_SHIFT_6       = 5,    /* CRPB Gen-II IO Id shift */
        CRPB_IOID_SHIFT_7       = 7,    /* CRPB Gen-IIE IO Id shift */

        EPRD_FLAG_END_OF_TBL    = (1 << 31),

        /* PCI interface registers */

        MV_PCI_COMMAND          = 0xc00,
        MV_PCI_COMMAND_MWRCOM   = (1 << 4),     /* PCI Master Write Combining */
        MV_PCI_COMMAND_MRDTRIG  = (1 << 7),     /* PCI Master Read Trigger */

        PCI_MAIN_CMD_STS        = 0xd30,
        STOP_PCI_MASTER         = (1 << 2),
        PCI_MASTER_EMPTY        = (1 << 3),
        GLOB_SFT_RST            = (1 << 4),

        MV_PCI_MODE             = 0xd00,
        MV_PCI_MODE_MASK        = 0x30,

        MV_PCI_EXP_ROM_BAR_CTL  = 0xd2c,
        MV_PCI_DISC_TIMER       = 0xd04,
        MV_PCI_MSI_TRIGGER      = 0xc38,
        MV_PCI_SERR_MASK        = 0xc28,
        MV_PCI_XBAR_TMOUT       = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS  = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE    = 0x1d48,
        MV_PCI_ERR_COMMAND      = 0x1d50,

        PCI_IRQ_CAUSE           = 0x1d58,
        PCI_IRQ_MASK            = 0x1d5c,
        PCI_UNMASK_ALL_IRQS     = 0x7fffff,     /* bits 22-0 */

        PCIE_IRQ_CAUSE          = 0x1900,
        PCIE_IRQ_MASK           = 0x1910,
        PCIE_UNMASK_ALL_IRQS    = 0x40a,        /* assorted bits */

        /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
        PCI_HC_MAIN_IRQ_CAUSE   = 0x1d60,
        PCI_HC_MAIN_IRQ_MASK    = 0x1d64,
        SOC_HC_MAIN_IRQ_CAUSE   = 0x20020,
        SOC_HC_MAIN_IRQ_MASK    = 0x20024,
        ERR_IRQ                 = (1 << 0),     /* shift by (2 * port #) */
        DONE_IRQ                = (1 << 1),     /* shift by (2 * port #) */
        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
        HC_SHIFT                = 9,            /* bits 9-17 = HC1's ports */
        DONE_IRQ_0_3            = 0x000000aa,   /* DONE_IRQ ports 0,1,2,3 */
        DONE_IRQ_4_7            = (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
        PCI_ERR                 = (1 << 18),
        TRAN_COAL_LO_DONE       = (1 << 19),    /* transaction coalescing */
        TRAN_COAL_HI_DONE       = (1 << 20),    /* transaction coalescing */
        PORTS_0_3_COAL_DONE     = (1 << 8),     /* HC0 IRQ coalescing */
        PORTS_4_7_COAL_DONE     = (1 << 17),    /* HC1 IRQ coalescing */
        ALL_PORTS_COAL_DONE     = (1 << 21),    /* GEN_II(E) IRQ coalescing */
        GPIO_INT                = (1 << 22),
        SELF_INT                = (1 << 23),
        TWSI_INT                = (1 << 24),
        HC_MAIN_RSVD            = (0x7f << 25), /* bits 31-25 */
        HC_MAIN_RSVD_5          = (0x1fff << 19), /* bits 31-19 */
        HC_MAIN_RSVD_SOC        = (0x3fffffb << 6),     /* bits 31-9, 7-6 */

        /* SATAHC registers */
        HC_CFG                  = 0x00,

        HC_IRQ_CAUSE            = 0x14,
        DMA_IRQ                 = (1 << 0),     /* shift by port # */
        HC_COAL_IRQ             = (1 << 4),     /* IRQ coalescing */
        DEV_IRQ                 = (1 << 8),     /* shift by port # */

        /*
         * Per-HC (Host-Controller) interrupt coalescing feature.
         * This is present on all chip generations.
         *
         * Coalescing defers the interrupt until either the IO_THRESHOLD
         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
         */
        HC_IRQ_COAL_IO_THRESHOLD        = 0x000c,
        HC_IRQ_COAL_TIME_THRESHOLD      = 0x0010,

        SOC_LED_CTRL            = 0x2c,
        SOC_LED_CTRL_BLINK      = (1 << 0),     /* Active LED blink */
        SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),   /* Multiplex dev presence */
                                                /*  with dev activity LED */

        /* Shadow block registers */
        SHD_BLK                 = 0x100,
        SHD_CTL_AST             = 0x20,         /* ofs from SHD_BLK */

        /* SATA registers */
        SATA_STATUS             = 0x300,  /* ctrl, err regs follow status */
        SATA_ACTIVE             = 0x350,
        FIS_IRQ_CAUSE           = 0x364,
        FIS_IRQ_CAUSE_AN        = (1 << 9),     /* async notification */

        LTMODE                  = 0x30c,        /* requires read-after-write */
        LTMODE_BIT8             = (1 << 8),     /* unknown, but necessary */

        PHY_MODE2               = 0x330,
        PHY_MODE3               = 0x310,

        PHY_MODE4               = 0x314,        /* requires read-after-write */
        PHY_MODE4_CFG_MASK      = 0x00000003,   /* phy internal config field */
        PHY_MODE4_CFG_VALUE     = 0x00000001,   /* phy internal config field */
        PHY_MODE4_RSVD_ZEROS    = 0x5de3fffa,   /* Gen2e always write zeros */
        PHY_MODE4_RSVD_ONES     = 0x00000005,   /* Gen2e always write ones */

        SATA_IFCTL              = 0x344,
        SATA_TESTCTL            = 0x348,
        SATA_IFSTAT             = 0x34c,
        VENDOR_UNIQUE_FIS       = 0x35c,

        FISCFG                  = 0x360,
        FISCFG_WAIT_DEV_ERR     = (1 << 8),     /* wait for host on DevErr */
        FISCFG_SINGLE_SYNC      = (1 << 16),    /* SYNC on DMA activation */

        PHY_MODE9_GEN2          = 0x398,
        PHY_MODE9_GEN1          = 0x39c,
        PHYCFG_OFS              = 0x3a0,        /* only in 65n devices */

        MV5_PHY_MODE            = 0x74,
        MV5_LTMODE              = 0x30,
        MV5_PHY_CTL             = 0x0C,
        SATA_IFCFG              = 0x050,

        MV_M2_PREAMP_MASK       = 0x7e0,

        /* Port registers */
        EDMA_CFG                = 0,
        EDMA_CFG_Q_DEPTH        = 0x1f,         /* max device queue depth */
        EDMA_CFG_NCQ            = (1 << 5),     /* for R/W FPDMA queued */
        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */
        EDMA_CFG_EDMA_FBS       = (1 << 16),    /* EDMA FIS-Based Switching */
        EDMA_CFG_FBS            = (1 << 26),    /* FIS-Based Switching */

        EDMA_ERR_IRQ_CAUSE      = 0x8,
        EDMA_ERR_IRQ_MASK       = 0xc,
        EDMA_ERR_D_PAR          = (1 << 0),     /* UDMA data parity err */
        EDMA_ERR_PRD_PAR        = (1 << 1),     /* UDMA PRD parity err */
        EDMA_ERR_DEV            = (1 << 2),     /* device error */
        EDMA_ERR_DEV_DCON       = (1 << 3),     /* device disconnect */
        EDMA_ERR_DEV_CON        = (1 << 4),     /* device connected */
        EDMA_ERR_SERR           = (1 << 5),     /* SError bits [WBDST] raised */
        EDMA_ERR_SELF_DIS       = (1 << 7),     /* Gen II/IIE self-disable */
        EDMA_ERR_SELF_DIS_5     = (1 << 8),     /* Gen I self-disable */
        EDMA_ERR_BIST_ASYNC     = (1 << 8),     /* BIST FIS or Async Notify */
        EDMA_ERR_TRANS_IRQ_7    = (1 << 8),     /* Gen IIE transprt layer irq */
        EDMA_ERR_CRQB_PAR       = (1 << 9),     /* CRQB parity error */
        EDMA_ERR_CRPB_PAR       = (1 << 10),    /* CRPB parity error */
        EDMA_ERR_INTRL_PAR      = (1 << 11),    /* internal parity error */
        EDMA_ERR_IORDY          = (1 << 12),    /* IORdy timeout */

        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),  /* link ctrl rx error */
        EDMA_ERR_LNK_CTRL_RX_0  = (1 << 13),    /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_RX_1  = (1 << 14),    /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),    /* fatal: caught SYNC */
        EDMA_ERR_LNK_CTRL_RX_3  = (1 << 16),    /* transient: FIS rx err */

        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),  /* link data rx error */

        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21), /* link ctrl tx error */
        EDMA_ERR_LNK_CTRL_TX_0  = (1 << 21),    /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_TX_1  = (1 << 22),    /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_TX_2  = (1 << 23),    /* transient: caught SYNC */
        EDMA_ERR_LNK_CTRL_TX_3  = (1 << 24),    /* transient: caught DMAT */
        EDMA_ERR_LNK_CTRL_TX_4  = (1 << 25),    /* transient: FIS collision */

        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26), /* link data tx error */

        EDMA_ERR_TRANS_PROTO    = (1 << 31),    /* transport protocol error */
        EDMA_ERR_OVERRUN_5      = (1 << 5),
        EDMA_ERR_UNDERRUN_5     = (1 << 6),

        EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
                                  EDMA_ERR_LNK_CTRL_RX_1 |
                                  EDMA_ERR_LNK_CTRL_RX_3 |
                                  EDMA_ERR_LNK_CTRL_TX,

        EDMA_EH_FREEZE          = EDMA_ERR_D_PAR |
                                  EDMA_ERR_PRD_PAR |
                                  EDMA_ERR_DEV_DCON |
                                  EDMA_ERR_DEV_CON |
                                  EDMA_ERR_SERR |
                                  EDMA_ERR_SELF_DIS |
                                  EDMA_ERR_CRQB_PAR |
                                  EDMA_ERR_CRPB_PAR |
                                  EDMA_ERR_INTRL_PAR |
                                  EDMA_ERR_IORDY |
                                  EDMA_ERR_LNK_CTRL_RX_2 |
                                  EDMA_ERR_LNK_DATA_RX |
                                  EDMA_ERR_LNK_DATA_TX |
                                  EDMA_ERR_TRANS_PROTO,

        EDMA_EH_FREEZE_5        = EDMA_ERR_D_PAR |
                                  EDMA_ERR_PRD_PAR |
                                  EDMA_ERR_DEV_DCON |
                                  EDMA_ERR_DEV_CON |
                                  EDMA_ERR_OVERRUN_5 |
                                  EDMA_ERR_UNDERRUN_5 |
                                  EDMA_ERR_SELF_DIS_5 |
                                  EDMA_ERR_CRQB_PAR |
                                  EDMA_ERR_CRPB_PAR |
                                  EDMA_ERR_INTRL_PAR |
                                  EDMA_ERR_IORDY,

        EDMA_REQ_Q_BASE_HI      = 0x10,
        EDMA_REQ_Q_IN_PTR       = 0x14,         /* also contains BASE_LO */

        EDMA_REQ_Q_OUT_PTR      = 0x18,
        EDMA_REQ_Q_PTR_SHIFT    = 5,

        EDMA_RSP_Q_BASE_HI      = 0x1c,
        EDMA_RSP_Q_IN_PTR       = 0x20,
        EDMA_RSP_Q_OUT_PTR      = 0x24,         /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT    = 3,

        EDMA_CMD                = 0x28,         /* EDMA command register */
        EDMA_EN                 = (1 << 0),     /* enable EDMA */
        EDMA_DS                 = (1 << 1),     /* disable EDMA; self-negated */
        EDMA_RESET              = (1 << 2),     /* reset eng/trans/link/phy */

        EDMA_STATUS             = 0x30,         /* EDMA engine status */
        EDMA_STATUS_CACHE_EMPTY = (1 << 6),     /* GenIIe command cache empty */
        EDMA_STATUS_IDLE        = (1 << 7),     /* GenIIe EDMA enabled/idle */

        EDMA_IORDY_TMOUT        = 0x34,
        EDMA_ARB_CFG            = 0x38,

        EDMA_HALTCOND           = 0x60,         /* GenIIe halt conditions */
        EDMA_UNKNOWN_RSVD       = 0x6C,         /* GenIIe unknown/reserved */

        BMDMA_CMD               = 0x224,        /* bmdma command register */
        BMDMA_STATUS            = 0x228,        /* bmdma status register */
        BMDMA_PRD_LOW           = 0x22c,        /* bmdma PRD addr 31:0 */
        BMDMA_PRD_HIGH          = 0x230,        /* bmdma PRD addr 63:32 */

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI          = (1 << 0),
        MV_HP_ERRATA_50XXB0     = (1 << 1),
        MV_HP_ERRATA_50XXB2     = (1 << 2),
        MV_HP_ERRATA_60X1B2     = (1 << 3),
        MV_HP_ERRATA_60X1C0     = (1 << 4),
        MV_HP_GEN_I             = (1 << 6),     /* Generation I: 50xx */
        MV_HP_GEN_II            = (1 << 7),     /* Generation II: 60xx */
        MV_HP_GEN_IIE           = (1 << 8),     /* Generation IIE: 6042/7042 */
        MV_HP_PCIE              = (1 << 9),     /* PCIe bus/regs: 7042 */
        MV_HP_CUT_THROUGH       = (1 << 10),    /* can use EDMA cut-through */
        MV_HP_FLAG_SOC          = (1 << 11),    /* SystemOnChip, no PCI */
        MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),   /* is led blinking enabled? */

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN      = (1 << 0),     /* is EDMA engine enabled? */
        MV_PP_FLAG_NCQ_EN       = (1 << 1),     /* is EDMA set up for NCQ? */
        MV_PP_FLAG_FBS_EN       = (1 << 2),     /* is EDMA set up for FBS? */
        MV_PP_FLAG_DELAYED_EH   = (1 << 3),     /* delayed dev err handling */
        MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),    /* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)          (0x20030 + ((i) << 4))
#define WINDOW_BASE(i)          (0x20034 + ((i) << 4))

enum {
        /* DMA boundary 0xffff is required by the s/g splitting
         * we need on /length/ in mv_fill_sg().
         */
        MV_DMA_BOUNDARY         = 0xffffU,

        /* mask of register bits containing lower 32 bits
         * of EDMA request queue DMA address
         */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        /* ditto, for response queue */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
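
/*
 * The CRQB/CRPB queue base addresses share registers with the queue
 * IN/OUT pointers (EDMA_REQ_Q_IN_PTR / EDMA_RSP_Q_OUT_PTR, which
 * "also contain BASE_LO" as noted above): the 1KB and 256B queue
 * alignments guarantee that the low bits of each base address are
 * zero, leaving room for the pointer index set up in mv_set_edma_ptrs().
 */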

enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
        chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32                  sg_addr;
        __le32                  sg_addr_hi;
        __le16                  ctrl_flags;
        __le16                  ata_cmd[11];
};

struct mv_crqb_iie {
        __le32                  addr;
        __le32                  addr_hi;
        __le32                  flags;
        __le32                  len;
        __le32                  ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16                  id;
        __le16                  flags;
        __le32                  tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32                  addr;
        __le32                  flags_size;
        __le32                  addr_hi;
        __le32                  reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
        u32                     fiscfg;
        u32                     ltmode;
        u32                     haltcond;
        u32                     unknown_rsvd;
};

struct mv_port_priv {
        struct mv_crqb          *crqb;
        dma_addr_t              crqb_dma;
        struct mv_crpb          *crpb;
        dma_addr_t              crpb_dma;
        struct mv_sg            *sg_tbl[MV_MAX_Q_DEPTH];
        dma_addr_t              sg_tbl_dma[MV_MAX_Q_DEPTH];

        unsigned int            req_idx;
        unsigned int            resp_idx;

        u32                     pp_flags;
        struct mv_cached_regs   cached;
        unsigned int            delayed_eh_pmp_map;
};

struct mv_port_signal {
        u32                     amps;
        u32                     pre;
};

struct mv_host_priv {
        u32                     hp_flags;
        unsigned int            board_idx;
        u32                     main_irq_mask;
        struct mv_port_signal   signal[8];
        const struct mv_hw_ops  *ops;
        int                     n_ports;
        void __iomem            *base;
        void __iomem            *main_irq_cause_addr;
        void __iomem            *main_irq_mask_addr;
        u32                     irq_cause_offset;
        u32                     irq_mask_offset;
        u32                     unmask_all_irqs;

#if defined(CONFIG_HAVE_CLK)
        struct clk              *clk;
        struct clk              **port_clks;
#endif
        /*
         * These consistent DMA memory pools give us guaranteed
         * alignment for hardware-accessed data structures,
         * and less memory waste in accomplishing the alignment.
         */
        struct dma_pool         *crqb_pool;
        struct dma_pool         *crpb_pool;
        struct dma_pool         *sg_tbl_pool;
};

struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
                                      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
                                      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
                                  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
                                      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
                                  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
                                        struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
        ATA_BASE_SHT(DRV_NAME),
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .dma_boundary           = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = MV_MAX_Q_DEPTH - 1,
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .dma_boundary           = MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
        .inherits               = &ata_sff_port_ops,

        .lost_interrupt         = ATA_OP_NULL,

        .qc_defer               = mv_qc_defer,
        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,

        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,
        .hardreset              = mv_hardreset,

        .scr_read               = mv5_scr_read,
        .scr_write              = mv5_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
        .inherits               = &ata_bmdma_port_ops,

        .lost_interrupt         = ATA_OP_NULL,

        .qc_defer               = mv_qc_defer,
        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,

        .dev_config             = mv6_dev_config,

        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,
        .hardreset              = mv_hardreset,
        .softreset              = mv_softreset,
        .pmp_hardreset          = mv_pmp_hardreset,
        .pmp_softreset          = mv_softreset,
        .error_handler          = mv_pmp_error_handler,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .sff_check_status       = mv_sff_check_status,
        .sff_irq_clear          = mv_sff_irq_clear,
        .check_atapi_dma        = mv_check_atapi_dma,
        .bmdma_setup            = mv_bmdma_setup,
        .bmdma_start            = mv_bmdma_start,
        .bmdma_stop             = mv_bmdma_stop,
        .bmdma_status           = mv_bmdma_status,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
        .inherits               = &mv6_ops,
        .dev_config             = ATA_OP_NULL,
        .qc_prep                = mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .flags          = MV_GEN_I_FLAGS,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_508x */
                .flags          = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_5080 */
                .flags          = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_604x */
                .flags          = MV_GEN_II_FLAGS,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_608x */
                .flags          = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_6042 */
                .flags          = MV_GEN_IIE_FLAGS,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
        {  /* chip_7042 */
                .flags          = MV_GEN_IIE_FLAGS,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
        {  /* chip_soc */
                .flags          = MV_GEN_IIE_FLAGS,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
        /* RocketRAID 1720/174x have different identifiers */
        { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
        { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
        { PCI_VDEVICE(TTI, 0x1742), chip_6042 },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        /* Adaptec 1430SA */
        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        /* Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        /* Highpoint RocketRAID PCIe series */
        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        { }                     /* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata             = mv5_phy_errata,
        .enable_leds            = mv5_enable_leds,
        .read_preamp            = mv5_read_preamp,
        .reset_hc               = mv5_reset_hc,
        .reset_flash            = mv5_reset_flash,
        .reset_bus              = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata             = mv6_phy_errata,
        .enable_leds            = mv6_enable_leds,
        .read_preamp            = mv6_read_preamp,
        .reset_hc               = mv6_reset_hc,
        .reset_flash            = mv6_reset_flash,
        .reset_bus              = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
        .phy_errata             = mv6_phy_errata,
        .enable_leds            = mv_soc_enable_leds,
        .read_preamp            = mv_soc_read_preamp,
        .reset_hc               = mv_soc_reset_hc,
        .reset_flash            = mv_soc_reset_flash,
        .reset_bus              = mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
        .phy_errata             = mv_soc_65n_phy_errata,
        .enable_leds            = mv_soc_enable_leds,
        .reset_hc               = mv_soc_reset_hc,
        .reset_flash            = mv_soc_reset_flash,
        .reset_bus              = mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
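 *
 * Example: port 5 is hardport 1 on HC1, so
 * shift = (1 * HC_SHIFT) + (1 * 2) = 11.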
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)    \
{                                                               \
        shift    = mv_hc_from_port(port) * HC_SHIFT;            \
        hardport = mv_hardport_from_port(port);                 \
        shift   += hardport * 2;                                \
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return  mv_hc_base_from_port(base, port) +
                MV_SATAHC_ARBTR_REG_SZ +
                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
        void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
        unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

        return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
        struct mv_host_priv *hpriv = host->private_data;
        return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *      Initialize the local cache of port registers,
 *      so that reading them over and over again can
 *      be avoided on the hotter paths of this driver.
 *      This saves a few microseconds each time we switch
 *      to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;

        pp->cached.fiscfg = readl(port_mmio + FISCFG);
        pp->cached.ltmode = readl(port_mmio + LTMODE);
        pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
        pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *      Write a new value to a cached register,
 *      but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
        if (new != *old) {
                unsigned long laddr;
                *old = new;
                /*
                 * Workaround for 88SX60x1-B2 FEr SATA#13:
                 * Read-after-write is needed to prevent generating 64-bit
                 * write cycles on the PCI bus for SATA interface registers
                 * at offsets ending in 0x4 or 0xc.
                 *
                 * Looks like a lot of fuss, but it avoids an unnecessary
                 * +1 usec read-after-write delay for unaffected registers.
                 */
                laddr = (long)addr & 0xffff;
                if (laddr >= 0x300 && laddr <= 0x33c) {
                        laddr &= 0x000f;
                        if (laddr == 0x4 || laddr == 0xc) {
                                writelfl(new, addr); /* read after write */
                                return;
                        }
                }
                writel(new, addr); /* unaffected by the errata */
        }
}
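
/*
 * Typical use, sketched (the real call sites appear later in this
 * driver, passing whichever cached field matches the register):
 *
 *      struct mv_port_priv *pp = ap->private_data;
 *      mv_write_cached_reg(mv_ap_base(ap) + FISCFG,
 *                          &pp->cached.fiscfg, new_fiscfg);
 */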

static void mv_set_edma_ptrs(void __iomem *port_mmio,
                             struct mv_host_priv *hpriv,
                             struct mv_port_priv *pp)
{
        u32 index;

        /*
         * initialize request queue
         */
        pp->req_idx &= MV_MAX_Q_DEPTH_MASK;     /* paranoia */
        index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

        WARN_ON(pp->crqb_dma & 0x3ff);
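        /*
         * Write the upper 32 bits of the queue base address.  The double
         * 16-bit shift avoids undefined behaviour when dma_addr_t is
         * only 32 bits wide (a single ">> 32" would be UB there).
         */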
        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_REQ_Q_IN_PTR);
        writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

        /*
         * initialize response queue
         */
        pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;    /* paranoia */
        index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

        WARN_ON(pp->crpb_dma & 0xff);
        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
        writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
        /*
         * When writing to the main_irq_mask in hardware,
         * we must ensure exclusivity between the interrupt coalescing bits
         * and the corresponding individual port DONE_IRQ bits.
         *
         * Note that this register is really an "IRQ enable" register,
         * not an "IRQ mask" register as Marvell's naming might suggest.
         */
        if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
                mask &= ~DONE_IRQ_0_3;
        if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
                mask &= ~DONE_IRQ_4_7;
        writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
                                 u32 disable_bits, u32 enable_bits)
{
        struct mv_host_priv *hpriv = host->private_data;
        u32 old_mask, new_mask;

        old_mask = hpriv->main_irq_mask;
        new_mask = (old_mask & ~disable_bits) | enable_bits;
        if (new_mask != old_mask) {
                hpriv->main_irq_mask = new_mask;
                mv_write_main_irq_mask(new_mask, hpriv);
        }
}

static void mv_enable_port_irqs(struct ata_port *ap,
                                     unsigned int port_bits)
{
        unsigned int shift, hardport, port = ap->port_no;
        u32 disable_bits, enable_bits;

        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

        disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
        enable_bits  = port_bits << shift;
        mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
                                          void __iomem *port_mmio,
                                          unsigned int port_irqs)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        int hardport = mv_hardport_from_port(ap->port_no);
        void __iomem *hc_mmio = mv_hc_base_from_port(
                                mv_host_base(ap->host), ap->port_no);
        u32 hc_irq_cause;

        /* clear EDMA event indicators, if any */
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

        /* clear pending irq events */
        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

        /* clear FIS IRQ Cause */
        if (IS_GEN_IIE(hpriv))
                writelfl(0, port_mmio + FIS_IRQ_CAUSE);

        mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
                                  unsigned int count, unsigned int usecs)
{
        struct mv_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->base, *hc_mmio;
        u32 coal_enable = 0;
        unsigned long flags;
        unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
        const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                                        ALL_PORTS_COAL_DONE;

        /* Disable IRQ coalescing if either threshold is zero */
        if (!usecs || !count) {
                clks = count = 0;
        } else {
                /* Respect maximum limits of the hardware */
                clks = usecs * COAL_CLOCKS_PER_USEC;
                if (clks > MAX_COAL_TIME_THRESHOLD)
                        clks = MAX_COAL_TIME_THRESHOLD;
                if (count > MAX_COAL_IO_COUNT)
                        count = MAX_COAL_IO_COUNT;
        }

        spin_lock_irqsave(&host->lock, flags);
        mv_set_main_irq_mask(host, coal_disable, 0);

        if (is_dual_hc && !IS_GEN_I(hpriv)) {
                /*
                 * GEN_II/GEN_IIE with dual host controllers:
                 * one set of global thresholds for the entire chip.
                 */
                writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
                writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
                /* clear leftover coal IRQ bit */
                writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
                if (count)
                        coal_enable = ALL_PORTS_COAL_DONE;
                clks = count = 0; /* force clearing of regular regs below */
        }

        /*
         * All chips: independent thresholds for each HC on the chip.
         */
        hc_mmio = mv_hc_base_from_port(mmio, 0);
        writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
        writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
        writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
        if (count)
                coal_enable |= PORTS_0_3_COAL_DONE;
        if (is_dual_hc) {
                hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
                writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
                writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
                writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
                if (count)
                        coal_enable |= PORTS_4_7_COAL_DONE;
        }

        mv_set_main_irq_mask(host, 0, coal_enable);
        spin_unlock_irqrestore(&host->lock, flags);
}
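
/*
 * The irq_coalescing_io_count and irq_coalescing_usecs module
 * parameters (see "module options" above) are the intended inputs
 * to this routine when the host is set up.
 */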

/**
 *      mv_start_edma - Enable eDMA engine
 *      @ap: the port for which to enable eDMA
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being issued
 *
 *      Enable eDMA for this port, reconfiguring the engine first
 *      if it was previously set up for the wrong NCQ mode.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
                         struct mv_port_priv *pp, u8 protocol)
{
        int want_ncq = (protocol == ATA_PROT_NCQ);

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
                if (want_ncq != using_ncq)
                        mv_stop_edma(ap);
        }
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                struct mv_host_priv *hpriv = ap->host->private_data;

                mv_edma_cfg(ap, want_ncq, 1);

                mv_set_edma_ptrs(port_mmio, hpriv, pp);
                mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

                writelfl(EDMA_EN, port_mmio + EDMA_CMD);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
        const int per_loop = 5, timeout = (15 * 1000 / per_loop);
        int i;

        /*
         * Wait for the EDMA engine to finish transactions in progress.
         * No idea what a good "timeout" value might be, but measurements
         * indicate that it often requires hundreds of microseconds
         * with two drives in-use.  So we use the 15msec value above
         * as a rough guess at what even more drives might require.
         */
        for (i = 0; i < timeout; ++i) {
                u32 edma_stat = readl(port_mmio + EDMA_STATUS);
                if ((edma_stat & empty_idle) == empty_idle)
                        break;
                udelay(per_loop);
        }
        /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
        int i;

        /* Disable eDMA.  The disable bit auto clears. */
        writelfl(EDMA_DS, port_mmio + EDMA_CMD);

        /* Wait for the chip to confirm eDMA is off (up to ~100 msec). */
        for (i = 10000; i > 0; i--) {
                u32 reg = readl(port_mmio + EDMA_CMD);
                if (!(reg & EDMA_EN))
                        return 0;
                udelay(10);
        }
        return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        int err = 0;

        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
                return 0;
        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        mv_wait_for_edma_empty_idle(ap);
        if (mv_stop_edma_engine(port_mmio)) {
                ata_port_err(ap, "Unable to stop eDMA\n");
                err = -EIO;
        }
        mv_edma_cfg(ap, 0, 0);
        return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;
        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;
        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (0 > port) {
                start_hc = start_port = 0;
                num_ports = 8;          /* should be benign for 4 port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base+0xc00, 0x3c);
        mv_dump_mem(mmio_base+0xd00, 0x34);
        mv_dump_mem(mmio_base+0xf00, 0x4);
        mv_dump_mem(mmio_base+0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base+0x300, 0x60);
        }
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

1321        switch (sc_reg_in) {
1322        case SCR_STATUS:
1323        case SCR_CONTROL:
1324        case SCR_ERROR:
1325                ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1326                break;
1327        case SCR_ACTIVE:
1328                ofs = SATA_ACTIVE;   /* active is not with the others */
1329                break;
1330        default:
1331                ofs = 0xffffffffU;
1332                break;
1333        }
1334        return ofs;
1335}
1336
1337static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1338{
1339        unsigned int ofs = mv_scr_offset(sc_reg_in);
1340
1341        if (ofs != 0xffffffffU) {
1342                *val = readl(mv_ap_base(link->ap) + ofs);
1343                return 0;
1344        }
1345        return -EINVAL;
1346}
1347
1348static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1349{
1350        unsigned int ofs = mv_scr_offset(sc_reg_in);
1351
1352        if (ofs != 0xffffffffU) {
1353                void __iomem *addr = mv_ap_base(link->ap) + ofs;
1354                if (sc_reg_in == SCR_CONTROL) {
1355                        /*
1356                         * Workaround for 88SX60x1 FEr SATA#26:
1357                         *
1358                         * COMRESETs have to take care not to accidentally
1359                         * put the drive to sleep when writing SCR_CONTROL.
1360                         * Setting bits 12..15 prevents this problem.
1361                         *
1362                 * So if we see an outbound COMRESET, set those bits.
1363                         * Ditto for the followup write that clears the reset.
1364                         *
1365                         * The proprietary driver does this for
1366                         * all chip versions, and so do we.
1367                         */
1368                        if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1369                                val |= 0xf000;
1370                }
1371                writelfl(val, addr);
1372                return 0;
1373        }
1374        return -EINVAL;
1375}
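
/*
 * Editorial sketch: the SCR accessors above are what libata's generic
 * helpers end up calling.  A hypothetical direct use, checking link
 * presence via the DET field of SStatus (SATA spec encoding):
 */
static bool example_link_is_up(struct ata_link *link)
{
	u32 sstatus;

	if (mv_scr_read(link, SCR_STATUS, &sstatus))
		return false;
	return (sstatus & 0xf) == 0x3;	/* DET==3: device present, PHY up */
}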
1376
1377static void mv6_dev_config(struct ata_device *adev)
1378{
1379        /*
1380         * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1381         *
1382         * Gen-II does not support NCQ over a port multiplier
1383         *  (no FIS-based switching).
1384         */
1385        if (adev->flags & ATA_DFLAG_NCQ) {
1386                if (sata_pmp_attached(adev->link->ap)) {
1387                        adev->flags &= ~ATA_DFLAG_NCQ;
1388                        ata_dev_info(adev,
1389                                "NCQ disabled for command-based switching\n");
1390                }
1391        }
1392}
1393
1394static int mv_qc_defer(struct ata_queued_cmd *qc)
1395{
1396        struct ata_link *link = qc->dev->link;
1397        struct ata_port *ap = link->ap;
1398        struct mv_port_priv *pp = ap->private_data;
1399
1400        /*
1401         * Don't allow new commands if we're in a delayed EH state
1402         * for NCQ and/or FIS-based switching.
1403         */
1404        if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1405                return ATA_DEFER_PORT;
1406
1407        /* PIO commands need an exclusive link: no other commands [DMA or PIO]
1408         * can run concurrently.
1409         * We set excl_link when we want to send a PIO command in DMA mode,
1410         * or a non-NCQ command in NCQ mode.
1411         * When we then receive a command from that link, and there are no
1412         * outstanding commands, we mark a flag to clear excl_link and let
1413         * the command go through.
1414         */
1415        if (unlikely(ap->excl_link)) {
1416                if (link == ap->excl_link) {
1417                        if (ap->nr_active_links)
1418                                return ATA_DEFER_PORT;
1419                        qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1420                        return 0;
1421                } else
1422                        return ATA_DEFER_PORT;
1423        }
1424
1425        /*
1426         * If the port is completely idle, then allow the new qc.
1427         */
1428        if (ap->nr_active_links == 0)
1429                return 0;
1430
1431        /*
1432         * The port is operating in host queuing mode (EDMA) with NCQ
1433         * enabled, so allow multiple NCQ commands.  EDMA also allows
1434         * queueing multiple DMA commands, but the libata core currently
1435         * doesn't allow it.
1436         */
1437        if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1438            (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1439                if (ata_is_ncq(qc->tf.protocol))
1440                        return 0;
1441                else {
1442                        ap->excl_link = link;
1443                        return ATA_DEFER_PORT;
1444                }
1445        }
1446
1447        return ATA_DEFER_PORT;
1448}
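
/*
 * Editorial summary of the mv_qc_defer() decisions above:
 *
 *	delayed EH flagged                  -> defer
 *	excl_link set, qc from other link   -> defer
 *	excl_link set, qc from that link    -> defer unless port is idle
 *	port idle                           -> issue
 *	EDMA+NCQ active, NCQ command        -> issue
 *	EDMA+NCQ active, non-NCQ command    -> set excl_link, defer
 *	anything else                       -> defer
 */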
1449
1450static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1451{
1452        struct mv_port_priv *pp = ap->private_data;
1453        void __iomem *port_mmio;
1454
1455        u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1456        u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1457        u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1458
1459        ltmode   = *old_ltmode & ~LTMODE_BIT8;
1460        haltcond = *old_haltcond | EDMA_ERR_DEV;
1461
1462        if (want_fbs) {
1463                fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1464                ltmode = *old_ltmode | LTMODE_BIT8;
1465                if (want_ncq)
1466                        haltcond &= ~EDMA_ERR_DEV;
1467                else
1468                        fiscfg |=  FISCFG_WAIT_DEV_ERR;
1469        } else {
1470                fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1471        }
1472
1473        port_mmio = mv_ap_base(ap);
1474        mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1475        mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1476        mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1477}
1478
1479static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1480{
1481        struct mv_host_priv *hpriv = ap->host->private_data;
1482        u32 old, new;
1483
1484        /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1485        old = readl(hpriv->base + GPIO_PORT_CTL);
1486        if (want_ncq)
1487                new = old | (1 << 22);
1488        else
1489                new = old & ~(1 << 22);
1490        if (new != old)
1491                writel(new, hpriv->base + GPIO_PORT_CTL);
1492}
1493
1494/**
1495 *      mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1496 *      @ap: Port being initialized
 *      @enable_bmdma: enable (nonzero) or disable (zero) basic DMA
1497 *
1498 *      There are two DMA modes on these chips:  basic DMA, and EDMA.
1499 *
1500 *      Bit-0 of the "EDMA RESERVED" register enables/disables use
1501 *      of basic DMA on the GEN_IIE versions of the chips.
1502 *
1503 *      This bit survives EDMA resets: it must be set for basic DMA
1504 *      to function, and should be cleared when EDMA is active.
1505 */
1506static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1507{
1508        struct mv_port_priv *pp = ap->private_data;
1509        u32 new, *old = &pp->cached.unknown_rsvd;
1510
1511        if (enable_bmdma)
1512                new = *old | 1;
1513        else
1514                new = *old & ~1;
1515        mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1516}
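
/*
 * Editorial note: mv_write_cached_reg() (defined earlier in this file)
 * implements a write-through cache over these sticky config registers.
 * A minimal sketch of that idiom, assuming the helper simply skips
 * redundant MMIO writes (the real helper may do more):
 */
static void example_write_cached(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;		/* update the cached copy */
		writel(new, addr);	/* and the hardware register */
	}
}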
1517
1518/*
1519 * SOC chips have an issue whereby the HDD LEDs don't always blink
1520 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1521 * of the SOC takes care of it, generating a steady blink rate when
1522 * any drive on the chip is active.
1523 *
1524 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1525 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1526 *
1527 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1528 * LED operation works then, and provides better (more accurate) feedback.
1529 *
1530 * Note that this code assumes that an SOC never has more than one HC onboard.
1531 */
1532static void mv_soc_led_blink_enable(struct ata_port *ap)
1533{
1534        struct ata_host *host = ap->host;
1535        struct mv_host_priv *hpriv = host->private_data;
1536        void __iomem *hc_mmio;
1537        u32 led_ctrl;
1538
1539        if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1540                return;
1541        hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1542        hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1543        led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1544        writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1545}
1546
1547static void mv_soc_led_blink_disable(struct ata_port *ap)
1548{
1549        struct ata_host *host = ap->host;
1550        struct mv_host_priv *hpriv = host->private_data;
1551        void __iomem *hc_mmio;
1552        u32 led_ctrl;
1553        unsigned int port;
1554
1555        if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1556                return;
1557
1558        /* disable led-blink only if no ports are using NCQ */
1559        for (port = 0; port < hpriv->n_ports; port++) {
1560                struct ata_port *this_ap = host->ports[port];
1561                struct mv_port_priv *pp = this_ap->private_data;
1562
1563                if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1564                        return;
1565        }
1566
1567        hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1568        hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1569        led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1570        writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1571}
1572
1573static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1574{
1575        u32 cfg;
1576        struct mv_port_priv *pp    = ap->private_data;
1577        struct mv_host_priv *hpriv = ap->host->private_data;
1578        void __iomem *port_mmio    = mv_ap_base(ap);
1579
1580        /* set up non-NCQ EDMA configuration */
1581        cfg = EDMA_CFG_Q_DEPTH;         /* always 0x1f for *all* chips */
1582        pp->pp_flags &=
1583          ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1584
1585        if (IS_GEN_I(hpriv))
1586                cfg |= (1 << 8);        /* enab config burst size mask */
1587
1588        else if (IS_GEN_II(hpriv)) {
1589                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1590                mv_60x1_errata_sata25(ap, want_ncq);
1591
1592        } else if (IS_GEN_IIE(hpriv)) {
1593                int want_fbs = sata_pmp_attached(ap);
1594                /*
1595                 * Possible future enhancement:
1596                 *
1597                 * The chip can use FBS with non-NCQ, if we allow it.
1598                 * But first we need to have the error handling in place
1599                 * for this mode (datasheet section 7.3.15.4.2.3).
1600                 * So disallow non-NCQ FBS for now.
1601                 */
1602                want_fbs &= want_ncq;
1603
1604                mv_config_fbs(ap, want_ncq, want_fbs);
1605
1606                if (want_fbs) {
1607                        pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1608                        cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1609                }
1610
1611                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
1612                if (want_edma) {
1613                        cfg |= (1 << 22); /* enab 4-entry host queue cache */
1614                        if (!IS_SOC(hpriv))
1615                                cfg |= (1 << 18); /* enab early completion */
1616                }
1617                if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1618                        cfg |= (1 << 17); /* enable cut-through (no store & fwd) */
1619                mv_bmdma_enable_iie(ap, !want_edma);
1620
1621                if (IS_SOC(hpriv)) {
1622                        if (want_ncq)
1623                                mv_soc_led_blink_enable(ap);
1624                        else
1625                                mv_soc_led_blink_disable(ap);
1626                }
1627        }
1628
1629        if (want_ncq) {
1630                cfg |= EDMA_CFG_NCQ;
1631                pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1632        }
1633
1634        writelfl(cfg, port_mmio + EDMA_CFG);
1635}
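
/*
 * Worked example (editorial): for a GEN_IIE non-SOC chip with NCQ and
 * EDMA wanted, and no port multiplier attached, the code above builds:
 *
 *	cfg  = EDMA_CFG_Q_DEPTH;	queue depth field (all chips)
 *	cfg |= (1 << 23);		don't mask PM field of rx'd FIS
 *	cfg |= (1 << 22);		4-entry host queue cache
 *	cfg |= (1 << 18);		early completion
 *	cfg |= EDMA_CFG_NCQ;		NCQ mode
 *
 * plus (1 << 17) when MV_HP_CUT_THROUGH is set, and then writes the
 * result to EDMA_CFG with a read-back flush (writelfl).
 */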
1636
1637static void mv_port_free_dma_mem(struct ata_port *ap)
1638{
1639        struct mv_host_priv *hpriv = ap->host->private_data;
1640        struct mv_port_priv *pp = ap->private_data;
1641        int tag;
1642
1643        if (pp->crqb) {
1644                dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1645                pp->crqb = NULL;
1646        }
1647        if (pp->crpb) {
1648                dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1649                pp->crpb = NULL;
1650        }
1651        /*
1652         * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1653         * For later hardware, we have one unique sg_tbl per NCQ tag.
1654         */
1655        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1656                if (pp->sg_tbl[tag]) {
1657                        if (tag == 0 || !IS_GEN_I(hpriv))
1658                                dma_pool_free(hpriv->sg_tbl_pool,
1659                                              pp->sg_tbl[tag],
1660                                              pp->sg_tbl_dma[tag]);
1661                        pp->sg_tbl[tag] = NULL;
1662                }
1663        }
1664}
1665
1666/**
1667 *      mv_port_start - Port specific init/start routine.
1668 *      @ap: ATA channel to manipulate
1669 *
1670 *      Allocate and point to DMA memory, init port private memory,
1671 *      zero indices.
1672 *
1673 *      LOCKING:
1674 *      Inherited from caller.
1675 */
1676static int mv_port_start(struct ata_port *ap)
1677{
1678        struct device *dev = ap->host->dev;
1679        struct mv_host_priv *hpriv = ap->host->private_data;
1680        struct mv_port_priv *pp;
1681        unsigned long flags;
1682        int tag;
1683
1684        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1685        if (!pp)
1686                return -ENOMEM;
1687        ap->private_data = pp;
1688
1689        pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1690        if (!pp->crqb)
1691                return -ENOMEM;
1692        memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1693
1694        pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1695        if (!pp->crpb)
1696                goto out_port_free_dma_mem;
1697        memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1698
1699        /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1700        if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1701                ap->flags |= ATA_FLAG_AN;
1702        /*
1703         * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1704         * For later hardware, we need one unique sg_tbl per NCQ tag.
1705         */
1706        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1707                if (tag == 0 || !IS_GEN_I(hpriv)) {
1708                        pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1709                                              GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1710                        if (!pp->sg_tbl[tag])
1711                                goto out_port_free_dma_mem;
1712                } else {
1713                        pp->sg_tbl[tag]     = pp->sg_tbl[0];
1714                        pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1715                }
1716        }
1717
1718        spin_lock_irqsave(ap->lock, flags);
1719        mv_save_cached_regs(ap);
1720        mv_edma_cfg(ap, 0, 0);
1721        spin_unlock_irqrestore(ap->lock, flags);
1722
1723        return 0;
1724
1725out_port_free_dma_mem:
1726        mv_port_free_dma_mem(ap);
1727        return -ENOMEM;
1728}
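
/*
 * Editorial sketch (assumption): the dma pools used above are created
 * once per host, before any port starts; the real setup lives outside
 * this excerpt.  Roughly, using the managed pool API:
 */
static int example_create_crqb_pool(struct device *dev,
				    struct mv_host_priv *hpriv)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	return hpriv->crqb_pool ? 0 : -ENOMEM;
}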
1729
1730/**
1731 *      mv_port_stop - Port specific cleanup/stop routine.
1732 *      @ap: ATA channel to manipulate
1733 *
1734 *      Stop DMA, cleanup port memory.
1735 *
1736 *      LOCKING:
1737 *      This routine uses the host lock to protect the DMA stop.
1738 */
1739static void mv_port_stop(struct ata_port *ap)
1740{
1741        unsigned long flags;
1742
1743        spin_lock_irqsave(ap->lock, flags);
1744        mv_stop_edma(ap);
1745        mv_enable_port_irqs(ap, 0);
1746        spin_unlock_irqrestore(ap->lock, flags);
1747        mv_port_free_dma_mem(ap);
1748}
1749
1750/**
1751 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1752 *      @qc: queued command whose SG list to source from
1753 *
1754 *      Populate the SG list and mark the last entry.
1755 *
1756 *      LOCKING:
1757 *      Inherited from caller.
1758 */
1759static void mv_fill_sg(struct ata_queued_cmd *qc)
1760{
1761        struct mv_port_priv *pp = qc->ap->private_data;
1762        struct scatterlist *sg;
1763        struct mv_sg *mv_sg, *last_sg = NULL;
1764        unsigned int si;
1765
1766        mv_sg = pp->sg_tbl[qc->tag];
1767        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1768                dma_addr_t addr = sg_dma_address(sg);
1769                u32 sg_len = sg_dma_len(sg);
1770
1771                while (sg_len) {
1772                        u32 offset = addr & 0xffff;
1773                        u32 len = sg_len;
1774
1775                        if (offset + len > 0x10000)
1776                                len = 0x10000 - offset;
1777
1778                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1779                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1780                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1781                        mv_sg->reserved = 0;
1782
1783                        sg_len -= len;
1784                        addr += len;
1785
1786                        last_sg = mv_sg;
1787                        mv_sg++;
1788                }
1789        }
1790
1791        if (likely(last_sg))
1792                last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1793        mb(); /* ensure data structure is visible to the chipset */
1794}
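
/*
 * Worked example (editorial): the inner loop above never lets one ePRD
 * entry cross a 64KB boundary.  A segment with addr=0x1fff0 and
 * sg_len=0x20 becomes two entries:
 *
 *	addr=0x1fff0, len=0x10		(offset 0xfff0, capped at 64KB)
 *	addr=0x20000, len=0x10
 *
 * Only the low 16 bits of the length are stored, so a full 64KB entry
 * is encoded as 0; presumably the hardware interprets that as 64KB,
 * the usual PRD convention.
 */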
1795
1796static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1797{
1798        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1799                (last ? CRQB_CMD_LAST : 0);
1800        *cmdw = cpu_to_le16(tmp);
1801}
1802
1803/**
1804 *      mv_sff_irq_clear - Clear hardware interrupt after DMA.
1805 *      @ap: Port associated with this ATA transaction.
1806 *
1807 *      We need this only for ATAPI bmdma transactions,
1808 *      as otherwise we experience spurious interrupts
1809 *      after libata-sff handles the bmdma interrupts.
1810 */
1811static void mv_sff_irq_clear(struct ata_port *ap)
1812{
1813        mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1814}
1815
1816/**
1817 *      mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1818 *      @qc: queued command to check for chipset/DMA compatibility.
1819 *
1820 *      The bmdma engines cannot handle speculative data sizes
1821 *      (bytecount under/over flow).  So only allow DMA for
1822 *      data transfer commands with known data sizes.
1823 *
1824 *      LOCKING:
1825 *      Inherited from caller.
1826 */
1827static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1828{
1829        struct scsi_cmnd *scmd = qc->scsicmd;
1830
1831        if (scmd) {
1832                switch (scmd->cmnd[0]) {
1833                case READ_6:
1834                case READ_10:
1835                case READ_12:
1836                case WRITE_6:
1837                case WRITE_10:
1838                case WRITE_12:
1839                case GPCMD_READ_CD:
1840                case GPCMD_SEND_DVD_STRUCTURE:
1841                case GPCMD_SEND_CUE_SHEET:
1842                        return 0; /* DMA is safe */
1843                }
1844        }
1845        return -EOPNOTSUPP; /* use PIO instead */
1846}
1847
1848/**
1849 *      mv_bmdma_setup - Set up BMDMA transaction
1850 *      @qc: queued command to prepare DMA for.
1851 *
1852 *      LOCKING:
1853 *      Inherited from caller.
1854 */
1855static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1856{
1857        struct ata_port *ap = qc->ap;
1858        void __iomem *port_mmio = mv_ap_base(ap);
1859        struct mv_port_priv *pp = ap->private_data;
1860
1861        mv_fill_sg(qc);
1862
1863        /* clear all DMA cmd bits */
1864        writel(0, port_mmio + BMDMA_CMD);
1865
1866        /* load PRD table addr. */
1867        writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1868                port_mmio + BMDMA_PRD_HIGH);
1869        writelfl(pp->sg_tbl_dma[qc->tag],
1870                port_mmio + BMDMA_PRD_LOW);
1871
1872        /* issue r/w command */
1873        ap->ops->sff_exec_command(ap, &qc->tf);
1874}
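
/*
 * Editorial note: the "(x >> 16) >> 16" idiom above extracts the high
 * 32 bits of a dma_addr_t without triggering undefined behaviour when
 * dma_addr_t is only 32 bits wide (a plain ">> 32" would be UB there,
 * and would also draw a compiler warning).
 */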
1875
1876/**
1877 *      mv_bmdma_start - Start a BMDMA transaction
1878 *      @qc: queued command to start DMA on.
1879 *
1880 *      LOCKING:
1881 *      Inherited from caller.
1882 */
1883static void mv_bmdma_start(struct ata_queued_cmd *qc)
1884{
1885        struct ata_port *ap = qc->ap;
1886        void __iomem *port_mmio = mv_ap_base(ap);
1887        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1888        u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1889
1890        /* start host DMA transaction */
1891        writelfl(cmd, port_mmio + BMDMA_CMD);
1892}
1893
1894/**
1895 *      mv_bmdma_stop_ap - Stop BMDMA transfer
1896 *      @ap: port on which to stop DMA.
1897 *
1898 *      Clears the ATA_DMA_START flag in the bmdma control register
1899 *
1900 *      LOCKING:
1901 *      Inherited from caller.
1902 */
1903static void mv_bmdma_stop_ap(struct ata_port *ap)
1904{
1905        void __iomem *port_mmio = mv_ap_base(ap);
1906        u32 cmd;
1907
1908        /* clear start/stop bit */
1909        cmd = readl(port_mmio + BMDMA_CMD);
1910        if (cmd & ATA_DMA_START) {
1911                cmd &= ~ATA_DMA_START;
1912                writelfl(cmd, port_mmio + BMDMA_CMD);
1913
1914                /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1915                ata_sff_dma_pause(ap);
1916        }
1917}
1918
1919static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1920{
1921        mv_bmdma_stop_ap(qc->ap);
1922}
1923
1924/**
1925 *      mv_bmdma_status - Read BMDMA status
1926 *      @ap: port for which to retrieve DMA status.
1927 *
1928 *      Read and return the equivalent of the sff BMDMA status register.
1929 *
1930 *      LOCKING:
1931 *      Inherited from caller.
1932 */
1933static u8 mv_bmdma_status(struct ata_port *ap)
1934{
1935        void __iomem *port_mmio = mv_ap_base(ap);
1936        u32 reg, status;
1937
1938        /*
1939         * Other bits are valid only if ATA_DMA_ACTIVE==0,
1940         * and the hardware has no ATA_DMA_INTR bit (we synthesize it).
1941         */
1942        reg = readl(port_mmio + BMDMA_STATUS);
1943        if (reg & ATA_DMA_ACTIVE)
1944                status = ATA_DMA_ACTIVE;
1945        else if (reg & ATA_DMA_ERR)
1946                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1947        else {
1948                /*
1949                 * Just because DMA_ACTIVE is 0 (DMA completed),
1950                 * this does _not_ mean the device is "done".
1951                 * So we should not yet be signalling ATA_DMA_INTR
1952                 * in some cases, e.g. DSM/TRIM, and perhaps others.
1953                 */
1954                mv_bmdma_stop_ap(ap);
1955                if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1956                        status = 0;
1957                else
1958                        status = ATA_DMA_INTR;
1959        }
1960        return status;
1961}
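
/*
 * Editorial summary of the status synthesized above:
 *
 *	DMA still active		-> ATA_DMA_ACTIVE
 *	DMA error			-> error bits | ATA_DMA_INTR
 *	DMA done, device still BUSY	-> 0	(e.g. DSM/TRIM)
 *	DMA done, device not BUSY	-> ATA_DMA_INTR
 */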
1962
1963static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1964{
1965        struct ata_taskfile *tf = &qc->tf;
1966        /*
1967         * Workaround for 88SX60x1 FEr SATA#24.
1968         *
1969         * Chip may corrupt WRITEs if multi_count >= 4kB.
1970         * Note that READs are unaffected.
1971         *
1972         * It's not clear if this errata really means "4K bytes",
1973         * or if it always happens for multi_count > 7
1974         * regardless of device sector_size.
1975         *
1976         * So, for safety, any write with multi_count > 7
1977         * gets converted here into a regular PIO write instead:
1978         */
1979        if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1980                if (qc->dev->multi_count > 7) {
1981                        switch (tf->command) {
1982                        case ATA_CMD_WRITE_MULTI:
1983                                tf->command = ATA_CMD_PIO_WRITE;
1984                                break;
1985                        case ATA_CMD_WRITE_MULTI_FUA_EXT:
1986                                tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1987                                /* fall through */
1988                        case ATA_CMD_WRITE_MULTI_EXT:
1989                                tf->command = ATA_CMD_PIO_WRITE_EXT;
1990                                break;
1991                        }
1992                }
1993        }
1994}
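
/*
 * Example (editorial): with 512-byte sectors, multi_count = 8 means
 * 4KB per DRQ block, so a WRITE MULTIPLE is demoted above to
 * ATA_CMD_PIO_WRITE; multi_count = 7 (3.5KB) is left untouched.
 */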
1995
1996/**
1997 *      mv_qc_prep - Host specific command preparation.
1998 *      @qc: queued command to prepare
1999 *
2000 *      This routine simply redirects to the general purpose routine
2001 *      if command is not DMA.  Else, it handles prep of the CRQB
2002 *      (command request block), does some sanity checking, and calls
2003 *      the SG load routine.
2004 *
2005 *      LOCKING:
2006 *      Inherited from caller.
2007 */
2008static void mv_qc_prep(struct ata_queued_cmd *qc)
2009{
2010        struct ata_port *ap = qc->ap;
2011        struct mv_port_priv *pp = ap->private_data;
2012        __le16 *cw;
2013        struct ata_taskfile *tf = &qc->tf;
2014        u16 flags = 0;
2015        unsigned in_index;
2016
2017        switch (tf->protocol) {
2018        case ATA_PROT_DMA:
2019                if (tf->command == ATA_CMD_DSM)
2020                        return;
2021                /* fall through */
2022        case ATA_PROT_NCQ:
2023                break;  /* continue below */
2024        case ATA_PROT_PIO:
2025                mv_rw_multi_errata_sata24(qc);
2026                return;
2027        default:
2028                return;
2029        }
2030
2031        /* Fill in command request block
2032         */
2033        if (!(tf->flags & ATA_TFLAG_WRITE))
2034                flags |= CRQB_FLAG_READ;
2035        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2036        flags |= qc->tag << CRQB_TAG_SHIFT;
2037        flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2038
2039        /* get current queue index from software */
2040        in_index = pp->req_idx;
2041
2042        pp->crqb[in_index].sg_addr =
2043                cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2044        pp->crqb[in_index].sg_addr_hi =
2045                cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2046        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2047
2048        cw = &pp->crqb[in_index].ata_cmd[0];
2049
2050        /* Sadly, the CRQB cannot accommodate all registers--there are
2051         * only 11 bytes...so we must pick and choose required
2052         * registers based on the command.  So, we drop feature and
2053         * hob_feature for [RW] DMA commands, but they are needed for
2054         * NCQ.  NCQ will drop hob_nsect, which is not needed there
2055         * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2056         */
2057        switch (tf->command) {
2058        case ATA_CMD_READ:
2059        case ATA_CMD_READ_EXT:
2060        case ATA_CMD_WRITE:
2061        case ATA_CMD_WRITE_EXT:
2062        case ATA_CMD_WRITE_FUA_EXT:
2063                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2064                break;
2065        case ATA_CMD_FPDMA_READ:
2066        case ATA_CMD_FPDMA_WRITE:
2067                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2068                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2069                break;
2070        default:
2071                /* The only other commands EDMA supports in non-queued and
2072                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2073                 * of which are defined/used by Linux.  If we get here, this
2074                 * driver needs work.
2075                 *
2076                 * FIXME: modify libata to give qc_prep a return value and
2077                 * return error here.
2078                 */
2079                BUG_ON(tf->command);
2080                break;
2081        }
2082        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2083        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2084        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2085        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2086        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2087        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2088        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2089        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2090        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */
2091
2092        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2093                return;
2094        mv_fill_sg(qc);
2095}
2096
2097/**
2098 *      mv_qc_prep_iie - Host specific command preparation.
2099 *      @qc: queued command to prepare
2100 *
2101 *      This routine simply redirects to the general purpose routine
2102 *      if command is not DMA.  Else, it handles prep of the CRQB
2103 *      (command request block), does some sanity checking, and calls
2104 *      the SG load routine.
2105 *
2106 *      LOCKING:
2107 *      Inherited from caller.
2108 */
2109static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2110{
2111        struct ata_port *ap = qc->ap;
2112        struct mv_port_priv *pp = ap->private_data;
2113        struct mv_crqb_iie *crqb;
2114        struct ata_taskfile *tf = &qc->tf;
2115        unsigned in_index;
2116        u32 flags = 0;
2117
2118        if ((tf->protocol != ATA_PROT_DMA) &&
2119            (tf->protocol != ATA_PROT_NCQ))
2120                return;
2121        if (tf->command == ATA_CMD_DSM)
2122                return;  /* use bmdma for this */
2123
2124        /* Fill in Gen IIE command request block */
2125        if (!(tf->flags & ATA_TFLAG_WRITE))
2126                flags |= CRQB_FLAG_READ;
2127
2128        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2129        flags |= qc->tag << CRQB_TAG_SHIFT;
2130        flags |= qc->tag << CRQB_HOSTQ_SHIFT;
2131        flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2132
2133        /* get current queue index from software */
2134        in_index = pp->req_idx;
2135
2136        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2137        crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2138        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2139        crqb->flags = cpu_to_le32(flags);
2140
2141        crqb->ata_cmd[0] = cpu_to_le32(
2142                        (tf->command << 16) |
2143                        (tf->feature << 24)
2144                );
2145        crqb->ata_cmd[1] = cpu_to_le32(
2146                        (tf->lbal << 0) |
2147                        (tf->lbam << 8) |
2148                        (tf->lbah << 16) |
2149                        (tf->device << 24)
2150                );
2151        crqb->ata_cmd[2] = cpu_to_le32(
2152                        (tf->hob_lbal << 0) |
2153                        (tf->hob_lbam << 8) |
2154                        (tf->hob_lbah << 16) |
2155                        (tf->hob_feature << 24)
2156                );
2157        crqb->ata_cmd[3] = cpu_to_le32(
2158                        (tf->nsect << 0) |
2159                        (tf->hob_nsect << 8)
2160                );
2161
2162        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2163                return;
2164        mv_fill_sg(qc);
2165}
2166
2167/**
2168 *      mv_sff_check_status - fetch device status, if valid
2169 *      @ap: ATA port to fetch status from
2170 *
2171 *      When using command issue via mv_qc_issue_fis(),
2172 *      the initial ATA_BUSY state does not show up in the
2173 *      ATA status (shadow) register.  This can confuse libata!
2174 *
2175 *      So we have a hook here to fake ATA_BUSY for that situation,
2176 *      until the first time a BUSY, DRQ, or ERR bit is seen.
2177 *
2178 *      The rest of the time, it simply returns the ATA status register.
2179 */
2180static u8 mv_sff_check_status(struct ata_port *ap)
2181{
2182        u8 stat = ioread8(ap->ioaddr.status_addr);
2183        struct mv_port_priv *pp = ap->private_data;
2184
2185        if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2186                if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2187                        pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2188                else
2189                        stat = ATA_BUSY;
2190        }
2191        return stat;
2192}
2193
2194/**
2195 *      mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *      @ap: ATA port on which to send the FIS
2196 *      @fis: fis to be sent
2197 *      @nwords: number of 32-bit words in the fis
2198 */
2199static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2200{
2201        void __iomem *port_mmio = mv_ap_base(ap);
2202        u32 ifctl, old_ifctl, ifstat;
2203        int i, timeout = 200, final_word = nwords - 1;
2204
2205        /* Initiate FIS transmission mode */
2206        old_ifctl = readl(port_mmio + SATA_IFCTL);
2207        ifctl = 0x100 | (old_ifctl & 0xf);
2208        writelfl(ifctl, port_mmio + SATA_IFCTL);
2209
2210        /* Send all words of the FIS except for the final word */
2211        for (i = 0; i < final_word; ++i)
2212                writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2213
2214        /* Flag end-of-transmission, and then send the final word */
2215        writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2216        writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2217
2218        /*
2219         * Wait for FIS transmission to complete.
2220         * This typically takes just a single iteration.
2221         */
2222        do {
2223                ifstat = readl(port_mmio + SATA_IFSTAT);
2224        } while (!(ifstat & 0x1000) && --timeout);
2225
2226        /* Restore original port configuration */
2227        writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2228
2229        /* See if it worked */
2230        if ((ifstat & 0x3000) != 0x1000) {
2231                ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2232                              __func__, ifstat);
2233                return AC_ERR_OTHER;
2234        }
2235        return 0;
2236}
2237
2238/**
2239 *      mv_qc_issue_fis - Issue a command directly as a FIS
2240 *      @qc: queued command to start
2241 *
2242 *      Note that the ATA shadow registers are not updated
2243 *      after command issue, so the device will appear "READY"
2244 *      if polled, even while it is BUSY processing the command.
2245 *
2246 *      So we use a status hook to fake ATA_BUSY until the drive changes state.
2247 *
2248 *      Note: we don't get updated shadow regs on *completion*
2249 *      of non-data commands. So avoid sending them via this function,
2250 *      as they will appear to have completed immediately.
2251 *
2252 *      GEN_IIE has special registers that we could get the result tf from,
2253 *      but earlier chipsets do not.  For now, we ignore those registers.
2254 */
2255static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2256{
2257        struct ata_port *ap = qc->ap;
2258        struct mv_port_priv *pp = ap->private_data;
2259        struct ata_link *link = qc->dev->link;
2260        u32 fis[5];
2261        int err = 0;
2262
2263        ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2264        err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2265        if (err)
2266                return err;
2267
2268        switch (qc->tf.protocol) {
2269        case ATAPI_PROT_PIO:
2270                pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2271                /* fall through */
2272        case ATAPI_PROT_NODATA:
2273                ap->hsm_task_state = HSM_ST_FIRST;
2274                break;
2275        case ATA_PROT_PIO:
2276                pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2277                if (qc->tf.flags & ATA_TFLAG_WRITE)
2278                        ap->hsm_task_state = HSM_ST_FIRST;
2279                else
2280                        ap->hsm_task_state = HSM_ST;
2281                break;
2282        default:
2283                ap->hsm_task_state = HSM_ST_LAST;
2284                break;
2285        }
2286
2287        if (qc->tf.flags & ATA_TFLAG_POLLING)
2288                ata_sff_queue_pio_task(link, 0);
2289        return 0;
2290}
2291
2292/**
2293 *      mv_qc_issue - Initiate a command to the host
2294 *      @qc: queued command to start
2295 *
2296 *      This routine simply redirects to the general purpose routine
2297 *      if command is not DMA.  Else, it sanity checks our local
2298 *      caches of the request producer/consumer indices then enables
2299 *      DMA and bumps the request producer index.
2300 *
2301 *      LOCKING:
2302 *      Inherited from caller.
2303 */
2304static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2305{
2306        static int limit_warnings = 10;
2307        struct ata_port *ap = qc->ap;
2308        void __iomem *port_mmio = mv_ap_base(ap);
2309        struct mv_port_priv *pp = ap->private_data;
2310        u32 in_index;
2311        unsigned int port_irqs;
2312
2313        pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2314
2315        switch (qc->tf.protocol) {
2316        case ATA_PROT_DMA:
2317                if (qc->tf.command == ATA_CMD_DSM) {
2318                        if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
2319                                return AC_ERR_OTHER;
2320                        break;  /* use bmdma for this */
2321                }
2322                /* fall through */
2323        case ATA_PROT_NCQ:
2324                mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2325                pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2326                in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2327
2328                /* Write the request in pointer to kick the EDMA to life */
2329                writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2330                                        port_mmio + EDMA_REQ_Q_IN_PTR);
2331                return 0;
2332
2333        case ATA_PROT_PIO:
2334                /*
2335                 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2336                 *
2337                 * Someday, we might implement special polling workarounds
2338                 * for these, but it all seems rather unnecessary since we
2339                 * normally use only DMA for commands which transfer more
2340                 * than a single block of data.
2341                 *
2342                 * Much of the time, this could just work regardless.
2343                 * So for now, just log the incident, and allow the attempt.
2344                 */
2345                if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2346                        --limit_warnings;
2347                        ata_link_warn(qc->dev->link, DRV_NAME
2348                                      ": attempting PIO w/multiple DRQ: "
2349                                      "this may fail due to h/w errata\n");
2350                }
2351                /* fall through */
2352        case ATA_PROT_NODATA:
2353        case ATAPI_PROT_PIO:
2354        case ATAPI_PROT_NODATA:
2355                if (ap->flags & ATA_FLAG_PIO_POLLING)
2356                        qc->tf.flags |= ATA_TFLAG_POLLING;
2357                break;
2358        }
2359
2360        if (qc->tf.flags & ATA_TFLAG_POLLING)
2361                port_irqs = ERR_IRQ;    /* mask device interrupt when polling */
2362        else
2363                port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2364
2365        /*
2366         * We're about to send a non-EDMA capable command to the
2367         * port.  Turn off EDMA so there won't be problems accessing
2368         * shadow block, etc registers.
2369         */
2370        mv_stop_edma(ap);
2371        mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2372        mv_pmp_select(ap, qc->dev->link->pmp);
2373
2374        if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2375                struct mv_host_priv *hpriv = ap->host->private_data;
2376                /*
2377                 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2378                 *
2379                 * After any NCQ error, the READ_LOG_EXT command
2380                 * from libata-eh *must* use mv_qc_issue_fis().
2381                 * Otherwise it might fail, due to chip errata.
2382                 *
2383                 * Rather than special-case it, we'll just *always*
2384                 * use this method here for READ_LOG_EXT, making for
2385                 * easier testing.
2386                 */
2387                if (IS_GEN_II(hpriv))
2388                        return mv_qc_issue_fis(qc);
2389        }
2390        return ata_bmdma_qc_issue(qc);
2391}
2392
2393static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2394{
2395        struct mv_port_priv *pp = ap->private_data;
2396        struct ata_queued_cmd *qc;
2397
2398        if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2399                return NULL;
2400        qc = ata_qc_from_tag(ap, ap->link.active_tag);
2401        if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2402                return qc;
2403        return NULL;
2404}
2405
2406static void mv_pmp_error_handler(struct ata_port *ap)
2407{
2408        unsigned int pmp, pmp_map;
2409        struct mv_port_priv *pp = ap->private_data;
2410
2411        if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2412                /*
2413                 * Perform NCQ error analysis on failed PMPs
2414                 * before we freeze the port entirely.
2415                 *
2416                 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2417                 */
2418                pmp_map = pp->delayed_eh_pmp_map;
2419                pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2420                for (pmp = 0; pmp_map != 0; pmp++) {
2421                        unsigned int this_pmp = (1 << pmp);
2422                        if (pmp_map & this_pmp) {
2423                                struct ata_link *link = &ap->pmp_link[pmp];
2424                                pmp_map &= ~this_pmp;
2425                                ata_eh_analyze_ncq_error(link);
2426                        }
2427                }
2428                ata_port_freeze(ap);
2429        }
2430        sata_pmp_error_handler(ap);
2431}
2432
2433static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2434{
2435        void __iomem *port_mmio = mv_ap_base(ap);
2436
2437        return readl(port_mmio + SATA_TESTCTL) >> 16;
2438}
2439
2440static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2441{
2442        struct ata_eh_info *ehi;
2443        unsigned int pmp;
2444
2445        /*
2446         * Initialize EH info for PMPs which saw device errors
2447         */
2448        ehi = &ap->link.eh_info;
2449        for (pmp = 0; pmp_map != 0; pmp++) {
2450                unsigned int this_pmp = (1 << pmp);
2451                if (pmp_map & this_pmp) {
2452                        struct ata_link *link = &ap->pmp_link[pmp];
2453
2454                        pmp_map &= ~this_pmp;
2455                        ehi = &link->eh_info;
2456                        ata_ehi_clear_desc(ehi);
2457                        ata_ehi_push_desc(ehi, "dev err");
2458                        ehi->err_mask |= AC_ERR_DEV;
2459                        ehi->action |= ATA_EH_RESET;
2460                        ata_link_abort(link);
2461                }
2462        }
2463}
2464
2465static int mv_req_q_empty(struct ata_port *ap)
2466{
2467        void __iomem *port_mmio = mv_ap_base(ap);
2468        u32 in_ptr, out_ptr;
2469
2470        in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2471                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2472        out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2473                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2474        return (in_ptr == out_ptr);     /* 1 == queue_is_empty */
2475}
2476
2477static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2478{
2479        struct mv_port_priv *pp = ap->private_data;
2480        int failed_links;
2481        unsigned int old_map, new_map;
2482
2483        /*
2484         * Device error during FBS+NCQ operation:
2485         *
2486         * Set a port flag to prevent further I/O being enqueued.
2487         * Leave the EDMA running to drain outstanding commands from this port.
2488         * Perform the post-mortem/EH only when all responses are complete.
2489         * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2490         */
2491        if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2492                pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2493                pp->delayed_eh_pmp_map = 0;
2494        }
2495        old_map = pp->delayed_eh_pmp_map;
2496        new_map = old_map | mv_get_err_pmp_map(ap);
2497
2498        if (old_map != new_map) {
2499                pp->delayed_eh_pmp_map = new_map;
2500                mv_pmp_eh_prep(ap, new_map & ~old_map);
2501        }
2502        failed_links = hweight16(new_map);
2503
2504        ata_port_info(ap,
2505                      "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
2506                      __func__, pp->delayed_eh_pmp_map,
2507                      ap->qc_active, failed_links,
2508                      ap->nr_active_links);
2509
2510        if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2511                mv_process_crpb_entries(ap, pp);
2512                mv_stop_edma(ap);
2513                mv_eh_freeze(ap);
2514                ata_port_info(ap, "%s: done\n", __func__);
2515                return 1;       /* handled */
2516        }
2517        ata_port_info(ap, "%s: waiting\n", __func__);
2518        return 1;       /* handled */
2519}
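
/*
 * Editorial summary of the delayed-EH flow above: once DELAYED_EH is
 * set, mv_qc_defer() blocks new commands while EDMA keeps draining the
 * in-flight ones.  Each pass accumulates newly-failed PMP links; when
 * every remaining response has completed and the request queue is
 * empty, we stop EDMA, freeze the port, and let libata-eh take over.
 */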
2520
2521static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2522{
2523        /*
2524         * Possible future enhancement:
2525         *
2526         * FBS+non-NCQ operation is not yet implemented.
2527         * See related notes in mv_edma_cfg().
2528         *
2529         * Device error during FBS+non-NCQ operation:
2530         *
2531         * We need to snapshot the shadow registers for each failed command.
2532         * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2533         */
2534        return 0;       /* not handled */
2535}
2536
2537static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2538{
2539        struct mv_port_priv *pp = ap->private_data;
2540
2541        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2542                return 0;       /* EDMA was not active: not handled */
2543        if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2544                return 0;       /* FBS was not active: not handled */
2545
2546        if (!(edma_err_cause & EDMA_ERR_DEV))
2547                return 0;       /* non DEV error: not handled */
2548        edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2549        if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2550                return 0;       /* other problems: not handled */
2551
2552        if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2553                /*
2554                 * EDMA should NOT have self-disabled for this case.
2555                 * If it did, then something is wrong elsewhere,
2556                 * and we cannot handle it here.
2557                 */
2558                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2559                        ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2560                                      __func__, edma_err_cause, pp->pp_flags);
2561                        return 0; /* not handled */
2562                }
2563                return mv_handle_fbs_ncq_dev_err(ap);
2564        } else {
2565                /*
2566                 * EDMA should have self-disabled for this case.
2567                 * If it did not, then something is wrong elsewhere,
2568                 * and we cannot handle it here.
2569                 */
2570                if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2571                        ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2572                                      __func__, edma_err_cause, pp->pp_flags);
2573                        return 0; /* not handled */
2574                }
2575                return mv_handle_fbs_non_ncq_dev_err(ap);
2576        }
2577        return 0;       /* not handled */
2578}
2579
2580static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2581{
2582        struct ata_eh_info *ehi = &ap->link.eh_info;
2583        char *when = "idle";
2584
2585        ata_ehi_clear_desc(ehi);
2586        if (edma_was_enabled) {
2587                when = "EDMA enabled";
2588        } else {
2589                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2590                if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2591                        when = "polling";
2592        }
2593        ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2594        ehi->err_mask |= AC_ERR_OTHER;
2595        ehi->action   |= ATA_EH_RESET;
2596        ata_port_freeze(ap);
2597}
2598
2599/**
2600 *      mv_err_intr - Handle error interrupts on the port
2601 *      @ap: ATA channel to manipulate
2602 *
2603 *      Most cases require a full reset of the chip's state machine,
2604 *      which also performs a COMRESET.
2605 *      Also, if the port disabled DMA, update our cached copy to match.
2606 *
2607 *      LOCKING:
2608 *      Inherited from caller.
2609 */
2610static void mv_err_intr(struct ata_port *ap)
2611{
2612        void __iomem *port_mmio = mv_ap_base(ap);
2613        u32 edma_err_cause, eh_freeze_mask, serr = 0;
2614        u32 fis_cause = 0;
2615        struct mv_port_priv *pp = ap->private_data;
2616        struct mv_host_priv *hpriv = ap->host->private_data;
2617        unsigned int action = 0, err_mask = 0;
2618        struct ata_eh_info *ehi = &ap->link.eh_info;
2619        struct ata_queued_cmd *qc;
2620        int abort = 0;
2621
2622        /*
2623         * Read and clear the SError and err_cause bits.
2624         * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2625         * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2626         */
2627        sata_scr_read(&ap->link, SCR_ERROR, &serr);
2628        sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2629
2630        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2631        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2632                fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2633                writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2634        }
2635        writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2636
2637        if (edma_err_cause & EDMA_ERR_DEV) {
2638                /*
2639                 * Device errors during FIS-based switching operation
2640                 * require special handling.
2641                 */
2642                if (mv_handle_dev_err(ap, edma_err_cause))
2643                        return;
2644        }
2645
2646        qc = mv_get_active_qc(ap);
2647        ata_ehi_clear_desc(ehi);
2648        ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2649                          edma_err_cause, pp->pp_flags);
2650
2651        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2652                ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2653                if (fis_cause & FIS_IRQ_CAUSE_AN) {
2654                        u32 ec = edma_err_cause &
2655                               ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2656                        sata_async_notification(ap);
2657                        if (!ec)
2658                                return; /* Just an AN; no need for the nukes */
2659                        ata_ehi_push_desc(ehi, "SDB notify");
2660                }
2661        }
2662        /*
2663         * All generations share these EDMA error cause bits:
2664         */
2665        if (edma_err_cause & EDMA_ERR_DEV) {
2666                err_mask |= AC_ERR_DEV;
2667                action |= ATA_EH_RESET;
2668                ata_ehi_push_desc(ehi, "dev error");
2669        }
2670        if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2671                        EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2672                        EDMA_ERR_INTRL_PAR)) {
2673                err_mask |= AC_ERR_ATA_BUS;
2674                action |= ATA_EH_RESET;
2675                ata_ehi_push_desc(ehi, "parity error");
2676        }
2677        if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2678                ata_ehi_hotplugged(ehi);
2679                ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2680                        "dev disconnect" : "dev connect");
2681                action |= ATA_EH_RESET;
2682        }
2683
2684        /*
2685         * Gen-I has a different SELF_DIS bit,
2686         * different FREEZE bits, and no SERR bit:
2687         */
2688        if (IS_GEN_I(hpriv)) {
2689                eh_freeze_mask = EDMA_EH_FREEZE_5;
2690                if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2691                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2692                        ata_ehi_push_desc(ehi, "EDMA self-disable");
2693                }
2694        } else {
2695                eh_freeze_mask = EDMA_EH_FREEZE;
2696                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2697                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2698                        ata_ehi_push_desc(ehi, "EDMA self-disable");
2699                }
2700                if (edma_err_cause & EDMA_ERR_SERR) {
2701                        ata_ehi_push_desc(ehi, "SError=%08x", serr);
2702                        err_mask |= AC_ERR_ATA_BUS;
2703                        action |= ATA_EH_RESET;
2704                }
2705        }
2706
2707        if (!err_mask) {
2708                err_mask = AC_ERR_OTHER;
2709                action |= ATA_EH_RESET;
2710        }
2711
2712        ehi->serror |= serr;
2713        ehi->action |= action;
2714
2715        if (qc)
2716                qc->err_mask |= err_mask;
2717        else
2718                ehi->err_mask |= err_mask;
2719
2720        if (err_mask == AC_ERR_DEV) {
2721                /*
2722                 * Cannot do ata_port_freeze() here,
2723                 * because it would kill PIO access,
2724                 * which is needed for further diagnosis.
2725                 */
2726                mv_eh_freeze(ap);
2727                abort = 1;
2728        } else if (edma_err_cause & eh_freeze_mask) {
2729                /*
2730                 * Note to self: ata_port_freeze() calls ata_port_abort()
2731                 */
2732                ata_port_freeze(ap);
2733        } else {
2734                abort = 1;
2735        }
2736
2737        if (abort) {
2738                if (qc)
2739                        ata_link_abort(qc->dev->link);
2740                else
2741                        ata_port_abort(ap);
2742        }
2743}
2744
2745static bool mv_process_crpb_response(struct ata_port *ap,
2746                struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2747{
2748        u8 ata_status;
2749        u16 edma_status = le16_to_cpu(response->flags);
2750
2751        /*
2752         * edma_status from a response queue entry:
2753         *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2754         *   MSB is saved ATA status from command completion.
2755         */
2756        if (!ncq_enabled) {
2757                u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2758                if (err_cause) {
2759                        /*
2760                         * Error will be seen/handled by
2761                         * mv_err_intr().  So do nothing at all here.
2762                         */
2763                        return false;
2764                }
2765        }
2766        ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2767        if (!ac_err_mask(ata_status))
2768                return true;
2769        /* else: leave it for mv_err_intr() */
2770        return false;
2771}
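
    /*
     * Editorial sketch (not driver code): splitting a raw CRPB flags word
     * into the two fields described above.  CRPB_FLAG_STATUS_SHIFT is
     * assumed to be 8, as the LSB/MSB comment implies, and the sample
     * value is made up: flags == 0x5004 would decode to err_cause 0x04
     * and ata_status 0x50 (ATA_DRDY | ATA_DSC).
     */
    static inline u8 crpb_saved_ata_status_sketch(u16 edma_status)
    {
            return edma_status >> CRPB_FLAG_STATUS_SHIFT;   /* MSB */
    }
    static inline u8 crpb_err_cause_sketch(u16 edma_status)
    {
            return edma_status & 0xff;                      /* LSB */
    }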
2772
2773static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2774{
2775        void __iomem *port_mmio = mv_ap_base(ap);
2776        struct mv_host_priv *hpriv = ap->host->private_data;
2777        u32 in_index;
2778        bool work_done = false;
2779        u32 done_mask = 0;
2780        int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2781
2782        /* Get the hardware queue position index */
2783        in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2784                        >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2785
2786        /* Process new responses since the last time we looked */
2787        while (in_index != pp->resp_idx) {
2788                unsigned int tag;
2789                struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2790
2791                pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2792
2793                if (IS_GEN_I(hpriv)) {
2794                        /* 50xx: no NCQ, only one command active at a time */
2795                        tag = ap->link.active_tag;
2796                } else {
2797                        /* Gen II/IIE: get command tag from CRPB entry */
2798                        tag = le16_to_cpu(response->id) & 0x1f;
2799                }
2800                if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2801                        done_mask |= 1 << tag;
2802                work_done = true;
2803        }
2804
2805        if (work_done) {
2806                ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2807
2808                /* Update the software queue position index in hardware */
2809                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2810                         (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2811                         port_mmio + EDMA_RSP_Q_OUT_PTR);
2812        }
2813}
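
    /*
     * Editorial sketch of the circular-queue step used above: with the
     * driver's queue depth of 32, MV_MAX_Q_DEPTH_MASK is 0x1f and the
     * response index wraps from 31 back to 0.
     */
    static inline unsigned int crpb_next_idx_sketch(unsigned int resp_idx)
    {
            return (resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;    /* e.g. 31 -> 0 */
    }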
2814
2815static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2816{
2817        struct mv_port_priv *pp;
2818        int edma_was_enabled;
2819
2820        /*
2821         * Grab a snapshot of the EDMA_EN flag setting,
2822         * so that we have a consistent view for this port,
2823         * even if one of the routines we call changes it.
2824         */
2825        pp = ap->private_data;
2826        edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2827        /*
2828         * Process completed CRPB response(s) before other events.
2829         */
2830        if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2831                mv_process_crpb_entries(ap, pp);
2832                if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2833                        mv_handle_fbs_ncq_dev_err(ap);
2834        }
2835        /*
2836         * Handle chip-reported errors, or continue on to handle PIO.
2837         */
2838        if (unlikely(port_cause & ERR_IRQ)) {
2839                mv_err_intr(ap);
2840        } else if (!edma_was_enabled) {
2841                struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2842                if (qc)
2843                        ata_bmdma_port_intr(ap, qc);
2844                else
2845                        mv_unexpected_intr(ap, edma_was_enabled);
2846        }
2847}
2848
2849/**
2850 *      mv_host_intr - Handle all interrupts on the given host controller
2851 *      @host: host specific structure
2852 *      @main_irq_cause: value of the main interrupt cause register
2853 *
2854 *      LOCKING:
2855 *      Inherited from caller.
2856 */
2857static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2858{
2859        struct mv_host_priv *hpriv = host->private_data;
2860        void __iomem *mmio = hpriv->base, *hc_mmio;
2861        unsigned int handled = 0, port;
2862
2863        /* If asserted, clear the "all ports" IRQ coalescing bit */
2864        if (main_irq_cause & ALL_PORTS_COAL_DONE)
2865                writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2866
2867        for (port = 0; port < hpriv->n_ports; port++) {
2868                struct ata_port *ap = host->ports[port];
2869                unsigned int p, shift, hardport, port_cause;
2870
2871                MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2872                /*
2873                 * Each hc within the host has its own hc_irq_cause register,
2874         * where the interrupting ports' bits get ack'd.
2875                 */
2876                if (hardport == 0) {    /* first port on this hc ? */
2877                        u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2878                        u32 port_mask, ack_irqs;
2879                        /*
2880                         * Skip this entire hc if nothing pending for any ports
2881                         */
2882                        if (!hc_cause) {
2883                                port += MV_PORTS_PER_HC - 1;
2884                                continue;
2885                        }
2886                        /*
2887                         * We don't need/want to read the hc_irq_cause register,
2888                         * because doing so hurts performance, and
2889                         * main_irq_cause already gives us everything we need.
2890                         *
2891                         * But we do have to *write* to the hc_irq_cause to ack
2892                         * the ports that we are handling this time through.
2893                         *
2894                         * This requires that we create a bitmap for those
2895                         * ports which interrupted us, and use that bitmap
2896                         * to ack (only) those ports via hc_irq_cause.
2897                         */
2898                        ack_irqs = 0;
2899                        if (hc_cause & PORTS_0_3_COAL_DONE)
2900                                ack_irqs = HC_COAL_IRQ;
2901                        for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2902                                if ((port + p) >= hpriv->n_ports)
2903                                        break;
2904                                port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2905                                if (hc_cause & port_mask)
2906                                        ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2907                        }
2908                        hc_mmio = mv_hc_base_from_port(mmio, port);
2909                        writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2910                        handled = 1;
2911                }
2912                /*
2913                 * Handle interrupts signalled for this port:
2914                 */
2915                port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2916                if (port_cause)
2917                        mv_port_intr(ap, port_cause);
2918        }
2919        return handled;
2920}
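
    /*
     * Worked example (editorial, made-up situation) of the ack bitmap
     * logic above: suppose hardports 0 and 2 of one hc have DONE_IRQ
     * pending.  hc_cause then contains (DONE_IRQ << 0) and
     * (DONE_IRQ << 4) (two bits per port), so the loop ORs
     * (DMA_IRQ | DEV_IRQ) << 0 and (DMA_IRQ | DEV_IRQ) << 2 into
     * ack_irqs.  Writing ~ack_irqs to HC_IRQ_CAUSE then clears exactly
     * those ports' latched events: bits written as 0 are cleared,
     * bits written as 1 are left untouched.
     */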
2921
2922static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2923{
2924        struct mv_host_priv *hpriv = host->private_data;
2925        struct ata_port *ap;
2926        struct ata_queued_cmd *qc;
2927        struct ata_eh_info *ehi;
2928        unsigned int i, err_mask, printed = 0;
2929        u32 err_cause;
2930
2931        err_cause = readl(mmio + hpriv->irq_cause_offset);
2932
2933        dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2934
2935        DPRINTK("All regs @ PCI error\n");
2936        mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2937
2938        writelfl(0, mmio + hpriv->irq_cause_offset);
2939
2940        for (i = 0; i < host->n_ports; i++) {
2941                ap = host->ports[i];
2942                if (!ata_link_offline(&ap->link)) {
2943                        ehi = &ap->link.eh_info;
2944                        ata_ehi_clear_desc(ehi);
2945                        if (!printed++)
2946                                ata_ehi_push_desc(ehi,
2947                                        "PCI err cause 0x%08x", err_cause);
2948                        err_mask = AC_ERR_HOST_BUS;
2949                        ehi->action = ATA_EH_RESET;
2950                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
2951                        if (qc)
2952                                qc->err_mask |= err_mask;
2953                        else
2954                                ehi->err_mask |= err_mask;
2955
2956                        ata_port_freeze(ap);
2957                }
2958        }
2959        return 1;       /* handled */
2960}
2961
2962/**
2963 *      mv_interrupt - Main interrupt event handler
2964 *      @irq: unused
2965 *      @dev_instance: private data; in this case the host structure
2966 *
2967 *      Read the read-only register to determine if any host
2968 *      controllers have pending interrupts.  If so, call lower level
2969 *      routine to handle.  Also check for PCI errors which are only
2970 *      reported here.
2971 *
2972 *      LOCKING:
2973 *      This routine holds the host lock while processing pending
2974 *      interrupts.
2975 */
2976static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2977{
2978        struct ata_host *host = dev_instance;
2979        struct mv_host_priv *hpriv = host->private_data;
2980        unsigned int handled = 0;
2981        int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2982        u32 main_irq_cause, pending_irqs;
2983
2984        spin_lock(&host->lock);
2985
2986        /* for MSI:  block new interrupts while in here */
2987        if (using_msi)
2988                mv_write_main_irq_mask(0, hpriv);
2989
2990        main_irq_cause = readl(hpriv->main_irq_cause_addr);
2991        pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
2992        /*
2993         * Deal with cases where we either have nothing pending, or have read
2994         * a bogus register value which can indicate HW removal or PCI fault.
2995         */
2996        if (pending_irqs && main_irq_cause != 0xffffffffU) {
2997                if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2998                        handled = mv_pci_error(host, hpriv->base);
2999                else
3000                        handled = mv_host_intr(host, pending_irqs);
3001        }
3002
3003        /* for MSI: unmask; interrupt cause bits will retrigger now */
3004        if (using_msi)
3005                mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3006
3007        spin_unlock(&host->lock);
3008
3009        return IRQ_RETVAL(handled);
3010}
3011
3012static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3013{
3014        unsigned int ofs;
3015
3016        switch (sc_reg_in) {
3017        case SCR_STATUS:
3018        case SCR_ERROR:
3019        case SCR_CONTROL:
3020                ofs = sc_reg_in * sizeof(u32);
3021                break;
3022        default:
3023                ofs = 0xffffffffU;
3024                break;
3025        }
3026        return ofs;
3027}
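
    /*
     * Editorial sketch of the mapping above, assuming the standard
     * libata SCR numbering (SCR_STATUS == 0, SCR_ERROR == 1,
     * SCR_CONTROL == 2):
     *
     *      mv5_scr_offset(SCR_STATUS)  -> 0x0
     *      mv5_scr_offset(SCR_ERROR)   -> 0x4
     *      mv5_scr_offset(SCR_CONTROL) -> 0x8
     *
     * Anything else returns the 0xffffffffU sentinel, which the two
     * accessors below translate into -EINVAL.
     */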
3028
3029static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3030{
3031        struct mv_host_priv *hpriv = link->ap->host->private_data;
3032        void __iomem *mmio = hpriv->base;
3033        void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3034        unsigned int ofs = mv5_scr_offset(sc_reg_in);
3035
3036        if (ofs == 0xffffffffU)
3037                return -EINVAL;
3038        *val = readl(addr + ofs);
3039        return 0;
3041}
3042
3043static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3044{
3045        struct mv_host_priv *hpriv = link->ap->host->private_data;
3046        void __iomem *mmio = hpriv->base;
3047        void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3048        unsigned int ofs = mv5_scr_offset(sc_reg_in);
3049
3050        if (ofs == 0xffffffffU)
3051                return -EINVAL;
3052        writelfl(val, addr + ofs);
3053        return 0;
3055}
3056
3057static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3058{
3059        struct pci_dev *pdev = to_pci_dev(host->dev);
3060        int early_5080;
3061
3062        early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3063
3064        if (!early_5080) {
3065                u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3066                tmp |= (1 << 0);
3067                writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3068        }
3069
3070        mv_reset_pci_bus(host, mmio);
3071}
3072
3073static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3074{
3075        writel(0x0fcfffff, mmio + FLASH_CTL);
3076}
3077
3078static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3079                           void __iomem *mmio)
3080{
3081        void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3082        u32 tmp;
3083
3084        tmp = readl(phy_mmio + MV5_PHY_MODE);
3085
3086        hpriv->signal[idx].pre = tmp & 0x1800;  /* bits 12:11 */
3087        hpriv->signal[idx].amps = tmp & 0xe0;   /* bits 7:5 */
3088}
3089
3090static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3091{
3092        u32 tmp;
3093
3094        writel(0, mmio + GPIO_PORT_CTL);
3095
3096        /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3097
3098        tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3099        tmp |= ~(1 << 0);
3100        writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3101}
3102
3103static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3104                           unsigned int port)
3105{
3106        void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3107        const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3108        u32 tmp;
3109        int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3110
3111        if (fix_apm_sq) {
3112                tmp = readl(phy_mmio + MV5_LTMODE);
3113                tmp |= (1 << 19);
3114                writel(tmp, phy_mmio + MV5_LTMODE);
3115
3116                tmp = readl(phy_mmio + MV5_PHY_CTL);
3117                tmp &= ~0x3;
3118                tmp |= 0x1;
3119                writel(tmp, phy_mmio + MV5_PHY_CTL);
3120        }
3121
3122        tmp = readl(phy_mmio + MV5_PHY_MODE);
3123        tmp &= ~mask;
3124        tmp |= hpriv->signal[port].pre;
3125        tmp |= hpriv->signal[port].amps;
3126        writel(tmp, phy_mmio + MV5_PHY_MODE);
3127}
3128
3129
3130#undef ZERO
3131#define ZERO(reg) writel(0, port_mmio + (reg))
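    /*
     * For reference: with port_mmio in scope, ZERO(0x028) expands to
     * writel(0, port_mmio + (0x028)); each ZERO() line below thus
     * clears one EDMA register at the commented offset.
     */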
3132static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3133                             unsigned int port)
3134{
3135        void __iomem *port_mmio = mv_port_base(mmio, port);
3136
3137        mv_reset_channel(hpriv, mmio, port);
3138
3139        ZERO(0x028);    /* command */
3140        writel(0x11f, port_mmio + EDMA_CFG);
3141        ZERO(0x004);    /* timer */
3142        ZERO(0x008);    /* irq err cause */
3143        ZERO(0x00c);    /* irq err mask */
3144        ZERO(0x010);    /* rq bah */
3145        ZERO(0x014);    /* rq inp */
3146        ZERO(0x018);    /* rq outp */
3147        ZERO(0x01c);    /* respq bah */
3148        ZERO(0x024);    /* respq outp */
3149        ZERO(0x020);    /* respq inp */
3150        ZERO(0x02c);    /* test control */
3151        writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3152}
3153#undef ZERO
3154
3155#define ZERO(reg) writel(0, hc_mmio + (reg))
3156static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3157                        unsigned int hc)
3158{
3159        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3160        u32 tmp;
3161
3162        ZERO(0x00c);
3163        ZERO(0x010);
3164        ZERO(0x014);
3165        ZERO(0x018);
3166
3167        tmp = readl(hc_mmio + 0x20);
3168        tmp &= 0x1c1c1c1c;
3169        tmp |= 0x03030303;
3170        writel(tmp, hc_mmio + 0x20);
3171}
3172#undef ZERO
3173
3174static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3175                        unsigned int n_hc)
3176{
3177        unsigned int hc, port;
3178
3179        for (hc = 0; hc < n_hc; hc++) {
3180                for (port = 0; port < MV_PORTS_PER_HC; port++)
3181                        mv5_reset_hc_port(hpriv, mmio,
3182                                          (hc * MV_PORTS_PER_HC) + port);
3183
3184                mv5_reset_one_hc(hpriv, mmio, hc);
3185        }
3186
3187        return 0;
3188}
3189
3190#undef ZERO
3191#define ZERO(reg) writel(0, mmio + (reg))
3192static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3193{
3194        struct mv_host_priv *hpriv = host->private_data;
3195        u32 tmp;
3196
3197        tmp = readl(mmio + MV_PCI_MODE);
3198        tmp &= 0xff00ffff;
3199        writel(tmp, mmio + MV_PCI_MODE);
3200
3201        ZERO(MV_PCI_DISC_TIMER);
3202        ZERO(MV_PCI_MSI_TRIGGER);
3203        writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3204        ZERO(MV_PCI_SERR_MASK);
3205        ZERO(hpriv->irq_cause_offset);
3206        ZERO(hpriv->irq_mask_offset);
3207        ZERO(MV_PCI_ERR_LOW_ADDRESS);
3208        ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3209        ZERO(MV_PCI_ERR_ATTRIBUTE);
3210        ZERO(MV_PCI_ERR_COMMAND);
3211}
3212#undef ZERO
3213
3214static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3215{
3216        u32 tmp;
3217
3218        mv5_reset_flash(hpriv, mmio);
3219
3220        tmp = readl(mmio + GPIO_PORT_CTL);
3221        tmp &= 0x3;
3222        tmp |= (1 << 5) | (1 << 6);
3223        writel(tmp, mmio + GPIO_PORT_CTL);
3224}
3225
3226/**
3227 *      mv6_reset_hc - Perform the 6xxx global soft reset
     *      @hpriv: host private data
3228 *      @mmio: base address of the HBA
     *      @n_hc: number of host controllers (unused; this reset is global)
3229 *
3230 *      This routine only applies to 6xxx parts.
3231 *
3232 *      LOCKING:
3233 *      Inherited from caller.
3234 */
3235static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3236                        unsigned int n_hc)
3237{
3238        void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3239        int i, rc = 0;
3240        u32 t;
3241
3242        /* Following procedure defined in PCI "main command and status
3243         * register" table.
3244         */
3245        t = readl(reg);
3246        writel(t | STOP_PCI_MASTER, reg);
3247
3248        for (i = 0; i < 1000; i++) {
3249                udelay(1);
3250                t = readl(reg);
3251                if (PCI_MASTER_EMPTY & t)
3252                        break;
3253        }
3254        if (!(PCI_MASTER_EMPTY & t)) {
3255                printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3256                rc = 1;
3257                goto done;
3258        }
3259
3260        /* set reset */
3261        i = 5;
3262        do {
3263                writel(t | GLOB_SFT_RST, reg);
3264                t = readl(reg);
3265                udelay(1);
3266        } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3267
3268        if (!(GLOB_SFT_RST & t)) {
3269                printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3270                rc = 1;
3271                goto done;
3272        }
3273
3274        /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3275        i = 5;
3276        do {
3277                writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3278                t = readl(reg);
3279                udelay(1);
3280        } while ((GLOB_SFT_RST & t) && (i-- > 0));
3281
3282        if (GLOB_SFT_RST & t) {
3283                printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3284                rc = 1;
3285        }
3286done:
3287        return rc;
3288}
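
    /*
     * Editorial sketch: the three bounded waits above share one shape,
     * which could be factored into a helper along these lines
     * (hypothetical name, not part of this driver):
     */
    static int mv6_poll_reg_sketch(void __iomem *reg, u32 mask, bool set,
                                   int tries)
    {
            while (tries-- > 0) {
                    u32 t = readl(reg);

                    if (!!(t & mask) == set)
                            return 0;       /* bit reached wanted state */
                    udelay(1);
            }
            return -ETIMEDOUT;
    }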
3289
3290static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3291                           void __iomem *mmio)
3292{
3293        void __iomem *port_mmio;
3294        u32 tmp;
3295
3296        tmp = readl(mmio + RESET_CFG);
3297        if ((tmp & (1 << 0)) == 0) {
3298                hpriv->signal[idx].amps = 0x7 << 8;
3299                hpriv->signal[idx].pre = 0x1 << 5;
3300                return;
3301        }
3302
3303        port_mmio = mv_port_base(mmio, idx);
3304        tmp = readl(port_mmio + PHY_MODE2);
3305
3306        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
3307        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
3308}
3309
3310static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3311{
3312        writel(0x00000060, mmio + GPIO_PORT_CTL);
3313}
3314
3315static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3316                           unsigned int port)
3317{
3318        void __iomem *port_mmio = mv_port_base(mmio, port);
3319
3320        u32 hp_flags = hpriv->hp_flags;
3321        int fix_phy_mode2 =
3322                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3323        int fix_phy_mode4 =
3324                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3325        u32 m2, m3;
3326
3327        if (fix_phy_mode2) {
3328                m2 = readl(port_mmio + PHY_MODE2);
3329                m2 &= ~(1 << 16);
3330                m2 |= (1 << 31);
3331                writel(m2, port_mmio + PHY_MODE2);
3332
3333                udelay(200);
3334
3335                m2 = readl(port_mmio + PHY_MODE2);
3336                m2 &= ~((1 << 16) | (1 << 31));
3337                writel(m2, port_mmio + PHY_MODE2);
3338
3339                udelay(200);
3340        }
3341
3342        /*
3343         * Gen-II/IIe PHY_MODE3 errata RM#2:
3344         * Achieves better receiver noise performance than the h/w default:
3345         */
3346        m3 = readl(port_mmio + PHY_MODE3);
3347        m3 = (m3 & 0x1f) | (0x5555601 << 5);
3348
3349        /* Guideline 88F5182 (GL# SATA-S11) */
3350        if (IS_SOC(hpriv))
3351                m3 &= ~0x1c;
3352
3353        if (fix_phy_mode4) {
3354                u32 m4 = readl(port_mmio + PHY_MODE4);
3355                /*
3356                 * Enforce reserved-bit restrictions on GenIIe devices only.
3357                 * For earlier chipsets, force only the internal config field
3358                 *  (workaround for errata FEr SATA#10 part 1).
3359                 */
3360                if (IS_GEN_IIE(hpriv))
3361                        m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3362                else
3363                        m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3364                writel(m4, port_mmio + PHY_MODE4);
3365        }
3366        /*
3367         * Workaround for 60x1-B2 errata SATA#13:
3368         * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3369         * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3370         * Or ensure we use writelfl() when writing PHY_MODE4.
3371         */
3372        writel(m3, port_mmio + PHY_MODE3);
3373
3374        /* Revert values of pre-emphasis and signal amps to the saved ones */
3375        m2 = readl(port_mmio + PHY_MODE2);
3376
3377        m2 &= ~MV_M2_PREAMP_MASK;
3378        m2 |= hpriv->signal[port].amps;
3379        m2 |= hpriv->signal[port].pre;
3380        m2 &= ~(1 << 16);
3381
3382        /* according to mvSata 3.6.1, some IIE values are fixed */
3383        if (IS_GEN_IIE(hpriv)) {
3384                m2 &= ~0xC30FF01F;
3385                m2 |= 0x0000900F;
3386        }
3387
3388        writel(m2, port_mmio + PHY_MODE2);
3389}
3390
3391/* TODO: use the generic LED interface to configure the SATA Presence */
3392/* & Activity LEDs on the board */
3393static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3394                                      void __iomem *mmio)
3395{
3396        return;
3397}
3398
3399static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3400                           void __iomem *mmio)
3401{
3402        void __iomem *port_mmio;
3403        u32 tmp;
3404
3405        port_mmio = mv_port_base(mmio, idx);
3406        tmp = readl(port_mmio + PHY_MODE2);
3407
3408        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
3409        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
3410}
3411
3412#undef ZERO
3413#define ZERO(reg) writel(0, port_mmio + (reg))
3414static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3415                                        void __iomem *mmio, unsigned int port)
3416{
3417        void __iomem *port_mmio = mv_port_base(mmio, port);
3418
3419        mv_reset_channel(hpriv, mmio, port);
3420
3421        ZERO(0x028);            /* command */
3422        writel(0x101f, port_mmio + EDMA_CFG);
3423        ZERO(0x004);            /* timer */
3424        ZERO(0x008);            /* irq err cause */
3425        ZERO(0x00c);            /* irq err mask */
3426        ZERO(0x010);            /* rq bah */
3427        ZERO(0x014);            /* rq inp */
3428        ZERO(0x018);            /* rq outp */
3429        ZERO(0x01c);            /* respq bah */
3430        ZERO(0x024);            /* respq outp */
3431        ZERO(0x020);            /* respq inp */
3432        ZERO(0x02c);            /* test control */
3433        writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3434}
3435
3436#undef ZERO
3437
3438#define ZERO(reg) writel(0, hc_mmio + (reg))
3439static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3440                                       void __iomem *mmio)
3441{
3442        void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3443
3444        ZERO(0x00c);
3445        ZERO(0x010);
3446        ZERO(0x014);
3448}
3449
3450#undef ZERO
3451
3452static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3453                                  void __iomem *mmio, unsigned int n_hc)
3454{
3455        unsigned int port;
3456
3457        for (port = 0; port < hpriv->n_ports; port++)
3458                mv_soc_reset_hc_port(hpriv, mmio, port);
3459
3460        mv_soc_reset_one_hc(hpriv, mmio);
3461
3462        return 0;
3463}
3464
3465static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3466                                      void __iomem *mmio)
3467{
3468        return;
3469}
3470
3471static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3472{
3473        return;
3474}
3475
3476static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3477                                  void __iomem *mmio, unsigned int port)
3478{
3479        void __iomem *port_mmio = mv_port_base(mmio, port);
3480        u32     reg;
3481
3482        reg = readl(port_mmio + PHY_MODE3);
3483        reg &= ~(0x3 << 27);    /* SELMUPF (bits 28:27) to 1 */
3484        reg |= (0x1 << 27);
3485        reg &= ~(0x3 << 29);    /* SELMUPI (bits 30:29) to 1 */
3486        reg |= (0x1 << 29);
3487        writel(reg, port_mmio + PHY_MODE3);
3488
3489        reg = readl(port_mmio + PHY_MODE4);
3490        reg &= ~0x1;    /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3491        reg |= (0x1 << 16);
3492        writel(reg, port_mmio + PHY_MODE4);
3493
3494        reg = readl(port_mmio + PHY_MODE9_GEN2);
3495        reg &= ~0xf;    /* TXAMP[3:0] (bits 3:0) to 8 */
3496        reg |= 0x8;
3497        reg &= ~(0x1 << 14);    /* TXAMP[4] (bit 14) to 0 */
3498        writel(reg, port_mmio + PHY_MODE9_GEN2);
3499
3500        reg = readl(port_mmio + PHY_MODE9_GEN1);
3501        reg &= ~0xf;    /* TXAMP[3:0] (bits 3:0) to 8 */
3502        reg |= 0x8;
3503        reg &= ~(0x1 << 14);    /* TXAMP[4] (bit 14) to 0 */
3504        writel(reg, port_mmio + PHY_MODE9_GEN1);
3505}
3506
3507/**
3508 *      soc_is_65n - check if the SoC is a 65 nm device
     *      @hpriv: host private data
3509 *
3510 *      Detect the type of the SoC by reading the PHYCFG_OFS register,
3511 *      which exists only on the 65 nm devices and holds a non-zero value
3512 *      there; reading it on older devices returns 0.
3513 */
3514static bool soc_is_65n(struct mv_host_priv *hpriv)
3515{
3516        void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3517
3518        if (readl(port0_mmio + PHYCFG_OFS))
3519                return true;
3520        return false;
3521}
3522
3523static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3524{
3525        u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3526
3527        ifcfg = (ifcfg & 0xf7f) | 0x9b1000;     /* from chip spec */
3528        if (want_gen2i)
3529                ifcfg |= (1 << 7);              /* enable gen2i speed */
3530        writelfl(ifcfg, port_mmio + SATA_IFCFG);
3531}
3532
3533static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3534                             unsigned int port_no)
3535{
3536        void __iomem *port_mmio = mv_port_base(mmio, port_no);
3537
3538        /*
3539         * The datasheet warns against setting EDMA_RESET when EDMA is active
3540         * (but doesn't say what the problem might be).  So we first try
3541         * to disable the EDMA engine before doing the EDMA_RESET operation.
3542         */
3543        mv_stop_edma_engine(port_mmio);
3544        writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3545
3546        if (!IS_GEN_I(hpriv)) {
3547                /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3548                mv_setup_ifcfg(port_mmio, 1);
3549        }
3550        /*
3551         * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3552         * link, and physical layers.  It resets all SATA interface registers
3553         * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3554         */
3555        writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3556        udelay(25);     /* allow reset propagation */
3557        writelfl(0, port_mmio + EDMA_CMD);
3558
3559        hpriv->ops->phy_errata(hpriv, mmio, port_no);
3560
3561        if (IS_GEN_I(hpriv))
3562                mdelay(1);
3563}
3564
3565static void mv_pmp_select(struct ata_port *ap, int pmp)
3566{
3567        if (sata_pmp_supported(ap)) {
3568                void __iomem *port_mmio = mv_ap_base(ap);
3569                u32 reg = readl(port_mmio + SATA_IFCTL);
3570                int old = reg & 0xf;
3571
3572                if (old != pmp) {
3573                        reg = (reg & ~0xf) | pmp;
3574                        writelfl(reg, port_mmio + SATA_IFCTL);
3575                }
3576        }
3577}
3578
3579static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3580                                unsigned long deadline)
3581{
3582        mv_pmp_select(link->ap, sata_srst_pmp(link));
3583        return sata_std_hardreset(link, class, deadline);
3584}
3585
3586static int mv_softreset(struct ata_link *link, unsigned int *class,
3587                                unsigned long deadline)
3588{
3589        mv_pmp_select(link->ap, sata_srst_pmp(link));
3590        return ata_sff_softreset(link, class, deadline);
3591}
3592
3593static int mv_hardreset(struct ata_link *link, unsigned int *class,
3594                        unsigned long deadline)
3595{
3596        struct ata_port *ap = link->ap;
3597        struct mv_host_priv *hpriv = ap->host->private_data;
3598        struct mv_port_priv *pp = ap->private_data;
3599        void __iomem *mmio = hpriv->base;
3600        int rc, attempts = 0, extra = 0;
3601        u32 sstatus;
3602        bool online;
3603
3604        mv_reset_channel(hpriv, mmio, ap->port_no);
3605        pp->pp_flags &= ~(MV_PP_FLAG_EDMA_EN | MV_PP_FLAG_FBS_EN |
3606                          MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3608
3609        /* Workaround for errata FEr SATA#10 (part 2) */
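            /*
             * For reference, the SStatus values tested below decode as
             * DET (bits 3:0), SPD (bits 7:4), IPM (bits 11:8):
             *   0x121: presence detected, no phy communication (DET=1);
             *   0x113: phy up at 1.5 Gb/s;  0x123: phy up at 3.0 Gb/s;
             *   0x000: no device detected.
             */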
3610        do {
3611                const unsigned long *timing =
3612                                sata_ehc_deb_timing(&link->eh_context);
3613
3614                rc = sata_link_hardreset(link, timing, deadline + extra,
3615                                         &online, NULL);
3616                rc = online ? -EAGAIN : rc;
3617                if (rc)
3618                        return rc;
3619                sata_scr_read(link, SCR_STATUS, &sstatus);
3620                if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3621                        /* Force 1.5gb/s link speed and try again */
3622                        mv_setup_ifcfg(mv_ap_base(ap), 0);
3623                        if (time_after(jiffies + HZ, deadline))
3624                                extra = HZ; /* only extend it once, max */
3625                }
3626        } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3627        mv_save_cached_regs(ap);
3628        mv_edma_cfg(ap, 0, 0);
3629
3630        return rc;
3631}
3632
3633static void mv_eh_freeze(struct ata_port *ap)
3634{
3635        mv_stop_edma(ap);
3636        mv_enable_port_irqs(ap, 0);
3637}
3638
3639static void mv_eh_thaw(struct ata_port *ap)
3640{
3641        struct mv_host_priv *hpriv = ap->host->private_data;
3642        unsigned int port = ap->port_no;
3643        unsigned int hardport = mv_hardport_from_port(port);
3644        void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3645        void __iomem *port_mmio = mv_ap_base(ap);
3646        u32 hc_irq_cause;
3647
3648        /* clear EDMA errors on this port */
3649        writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3650
3651        /* clear pending irq events */
3652        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3653        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3654
3655        mv_enable_port_irqs(ap, ERR_IRQ);
3656}
3657
3658/**
3659 *      mv_port_init - Perform some early initialization on a single port.
3660 *      @port: libata data structure storing shadow register addresses
3661 *      @port_mmio: base address of the port
3662 *
3663 *      Initialize shadow register mmio addresses, clear outstanding
3664 *      interrupts on the port, and unmask interrupts for the future
3665 *      start of the port.
3666 *
3667 *      LOCKING:
3668 *      Inherited from caller.
3669 */
3670static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
3671{
3672        void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3673
3674        /* PIO related setup */
3676        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3677        port->error_addr =
3678                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3679        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3680        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3681        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3682        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3683        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3684        port->status_addr =
3685                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3686        /* special case: control/altstatus doesn't have ATA_REG_ address */
3687        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3688
3689        /* Clear any currently outstanding port interrupt conditions */
3690        serr = port_mmio + mv_scr_offset(SCR_ERROR);
3691        writelfl(readl(serr), serr);
3692        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3693
3694        /* unmask all non-transient EDMA error interrupts */
3695        writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3696
3697        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3698                readl(port_mmio + EDMA_CFG),
3699                readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3700                readl(port_mmio + EDMA_ERR_IRQ_MASK));
3701}
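
    /*
     * Editorial note on the layout established above: the shadow block
     * mirrors the classic ATA taskfile with one register per 4-byte
     * slot, so with the standard libata numbering (ATA_REG_DATA == 0,
     * ATA_REG_ERR == 1, ...) data sits at shd_base + 0x00,
     * error/feature at +0x04, and so on up to status/command at +0x1c,
     * with control/altstatus at the separate SHD_CTL_AST offset.
     */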
3702
3703static unsigned int mv_in_pcix_mode(struct ata_host *host)
3704{
3705        struct mv_host_priv *hpriv = host->private_data;
3706        void __iomem *mmio = hpriv->base;
3707        u32 reg;
3708
3709        if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3710                return 0;       /* not PCI-X capable */
3711        reg = readl(mmio + MV_PCI_MODE);
3712        if ((reg & MV_PCI_MODE_MASK) == 0)
3713                return 0;       /* conventional PCI mode */
3714        return 1;       /* chip is in PCI-X mode */
3715}
3716
3717static int mv_pci_cut_through_okay(struct ata_host *host)
3718{
3719        struct mv_host_priv *hpriv = host->private_data;
3720        void __iomem *mmio = hpriv->base;
3721        u32 reg;
3722
3723        if (!mv_in_pcix_mode(host)) {
3724                reg = readl(mmio + MV_PCI_COMMAND);
3725                if (reg & MV_PCI_COMMAND_MRDTRIG)
3726                        return 0; /* not okay */
3727        }
3728        return 1; /* okay */
3729}
3730
3731static void mv_60x1b2_errata_pci7(struct ata_host *host)
3732{
3733        struct mv_host_priv *hpriv = host->private_data;
3734        void __iomem *mmio = hpriv->base;
3735
3736        /* workaround for 60x1-B2 errata PCI#7 */
3737        if (mv_in_pcix_mode(host)) {
3738                u32 reg = readl(mmio + MV_PCI_COMMAND);
3739                writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3740        }
3741}
3742
3743static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3744{
3745        struct pci_dev *pdev = to_pci_dev(host->dev);
3746        struct mv_host_priv *hpriv = host->private_data;
3747        u32 hp_flags = hpriv->hp_flags;
3748
3749        switch (board_idx) {
3750        case chip_5080:
3751                hpriv->ops = &mv5xxx_ops;
3752                hp_flags |= MV_HP_GEN_I;
3753
3754                switch (pdev->revision) {
3755                case 0x1:
3756                        hp_flags |= MV_HP_ERRATA_50XXB0;
3757                        break;
3758                case 0x3:
3759                        hp_flags |= MV_HP_ERRATA_50XXB2;
3760                        break;
3761                default:
3762                        dev_warn(&pdev->dev,
3763                                 "Applying 50XXB2 workarounds to unknown rev\n");
3764                        hp_flags |= MV_HP_ERRATA_50XXB2;
3765                        break;
3766                }
3767                break;
3768
3769        case chip_504x:
3770        case chip_508x:
3771                hpriv->ops = &mv5xxx_ops;
3772                hp_flags |= MV_HP_GEN_I;
3773
3774                switch (pdev->revision) {
3775                case 0x0:
3776                        hp_flags |= MV_HP_ERRATA_50XXB0;
3777                        break;
3778                case 0x3:
3779                        hp_flags |= MV_HP_ERRATA_50XXB2;
3780                        break;
3781                default:
3782                        dev_warn(&pdev->dev,
3783                                 "Applying B2 workarounds to unknown rev\n");
3784                        hp_flags |= MV_HP_ERRATA_50XXB2;
3785                        break;
3786                }
3787                break;
3788
3789        case chip_604x:
3790        case chip_608x:
3791                hpriv->ops = &mv6xxx_ops;
3792                hp_flags |= MV_HP_GEN_II;
3793
3794                switch (pdev->revision) {
3795                case 0x7:
3796                        mv_60x1b2_errata_pci7(host);
3797                        hp_flags |= MV_HP_ERRATA_60X1B2;
3798                        break;
3799                case 0x9:
3800                        hp_flags |= MV_HP_ERRATA_60X1C0;
3801                        break;
3802                default:
3803                        dev_warn(&pdev->dev,
3804                                 "Applying B2 workarounds to unknown rev\n");
3805                        hp_flags |= MV_HP_ERRATA_60X1B2;
3806                        break;
3807                }
3808                break;
3809
3810        case chip_7042:
3811                hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3812                if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3813                    (pdev->device == 0x2300 || pdev->device == 0x2310))
3814                {
3815                        /*
3816                         * Highpoint RocketRAID PCIe 23xx series cards:
3817                         *
3818                         * Unconfigured drives are treated as "Legacy"
3819                         * by the BIOS, and it overwrites sector 8 with
3820                         * a "Lgcy" metadata block prior to Linux boot.
3821                         *
3822                         * Configured drives (RAID or JBOD) leave sector 8
3823                         * alone, but instead overwrite a high numbered
3824                         * sector for the RAID metadata.  This sector can
3825                         * be determined exactly, by truncating the physical
3826                         * drive capacity to a nice even GB value.
3827                         *
3828                         * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3829                         *
3830                         * Warn the user, lest they think we're just buggy.
3831                         */
3832                        printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3833                                " BIOS CORRUPTS DATA on all attached drives,"
3834                                " regardless of if/how they are configured."
3835                                " BEWARE!\n");
3836                        printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3837                                " use sectors 8-9 on \"Legacy\" drives,"
3838                                " and avoid the final two gigabytes on"
3839                                " all RocketRAID BIOS initialized drives.\n");
3840                }
3841                /* fall through */
3842        case chip_6042:
3843                hpriv->ops = &mv6xxx_ops;
3844                hp_flags |= MV_HP_GEN_IIE;
3845                if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3846                        hp_flags |= MV_HP_CUT_THROUGH;
3847
3848                switch (pdev->revision) {
3849                case 0x2: /* Rev.B0: the first/only public release */
3850                        hp_flags |= MV_HP_ERRATA_60X1C0;
3851                        break;
3852                default:
3853                        dev_warn(&pdev->dev,
3854                                 "Applying 60X1C0 workarounds to unknown rev\n");
3855                        hp_flags |= MV_HP_ERRATA_60X1C0;
3856                        break;
3857                }
3858                break;
3859        case chip_soc:
3860                if (soc_is_65n(hpriv))
3861                        hpriv->ops = &mv_soc_65n_ops;
3862                else
3863                        hpriv->ops = &mv_soc_ops;
3864                hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3865                        MV_HP_ERRATA_60X1C0;
3866                break;
3867
3868        default:
3869                dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
3870                return 1;
3871        }
3872
3873        hpriv->hp_flags = hp_flags;
3874        if (hp_flags & MV_HP_PCIE) {
3875                hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3876                hpriv->irq_mask_offset  = PCIE_IRQ_MASK;
3877                hpriv->unmask_all_irqs  = PCIE_UNMASK_ALL_IRQS;
3878        } else {
3879                hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3880                hpriv->irq_mask_offset  = PCI_IRQ_MASK;
3881                hpriv->unmask_all_irqs  = PCI_UNMASK_ALL_IRQS;
3882        }
3883
3884        return 0;
3885}
3886
3887/**
3888 *      mv_init_host - Perform some early initialization of the host.
3889 *      @host: ATA host to initialize
3890 *
3891 *      If possible, do an early global reset of the host.  Then do
3892 *      our port init and clear/unmask all/relevant host interrupts.
3893 *
3894 *      LOCKING:
3895 *      Inherited from caller.
3896 */
3897static int mv_init_host(struct ata_host *host)
3898{
3899        int rc = 0, n_hc, port, hc;
3900        struct mv_host_priv *hpriv = host->private_data;
3901        void __iomem *mmio = hpriv->base;
3902
3903        rc = mv_chip_id(host, hpriv->board_idx);
3904        if (rc)
3905                goto done;
3906
3907        if (IS_SOC(hpriv)) {
3908                hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3909                hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
3910        } else {
3911                hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3912                hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
3913        }
3914
3915        /* initialize shadow irq mask with register's value */
3916        hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3917
3918        /* global interrupt mask: 0 == mask everything */
3919        mv_set_main_irq_mask(host, ~0, 0);
3920
3921        n_hc = mv_get_hc_count(host->ports[0]->flags);
3922
3923        for (port = 0; port < host->n_ports; port++)
3924                if (hpriv->ops->read_preamp)
3925                        hpriv->ops->read_preamp(hpriv, port, mmio);
3926
3927        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3928        if (rc)
3929                goto done;
3930
3931        hpriv->ops->reset_flash(hpriv, mmio);
3932        hpriv->ops->reset_bus(host, mmio);
3933        hpriv->ops->enable_leds(hpriv, mmio);
3934
3935        for (port = 0; port < host->n_ports; port++) {
3936                struct ata_port *ap = host->ports[port];
3937                void __iomem *port_mmio = mv_port_base(mmio, port);
3938
3939                mv_port_init(&ap->ioaddr, port_mmio);
3940        }
3941
3942        for (hc = 0; hc < n_hc; hc++) {
3943                void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3944
3945                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3946                        "(before clear)=0x%08x\n", hc,
3947                        readl(hc_mmio + HC_CFG),
3948                        readl(hc_mmio + HC_IRQ_CAUSE));
3949
3950                /* Clear any currently outstanding hc interrupt conditions */
3951                writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3952        }
3953
3954        if (!IS_SOC(hpriv)) {
3955                /* Clear any currently outstanding host interrupt conditions */
3956                writelfl(0, mmio + hpriv->irq_cause_offset);
3957
3958                /* and unmask interrupt generation for host regs */
3959                writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3960        }
3961
3962        /*
3963         * enable only global host interrupts for now.
3964         * The per-port interrupts get done later as ports are set up.
3965         */
3966        mv_set_main_irq_mask(host, 0, PCI_ERR);
3967        mv_set_irq_coalescing(host, irq_coalescing_io_count,
3968                                    irq_coalescing_usecs);
3969done:
3970        return rc;
3971}
3972
3973static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3974{
3975        hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3976                                                             MV_CRQB_Q_SZ, 0);
3977        if (!hpriv->crqb_pool)
3978                return -ENOMEM;
3979
3980        hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3981                                                             MV_CRPB_Q_SZ, 0);
3982        if (!hpriv->crpb_pool)
3983                return -ENOMEM;
3984
3985        hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3986                                                             MV_SG_TBL_SZ, 0);
3987        if (!hpriv->sg_tbl_pool)
3988                return -ENOMEM;
3989
3990        return 0;
3991}
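
    /*
     * Editorial sketch of how these managed pools are consumed later in
     * the driver: each allocation yields both a CPU pointer and the bus
     * address that gets programmed into the EDMA queue registers
     * (hypothetical helper, illustrative only):
     */
    static void *mv_pool_alloc_sketch(struct mv_host_priv *hpriv,
                                      dma_addr_t *dma_handle)
    {
            /* NULL on failure; release with dma_pool_free() */
            return dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, dma_handle);
    }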
3992
3993static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3994                                 const struct mbus_dram_target_info *dram)
3995{
3996        int i;
3997
3998        for (i = 0; i < 4; i++) {
3999                writel(0, hpriv->base + WINDOW_CTRL(i));
4000                writel(0, hpriv->base + WINDOW_BASE(i));
4001        }
4002
4003        for (i = 0; i < dram->num_cs; i++) {
4004                const struct mbus_dram_window *cs = dram->cs + i;
4005
4006                writel(((cs->size - 1) & 0xffff0000) |
4007                        (cs->mbus_attr << 8) |
4008                        (dram->mbus_dram_target_id << 4) | 1,
4009                        hpriv->base + WINDOW_CTRL(i));
4010                writel(cs->base, hpriv->base + WINDOW_BASE(i));
4011        }
4012}
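
    /*
     * Worked example (made-up numbers) of the WINDOW_CTRL encoding
     * above: a 256 MB chip-select at base 0 with mbus_attr 0xe and
     * target id 0 yields
     *
     *      ((0x10000000 - 1) & 0xffff0000) | (0xe << 8) | (0 << 4) | 1
     *      == 0x0fff0e01
     *
     * i.e. size field in the top 16 bits, attribute in bits 15:8,
     * target id in bits 7:4, and bit 0 as the window enable.
     */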
4013
4014/**
4015 *      mv_platform_probe - handle a positive probe of an SoC Marvell
4016 *      host
4017 *      @pdev: platform device found
4018 *
4019 *      LOCKING:
4020 *      Inherited from caller.
4021 */
4022static int mv_platform_probe(struct platform_device *pdev)
4023{
4024        const struct mv_sata_platform_data *mv_platform_data;
4025        const struct mbus_dram_target_info *dram;
4026        const struct ata_port_info *ppi[] =
4027            { &mv_port_info[chip_soc], NULL };
4028        struct ata_host *host;
4029        struct mv_host_priv *hpriv;
4030        struct resource *res;
4031        int n_ports = 0, irq = 0;
4032        int rc;
4033#if defined(CONFIG_HAVE_CLK)
4034        int port;
4035#endif
4036
4037        ata_print_version_once(&pdev->dev, DRV_VERSION);
4038
4039        /*
4040         * Simple resource validation.
4041         */
4042        if (unlikely(pdev->num_resources != 2)) {
4043                dev_err(&pdev->dev, "invalid number of resources\n");
4044                return -EINVAL;
4045        }
4046
4047        /*
4048         * Get the register base first
4049         */
4050        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4051        if (res == NULL)
4052                return -EINVAL;
4053
4054        /* allocate host */
4055        if (pdev->dev.of_node) {
4056                of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
4057                irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4058        } else {
4059                mv_platform_data = pdev->dev.platform_data;
4060                n_ports = mv_platform_data->n_ports;
4061                irq = platform_get_irq(pdev, 0);
4062        }
4063
4064        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4065        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4066
4067        if (!host || !hpriv)
4068                return -ENOMEM;
4069#if defined(CONFIG_HAVE_CLK)
4070        hpriv->port_clks = devm_kzalloc(&pdev->dev,
4071                                        sizeof(struct clk *) * n_ports,
4072                                        GFP_KERNEL);
4073        if (!hpriv->port_clks)
4074                return -ENOMEM;
4075#endif
4076        host->private_data = hpriv;
4077        hpriv->n_ports = n_ports;
4078        hpriv->board_idx = chip_soc;
4079
4080        host->iomap = NULL;
4081        hpriv->base = devm_ioremap(&pdev->dev, res->start,
4082                                   resource_size(res));
            if (!hpriv->base)
                    return -ENOMEM;
4083        hpriv->base -= SATAHC0_REG_BASE;
4084
4085#if defined(CONFIG_HAVE_CLK)
4086        hpriv->clk = clk_get(&pdev->dev, NULL);
4087        if (IS_ERR(hpriv->clk))
4088                dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4089        else
4090                clk_prepare_enable(hpriv->clk);
4091
4092        for (port = 0; port < n_ports; port++) {
4093                char port_number[16];
4094                sprintf(port_number, "%d", port);
4095                hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4096                if (!IS_ERR(hpriv->port_clks[port]))
4097                        clk_prepare_enable(hpriv->port_clks[port]);
4098        }
4099#endif
4100
4101        /*
4102         * (Re-)program MBUS remapping windows if we are asked to.
4103         */
4104        dram = mv_mbus_dram_info();
4105        if (dram)
4106                mv_conf_mbus_windows(hpriv, dram);
4107
4108        rc = mv_create_dma_pools(hpriv, &pdev->dev);
4109        if (rc)
4110                goto err;
4111
4112        /* initialize adapter */
4113        rc = mv_init_host(host);
4114        if (rc)
4115                goto err;
4116
4117        dev_info(&pdev->dev, "slots %u ports %d\n",
4118                 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4119
4120        rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4121        if (!rc)
4122                return 0;
4123
4124err:
4125#if defined(CONFIG_HAVE_CLK)
4126        if (!IS_ERR(hpriv->clk)) {
4127                clk_disable_unprepare(hpriv->clk);
4128                clk_put(hpriv->clk);
4129        }
4130        for (port = 0; port < n_ports; port++) {
4131                if (!IS_ERR(hpriv->port_clks[port])) {
4132                        clk_disable_unprepare(hpriv->port_clks[port]);
4133                        clk_put(hpriv->port_clks[port]);
4134                }
4135        }
4136#endif
4137
4138        return rc;
4139}
4140
4141/**
4143 *      mv_platform_remove - unplug a platform interface
4144 *      @pdev: platform device
4145 *
4146 *      A platform bus SATA device has been unplugged. Perform the needed
4147 *      cleanup. Also called on module unload for any active devices.
4148 */
4149static int __devexit mv_platform_remove(struct platform_device *pdev)
4150{
4151        struct ata_host *host = platform_get_drvdata(pdev);
4152#if defined(CONFIG_HAVE_CLK)
4153        struct mv_host_priv *hpriv = host->private_data;
4154        int port;
4155#endif
4156        ata_host_detach(host);
4157
4158#if defined(CONFIG_HAVE_CLK)
4159        if (!IS_ERR(hpriv->clk)) {
4160                clk_disable_unprepare(hpriv->clk);
4161                clk_put(hpriv->clk);
4162        }
4163        for (port = 0; port < host->n_ports; port++) {
4164                if (!IS_ERR(hpriv->port_clks[port])) {
4165                        clk_disable_unprepare(hpriv->port_clks[port]);
4166                        clk_put(hpriv->port_clks[port]);
4167                }
4168        }
4169#endif
4170        return 0;
4171}
4172
4173#ifdef CONFIG_PM
4174static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4175{
4176        struct ata_host *host = platform_get_drvdata(pdev);
4177        if (host)
4178                return ata_host_suspend(host, state);
4179        else
4180                return 0;
4181}
4182
4183static int mv_platform_resume(struct platform_device *pdev)
4184{
4185        struct ata_host *host = platform_get_drvdata(pdev);
4186        const struct mbus_dram_target_info *dram;
4187        int ret;
4188
4189        if (host) {
4190                struct mv_host_priv *hpriv = host->private_data;
4191
4192                /*
4193                 * (Re-)program MBUS remapping windows if we are asked to.
4194                 */
4195                dram = mv_mbus_dram_info();
4196                if (dram)
4197                        mv_conf_mbus_windows(hpriv, dram);
4198
4199                /* initialize adapter */
4200                ret = mv_init_host(host);
4201                if (ret) {
4202                        printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4203                        return ret;
4204                }
4205                ata_host_resume(host);
4206        }
4207
4208        return 0;
4209}
4210#else
4211#define mv_platform_suspend NULL
4212#define mv_platform_resume NULL
4213#endif
4214
4215#ifdef CONFIG_OF
4216static const struct of_device_id mv_sata_dt_ids[] = {
4217        { .compatible = "marvell,orion-sata", },
4218        {},
4219};
4220MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4221#endif

static struct platform_driver mv_platform_driver = {
        .probe          = mv_platform_probe,
        .remove         = __devexit_p(mv_platform_remove),
        .suspend        = mv_platform_suspend,
        .resume         = mv_platform_resume,
        .driver         = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(mv_sata_dt_ids),
        },
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
                           const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif

static struct pci_driver mv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = mv_pci_tbl,
        .probe                  = mv_pci_init_one,
        .remove                 = ata_pci_remove_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = mv_pci_device_resume,
#endif
};

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                        if (rc) {
                                dev_err(&pdev->dev,
                                        "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev, "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev,
                                "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}
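
/*
 * For reference only: on later kernels the mask/fallback ladder above
 * collapses into dma_set_mask_and_coherent(). Below is a minimal
 * sketch, assuming a kernel that provides that helper; the function
 * name is illustrative and it is not used by this driver as-is.
 */
static int __maybe_unused pci_go_64_sketch(struct pci_dev *pdev)
{
        int rc;

        /* Try 64-bit streaming + coherent DMA, then fall back to 32-bit */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc)
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                dev_err(&pdev->dev, "DMA enable failed\n");
        return rc;
}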

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u8 scc;
        const char *scc_s, *gen;

        /* Read the PCI class code to see whether the chip presents
         * itself as a plain SCSI controller or in RAID mode
         */
        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
        if (scc == 0)
                scc_s = "SCSI";
        else if (scc == 0x01)
                scc_s = "RAID";
        else
                scc_s = "?";

        if (IS_GEN_I(hpriv))
                gen = "I";
        else if (IS_GEN_II(hpriv))
                gen = "II";
        else if (IS_GEN_IIE(hpriv))
                gen = "IIE";
        else
                gen = "?";

        dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
                 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
                 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
{
        unsigned int board_idx = (unsigned int)ent->driver_data;
        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        int n_ports, port, rc;

        ata_print_version_once(&pdev->dev, DRV_VERSION);

        /* allocate host */
        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!host || !hpriv)
                return -ENOMEM;
        host->private_data = hpriv;
        hpriv->n_ports = n_ports;
        hpriv->board_idx = board_idx;

        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);
        hpriv->base = host->iomap[MV_PRIMARY_BAR];

        rc = pci_go_64(pdev);
        if (rc)
                return rc;

        rc = mv_create_dma_pools(hpriv, &pdev->dev);
        if (rc)
                return rc;

        for (port = 0; port < host->n_ports; port++) {
                struct ata_port *ap = host->ports[port];
                void __iomem *port_mmio = mv_port_base(hpriv->base, port);
                unsigned int offset = port_mmio - hpriv->base;

                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
        }

        /* initialize adapter */
        rc = mv_init_host(host);
        if (rc)
                return rc;

        /* Enable message-signaled interrupts (MSI), if requested */
        if (msi && pci_enable_msi(pdev) == 0)
                hpriv->hp_flags |= MV_HP_FLAG_MSI;

        mv_dump_pci_cfg(pdev, 0x68);
        mv_print_info(host);

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
                                 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
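
/*
 * For reference only: later kernels replace the bare pci_enable_msi()
 * call above with pci_alloc_irq_vectors(). Below is a minimal sketch,
 * assuming a kernel that provides that API; the function name is
 * illustrative and it is not used by this driver as-is.
 */
static int __maybe_unused mv_enable_irq_sketch(struct pci_dev *pdev, int want_msi)
{
        unsigned int flags = PCI_IRQ_LEGACY;

        if (want_msi)
                flags |= PCI_IRQ_MSI;
        /* Request exactly one vector, preferring MSI when allowed */
        return pci_alloc_irq_vectors(pdev, 1, 1, flags);
}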

#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = pci_get_drvdata(pdev);
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        /* initialize adapter */
        rc = mv_init_host(host);
        if (rc)
                return rc;

        ata_host_resume(host);

        return 0;
}
#endif
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
        int rc = -ENODEV;
#ifdef CONFIG_PCI
        rc = pci_register_driver(&mv_pci_driver);
        if (rc < 0)
                return rc;
#endif
        rc = platform_driver_register(&mv_platform_driver);

        /* on platform-driver failure, unwind the PCI registration too */
#ifdef CONFIG_PCI
        if (rc < 0)
                pci_unregister_driver(&mv_pci_driver);
#endif
        return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
        pci_unregister_driver(&mv_pci_driver);
#endif
        platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);