/* linux/drivers/ide/ide-dma.c */
   1/*
   2 *  linux/drivers/ide/ide-dma.c         Version 4.10    June 9, 2000
   3 *
   4 *  Copyright (c) 1999-2000     Andre Hedrick <andre@linux-ide.org>
   5 *  May be copied or modified under the terms of the GNU General Public License
   6 */
   7
   8/*
   9 *  Special Thanks to Mark for his Six years of work.
  10 *
  11 *  Copyright (c) 1995-1998  Mark Lord
  12 *  May be copied or modified under the terms of the GNU General Public License
  13 */
  14
  15/*
  16 * This module provides support for the bus-master IDE DMA functions
  17 * of various PCI chipsets, including the Intel PIIX (i82371FB for
  18 * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and 
  19 * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset)
  20 * ("PIIX" stands for "PCI ISA IDE Xcellerator").
  21 *
  22 * Pretty much the same code works for other IDE PCI bus-mastering chipsets.
  23 *
  24 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
  25 *
  26 * By default, DMA support is prepared for use, but is currently enabled only
  27 * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single),
  28 * or which are recognized as "good" (see table below).  Drives with only mode0
  29 * or mode1 (multi/single) DMA should also work with this chipset/driver
  30 * (eg. MC2112A) but are not enabled by default.
  31 *
  32 * Use "hdparm -i" to view modes supported by a given drive.
  33 *
  34 * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling
  35 * DMA support, but must be (re-)compiled against this kernel version or later.
  36 *
  37 * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting.
  38 * If problems arise, ide.c will disable DMA operation after a few retries.
  39 * This error recovery mechanism works and has been extremely well exercised.
  40 *
  41 * IDE drives, depending on their vintage, may support several different modes
  42 * of DMA operation.  The boot-time modes are indicated with a "*" in
  43 * the "hdparm -i" listing, and can be changed with *knowledgeable* use of
  44 * the "hdparm -X" feature.  There is seldom a need to do this, as drives
  45 * normally power-up with their "best" PIO/DMA modes enabled.
  46 *
  47 * Testing has been done with a rather extensive number of drives,
  48 * with Quantum & Western Digital models generally outperforming the pack,
  49 * and Fujitsu & Conner (and some Seagate which are really Conner) drives
  50 * showing more lackluster throughput.
  51 *
  52 * Keep an eye on /var/adm/messages for "DMA disabled" messages.
  53 *
  54 * Some people have reported trouble with Intel Zappa motherboards.
  55 * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0,
  56 * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe
  57 * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this).
  58 *
  59 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
  60 * fixing the problem with the BIOS on some Acer motherboards.
  61 *
  62 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
  63 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
  64 *
  65 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
  66 * at generic DMA -- his patches were referred to when preparing this code.
  67 *
  68 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
  69 * for supplying a Promise UDMA board & WD UDMA drive for this work!
  70 *
  71 * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports.
  72 *
  73 * ATA-66/100 and recovery functions, I forgot the rest......
  74 *
  75 */
  76
  77#include <linux/module.h>
  78#include <linux/types.h>
  79#include <linux/kernel.h>
  80#include <linux/timer.h>
  81#include <linux/mm.h>
  82#include <linux/interrupt.h>
  83#include <linux/pci.h>
  84#include <linux/init.h>
  85#include <linux/ide.h>
  86#include <linux/delay.h>
  87#include <linux/scatterlist.h>
  88
  89#include <asm/io.h>
  90#include <asm/irq.h>
  91
/*
 * Drives known to behave with DMA even though they predate the capability
 * bits checked in config_drive_for_dma().  A NULL firmware revision
 * matches any revision of that model.
 */
static const struct drive_list_entry drive_whitelist [] = {

	{ "Micropolis 2112A"	,	NULL		},
	{ "CONNER CTMA 4000"	,	NULL		},
	{ "CONNER CTT8000-A"	,	NULL		},
	{ "ST34342A"		,	NULL		},
	{ NULL			,	NULL		}
};
 100
/*
 * Drives with broken DMA implementations: DMA is refused for these in
 * __ide_dma_bad_drive().  Entries are { model, firmware revision };
 * a NULL revision blacklists every revision of the model, a non-NULL
 * revision blacklists only that firmware.
 */
static const struct drive_list_entry drive_blacklist [] = {

	{ "WDC AC11000H"	,	NULL 		},
	{ "WDC AC22100H"	,	NULL 		},
	{ "WDC AC32500H"	,	NULL 		},
	{ "WDC AC33100H"	,	NULL 		},
	{ "WDC AC31600H"	,	NULL 		},
	{ "WDC AC32100H"	,	"24.09P07"	},
	{ "WDC AC23200L"	,	"21.10N21"	},
	{ "Compaq CRD-8241B"	,	NULL 		},
	{ "CRD-8400B"		,	NULL 		},
	{ "CRD-8480B",			NULL 		},
	{ "CRD-8482B",			NULL 		},
	{ "CRD-84"		,	NULL 		},
	{ "SanDisk SDP3B"	,	NULL 		},
	{ "SanDisk SDP3B-64"	,	NULL 		},
	{ "SANYO CD-ROM CRD"	,	NULL 		},
	{ "HITACHI CDR-8"	,	NULL 		},
	{ "HITACHI CDR-8335"	,	NULL 		},
	{ "HITACHI CDR-8435"	,	NULL 		},
	{ "Toshiba CD-ROM XM-6202B"	,	NULL 		},
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL 		},
	{ "CD-532E-A"		,	NULL 		},
	{ "E-IDE CD-ROM CR-840",	NULL 		},
	{ "CD-ROM Drive/F5A",	NULL 		},
	{ "WPI CDD-820",		NULL 		},
	{ "SAMSUNG CD-ROM SC-148C",	NULL 		},
	{ "SAMSUNG CD-ROM SC",	NULL 		},
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL 		},
	{ "_NEC DV5800A",		NULL 		},
	{ "SAMSUNG CD-ROM SN-124",	"N001" },
	{ "Seagate STT20000A",		NULL  },
	{ "CD-ROM CDR_U200",		"1.09" },
	{ NULL			,	NULL		}

};
 137
 138/**
 139 *      ide_dma_intr    -       IDE DMA interrupt handler
 140 *      @drive: the drive the interrupt is for
 141 *
 142 *      Handle an interrupt completing a read/write DMA transfer on an 
 143 *      IDE device
 144 */
 145 
 146ide_startstop_t ide_dma_intr (ide_drive_t *drive)
 147{
 148        u8 stat = 0, dma_stat = 0;
 149
 150        dma_stat = HWIF(drive)->ide_dma_end(drive);
 151        stat = HWIF(drive)->INB(IDE_STATUS_REG);        /* get drive status */
 152        if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
 153                if (!dma_stat) {
 154                        struct request *rq = HWGROUP(drive)->rq;
 155
 156                        if (rq->rq_disk) {
 157                                ide_driver_t *drv;
 158
 159                                drv = *(ide_driver_t **)rq->rq_disk->private_data;
 160                                drv->end_request(drive, 1, rq->nr_sectors);
 161                        } else
 162                                ide_end_request(drive, 1, rq->nr_sectors);
 163                        return ide_stopped;
 164                }
 165                printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", 
 166                       drive->name, dma_stat);
 167        }
 168        return ide_error(drive, "dma_intr", stat);
 169}
 170
 171EXPORT_SYMBOL_GPL(ide_dma_intr);
 172
/* True when the drive is on the DMA whitelist table above. */
static int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}
 177
 178#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
 179/**
 180 *      ide_build_sglist        -       map IDE scatter gather for DMA I/O
 181 *      @drive: the drive to build the DMA table for
 182 *      @rq: the request holding the sg list
 183 *
 184 *      Perform the PCI mapping magic necessary to access the source or
 185 *      target buffers of a request via PCI DMA. The lower layers of the
 186 *      kernel provide the necessary cache management so that we can
 187 *      operate in a portable fashion
 188 */
 189
 190int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 191{
 192        ide_hwif_t *hwif = HWIF(drive);
 193        struct scatterlist *sg = hwif->sg_table;
 194
 195        BUG_ON((rq->cmd_type == REQ_TYPE_ATA_TASKFILE) && rq->nr_sectors > 256);
 196
 197        ide_map_sg(drive, rq);
 198
 199        if (rq_data_dir(rq) == READ)
 200                hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
 201        else
 202                hwif->sg_dma_direction = PCI_DMA_TODEVICE;
 203
 204        return pci_map_sg(hwif->pci_dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
 205}
 206
 207EXPORT_SYMBOL_GPL(ide_build_sglist);
 208
/**
 *	ide_build_dmatable	-	build IDE DMA table
 *	@drive: the drive to build the PRD table for
 *	@rq: the request holding the sg list
 *
 *	ide_build_dmatable() prepares a dma request. We map the command
 *	to get the pci bus addresses of the buffers and then build up
 *	the PRD table that the IDE layer wants to be fed. The code
 *	knows about the 64K wrap bug in the CS5530.
 *
 *	Returns the number of built PRD entries if all went okay,
 *	returns 0 otherwise.
 *
 *	May also be invoked from trm290.c
 */
 
int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif	= HWIF(drive);
	unsigned int *table	= hwif->dmatable_cpu;
	unsigned int is_trm290	= (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	/* map the request; zero entries means nothing to DMA */
	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the dma table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES) {
				printk(KERN_ERR "%s: DMA table too small\n", drive->name);
				goto use_pio_instead;
			} else {
				/* bcount: bytes left before the next 64kB boundary */
				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;
				*table++ = cpu_to_le32(cur_addr);
				xcount = bcount & 0xffff;
				/* trm290 encodes (dword count - 1) in the upper 16 bits */
				if (is_trm290)
					xcount = ((xcount >> 2) - 1) << 16;
				if (xcount == 0x0000) {
	/* 
	 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
	 * but at least one (e.g. CS5530) misinterprets it as zero (!).
	 * So here we break the 64KB entry into two 32KB entries instead.
	 */
					if (count++ >= PRD_ENTRIES) {
						printk(KERN_ERR "%s: DMA table too small\n", drive->name);
						goto use_pio_instead;
					}
					*table++ = cpu_to_le32(0x8000);
					*table++ = cpu_to_le32(cur_addr + 0x8000);
					xcount = 0x8000;
				}
				*table++ = cpu_to_le32(xcount);
				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		/* mark the last PRD entry as end-of-table */
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}
	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);
use_pio_instead:
	/* undo the sg mapping done by ide_build_sglist() above */
	pci_unmap_sg(hwif->pci_dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);
	return 0; /* revert to PIO for this request */
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);
 303
 304/**
 305 *      ide_destroy_dmatable    -       clean up DMA mapping
 306 *      @drive: The drive to unmap
 307 *
 308 *      Teardown mappings after DMA has completed. This must be called
 309 *      after the completion of each use of ide_build_dmatable and before
 310 *      the next use of ide_build_dmatable. Failure to do so will cause
 311 *      an oops as only one mapping can be live for each target at a given
 312 *      time.
 313 */
 314 
 315void ide_destroy_dmatable (ide_drive_t *drive)
 316{
 317        struct pci_dev *dev = HWIF(drive)->pci_dev;
 318        struct scatterlist *sg = HWIF(drive)->sg_table;
 319        int nents = HWIF(drive)->sg_nents;
 320
 321        pci_unmap_sg(dev, sg, nents, HWIF(drive)->sg_dma_direction);
 322}
 323
 324EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
 325
 326/**
 327 *      config_drive_for_dma    -       attempt to activate IDE DMA
 328 *      @drive: the drive to place in DMA mode
 329 *
 330 *      If the drive supports at least mode 2 DMA or UDMA of any kind
 331 *      then attempt to place it into DMA mode. Drives that are known to
 332 *      support DMA but predate the DMA properties or that are known
 333 *      to have DMA handling bugs are also set up appropriately based
 334 *      on the good/bad drive lists.
 335 */
 336 
 337static int config_drive_for_dma (ide_drive_t *drive)
 338{
 339        ide_hwif_t *hwif = drive->hwif;
 340        struct hd_driveid *id = drive->id;
 341
 342        if (drive->media != ide_disk) {
 343                if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
 344                        return 0;
 345        }
 346
 347        /*
 348         * Enable DMA on any drive that has
 349         * UltraDMA (mode 0/1/2/3/4/5/6) enabled
 350         */
 351        if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
 352                return 1;
 353
 354        /*
 355         * Enable DMA on any drive that has mode2 DMA
 356         * (multi or single) enabled
 357         */
 358        if (id->field_valid & 2)        /* regular DMA */
 359                if ((id->dma_mword & 0x404) == 0x404 ||
 360                    (id->dma_1word & 0x404) == 0x404)
 361                        return 1;
 362
 363        /* Consult the list of known "good" drives */
 364        if (ide_dma_good_drive(drive))
 365                return 1;
 366
 367        return 0;
 368}
 369
 370/**
 371 *      dma_timer_expiry        -       handle a DMA timeout
 372 *      @drive: Drive that timed out
 373 *
 374 *      An IDE DMA transfer timed out. In the event of an error we ask
 375 *      the driver to resolve the problem, if a DMA transfer is still
 376 *      in progress we continue to wait (arguably we need to add a 
 377 *      secondary 'I don't care what the drive thinks' timeout here)
 378 *      Finally if we have an interrupt we let it complete the I/O.
 379 *      But only one time - we clear expiry and if it's still not
 380 *      completed after WAIT_CMD, we error and retry in PIO.
 381 *      This can occur if an interrupt is lost or due to hang or bugs.
 382 */
 383 
 384static int dma_timer_expiry (ide_drive_t *drive)
 385{
 386        ide_hwif_t *hwif        = HWIF(drive);
 387        u8 dma_stat             = hwif->INB(hwif->dma_status);
 388
 389        printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
 390                drive->name, dma_stat);
 391
 392        if ((dma_stat & 0x18) == 0x18)  /* BUSY Stupid Early Timer !! */
 393                return WAIT_CMD;
 394
 395        HWGROUP(drive)->expiry = NULL;  /* one free ride for now */
 396
 397        /* 1 dmaing, 2 error, 4 intr */
 398        if (dma_stat & 2)       /* ERROR */
 399                return -1;
 400
 401        if (dma_stat & 1)       /* DMAing */
 402                return WAIT_CMD;
 403
 404        if (dma_stat & 4)       /* Got an Interrupt */
 405                return WAIT_CMD;
 406
 407        return 0;       /* Status is unknown -- reset the bus */
 408}
 409
 410/**
 411 *      ide_dma_host_off        -       Generic DMA kill
 412 *      @drive: drive to control
 413 *
 414 *      Perform the generic IDE controller DMA off operation. This
 415 *      works for most IDE bus mastering controllers
 416 */
 417
 418void ide_dma_host_off(ide_drive_t *drive)
 419{
 420        ide_hwif_t *hwif        = HWIF(drive);
 421        u8 unit                 = (drive->select.b.unit & 0x01);
 422        u8 dma_stat             = hwif->INB(hwif->dma_status);
 423
 424        hwif->OUTB((dma_stat & ~(1<<(5+unit))), hwif->dma_status);
 425}
 426
 427EXPORT_SYMBOL(ide_dma_host_off);
 428
/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller without
 *	printing anything: clear the software flag, stop using bounce
 *	buffers, then disable DMA at the host side.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->using_dma = 0;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_host_off(drive);
}

EXPORT_SYMBOL(ide_dma_off_quietly);
 445#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
 446
/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled, then delegate
 *	to the quiet variant.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	drive->hwif->dma_off_quietly(drive);
}

EXPORT_SYMBOL(ide_dma_off);
 462
 463#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
 464/**
 465 *      ide_dma_host_on -       Enable DMA on a host
 466 *      @drive: drive to enable for DMA
 467 *
 468 *      Enable DMA on an IDE controller following generic bus mastering
 469 *      IDE controller behaviour
 470 */
 471
 472void ide_dma_host_on(ide_drive_t *drive)
 473{
 474        if (drive->using_dma) {
 475                ide_hwif_t *hwif        = HWIF(drive);
 476                u8 unit                 = (drive->select.b.unit & 0x01);
 477                u8 dma_stat             = hwif->INB(hwif->dma_status);
 478
 479                hwif->OUTB((dma_stat|(1<<(5+unit))), hwif->dma_status);
 480        }
 481}
 482
 483EXPORT_SYMBOL(ide_dma_host_on);
 484
 485/**
 486 *      __ide_dma_on            -       Enable DMA on a device
 487 *      @drive: drive to enable DMA on
 488 *
 489 *      Enable IDE DMA for a device on this IDE controller.
 490 */
 491 
 492int __ide_dma_on (ide_drive_t *drive)
 493{
 494        /* consult the list of known "bad" drives */
 495        if (__ide_dma_bad_drive(drive))
 496                return 1;
 497
 498        drive->using_dma = 1;
 499        ide_toggle_bounce(drive, 1);
 500
 501        drive->hwif->dma_host_on(drive);
 502
 503        return 0;
 504}
 505
 506EXPORT_SYMBOL(__ide_dma_on);
 507
/**
 *	ide_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers for a device
 *	that follows generic IDE PCI DMA behaviour. Controllers can
 *	override this function if they need to
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned. 
 */

int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;

	/* bit 3 of the command register selects the transfer direction */
	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	if (hwif->mmio)
		writel(hwif->dmatable_dma, (void __iomem *)hwif->dma_prdtable);
	else
		outl(hwif->dmatable_dma, hwif->dma_prdtable);

	/* specify r/w */
	hwif->OUTB(reading, hwif->dma_command);

	/* read dma_status for INTR & ERROR flags */
	dma_stat = hwif->INB(hwif->dma_status);

	/* clear INTR & ERROR flags */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	drive->waiting_for_dma = 1;
	return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_setup);
 558
/*
 * Issue @command to the drive with ide_dma_intr() as the completion
 * handler and dma_timer_expiry() arbitrating a 2*WAIT_CMD timeout.
 */
static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
}
 564
/*
 * Kick off the bus-master engine by setting the start bit in the
 * DMA command register.  Must be called after the command has been
 * issued to the drive (see note below).
 */
void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_cmd		= hwif->INB(hwif->dma_command);

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	/* start DMA */
	hwif->OUTB(dma_cmd|1, hwif->dma_command);
	hwif->dma = 1;
	wmb();
}

EXPORT_SYMBOL_GPL(ide_dma_start);
 582
/*
 * Stop the bus-master engine and tear down the sg mapping.
 * Returns 0 when the engine reports a clean finish (only the
 * interrupt bit set), otherwise 0x10 | dma_stat as an error code.
 */
int __ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat = 0, dma_cmd = 0;

	drive->waiting_for_dma = 0;
	/* get dma_command mode */
	dma_cmd = hwif->INB(hwif->dma_command);
	/* stop DMA */
	hwif->OUTB(dma_cmd&~1, hwif->dma_command);
	/* get DMA status */
	dma_stat = hwif->INB(hwif->dma_status);
	/* clear the INTR & ERROR bits */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status: bits are 1 dmaing, 2 error, 4 intr */
	hwif->dma = 0;
	wmb();
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);
 607
 608/* returns 1 if dma irq issued, 0 otherwise */
 609static int __ide_dma_test_irq(ide_drive_t *drive)
 610{
 611        ide_hwif_t *hwif        = HWIF(drive);
 612        u8 dma_stat             = hwif->INB(hwif->dma_status);
 613
 614        /* return 1 if INTR asserted */
 615        if ((dma_stat & 4) == 4)
 616                return 1;
 617        if (!drive->waiting_for_dma)
 618                printk(KERN_WARNING "%s: (%s) called while not waiting\n",
 619                        drive->name, __FUNCTION__);
 620        return 0;
 621}
#else
/* Without PCI bus-master support DMA can never be configured. */
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
 625
 626int __ide_dma_bad_drive (ide_drive_t *drive)
 627{
 628        struct hd_driveid *id = drive->id;
 629
 630        int blacklist = ide_in_drive_list(id, drive_blacklist);
 631        if (blacklist) {
 632                printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
 633                                    drive->name, id->model);
 634                return blacklist;
 635        }
 636        return 0;
 637}
 638
 639EXPORT_SYMBOL(__ide_dma_bad_drive);
 640
/* DMA mode families, fastest first; searched by ide_find_dma_mode(). */
static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};
 646
/**
 *	ide_get_mode_mask	-	supported-mode mask for one DMA family
 *	@drive: IDE device
 *	@base: family base mode (XFER_UDMA_0, XFER_MW_DMA_0 or XFER_SW_DMA_0)
 *	@req_mode: requested mode, used only to suppress a spurious cable
 *		   warning for UDMA
 *
 *	Intersect the modes advertised in the drive's identify data with
 *	the host capability mask (or the host's filter hook, if any) for
 *	the given transfer-mode family.  Returns the resulting bitmask.
 */
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask = 0;

	switch(base) {
	case XFER_UDMA_0:
		if ((id->field_valid & 4) == 0)
			break;

		if (hwif->udma_filter)
			mask = hwif->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id->dma_ultra;

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
		if (req_mode > XFER_UDMA_2) {
			/* modes above UDMA2 need an 80-wire cable */
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		if ((id->field_valid & 2) == 0)
			break;
		if (hwif->mdma_filter)
			mask = hwif->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id->dma_mword;
		break;
	case XFER_SW_DMA_0:
		if (id->field_valid & 2) {
			mask = id->dma_1word & hwif->swdma_mask;
		} else if (id->tDMA) {
			/*
			 * ide_fix_driveid() doesn't convert ->tDMA to the
			 * CPU endianness so we need to do it here
			 */
			u8 mode = le16_to_cpu(id->tDMA);

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}
 706
/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	/* host may veto DMA for non-disk (ATAPI) devices */
	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/* scan families fastest-first, take the highest set bit */
	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) && drive->id->eide_dma_time < 150)
			mode = XFER_MW_DMA_1;
	}

	/* never exceed what the caller asked for */
	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
			  mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}

EXPORT_SYMBOL_GPL(ide_find_dma_mode);
 759
/*
 * Try to select and program the best DMA mode for @drive.
 * Returns 1 when a DMA mode was successfully set, 0 when DMA must
 * remain off; for IDE_HFLAG_TRUST_BIOS_FOR_DMA hosts the result of
 * config_drive_for_dma() is returned instead.
 */
static int ide_tune_dma(ide_drive_t *drive)
{
	u8 speed;

	/* globally disabled, per-drive disabled, or drive lacks DMA capability */
	if (noautodma || drive->nodma || (drive->id->capability & 1) == 0)
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	/* reject drives with inconsistent identify data */
	if (ide_id_dma_bug(drive))
		return 0;

	if (drive->hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	if (drive->hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}
 790
/*
 * Decide whether DMA should be used for @drive.
 * Return codes feed ide_set_dma(): 0 means enable DMA, -1 means
 * disable it.
 */
static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0;

	if (!vdma && ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	/* DMA tuning failed: fall back to the best PIO mode */
	ide_set_max_pio(drive);

	return vdma ? 0 : -1;
}
 807
 808int ide_id_dma_bug(ide_drive_t *drive)
 809{
 810        struct hd_driveid *id = drive->id;
 811
 812        if (id->field_valid & 4) {
 813                if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
 814                        goto err_out;
 815        } else if (id->field_valid & 2) {
 816                if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
 817                        goto err_out;
 818        }
 819        return 0;
 820err_out:
 821        printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
 822        return 1;
 823}
 824
/*
 * Apply the verdict of ide_dma_check() to the hardware.  Returns -1
 * when DMA was disabled, the result of hwif->ide_dma_on() when it was
 * enabled, or 1 when the current setting is kept.
 */
int ide_set_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int rc;

	rc = ide_dma_check(drive);

	switch(rc) {
	case -1: /* DMA needs to be disabled */
		hwif->dma_off_quietly(drive);
		return -1;
	case  0: /* DMA needs to be enabled */
		return hwif->ide_dma_on(drive);
	case  1: /* DMA setting cannot be changed */
		break;
	default:
		BUG();
		break;
	}

	return rc;
}
 847
 848#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
 849void ide_dma_lost_irq (ide_drive_t *drive)
 850{
 851        printk("%s: DMA interrupt recovery\n", drive->name);
 852}
 853
 854EXPORT_SYMBOL(ide_dma_lost_irq);
 855
/*
 * Generic DMA timeout handler: if the controller reports no pending
 * interrupt, force the DMA transfer to end so the request can be
 * retried.
 */
void ide_dma_timeout (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	/* an interrupt is actually pending -- let it be handled */
	if (hwif->ide_dma_test_irq(drive))
		return;

	hwif->ide_dma_end(drive);
}

EXPORT_SYMBOL(ide_dma_timeout);
 869
 870static void ide_release_dma_engine(ide_hwif_t *hwif)
 871{
 872        if (hwif->dmatable_cpu) {
 873                pci_free_consistent(hwif->pci_dev,
 874                                    PRD_ENTRIES * PRD_BYTES,
 875                                    hwif->dmatable_cpu,
 876                                    hwif->dmatable_dma);
 877                hwif->dmatable_cpu = NULL;
 878        }
 879}
 880
 881static int ide_release_iomio_dma(ide_hwif_t *hwif)
 882{
 883        release_region(hwif->dma_base, 8);
 884        if (hwif->extra_ports)
 885                release_region(hwif->extra_base, hwif->extra_ports);
 886        return 1;
 887}
 888
 889/*
 890 * Needed for allowing full modular support of ide-driver
 891 */
 892int ide_release_dma(ide_hwif_t *hwif)
 893{
 894        ide_release_dma_engine(hwif);
 895
 896        if (hwif->mmio)
 897                return 1;
 898        else
 899                return ide_release_iomio_dma(hwif);
 900}
 901
 902static int ide_allocate_dma_engine(ide_hwif_t *hwif)
 903{
 904        hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
 905                                                  PRD_ENTRIES * PRD_BYTES,
 906                                                  &hwif->dmatable_dma);
 907
 908        if (hwif->dmatable_cpu)
 909                return 0;
 910
 911        printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
 912               hwif->cds->name);
 913
 914        return 1;
 915}
 916
/*
 * MMIO variant of the DMA port setup: nothing needs to be claimed
 * here, only announce the interface.  @base and @ports are accepted
 * for signature parity with ide_iomio_dma() but are unused.
 * Always returns 0 (success).
 */
static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	printk(KERN_INFO "    %s: MMIO-DMA ", hwif->name);

	return 0;
}
 923
 924static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base, unsigned int ports)
 925{
 926        printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx",
 927               hwif->name, base, base + ports - 1);
 928
 929        if (!request_region(base, ports, hwif->name)) {
 930                printk(" -- Error, ports in use.\n");
 931                return 1;
 932        }
 933
 934        if (hwif->cds->extra) {
 935                hwif->extra_base = base + (hwif->channel ? 8 : 16);
 936
 937                if (!hwif->mate || !hwif->mate->extra_ports) {
 938                        if (!request_region(hwif->extra_base,
 939                                            hwif->cds->extra, hwif->cds->name)) {
 940                                printk(" -- Error, extra ports in use.\n");
 941                                release_region(base, ports);
 942                                return 1;
 943                        }
 944                        hwif->extra_ports = hwif->cds->extra;
 945                }
 946        }
 947
 948        return 0;
 949}
 950
 951static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base, unsigned int ports)
 952{
 953        if (hwif->mmio)
 954                return ide_mapped_mmio_dma(hwif, base,ports);
 955
 956        return ide_iomio_dma(hwif, base, ports);
 957}
 958
/*
 * ide_setup_dma - initialize bus-master DMA for an interface
 * @hwif: IDE interface to set up
 * @base: base address of the bus-master register block
 * @num_ports: number of ports in that block
 *
 * Claims the register block, allocates the PRD table, derives the
 * individual bus-master register addresses from @base, and fills in
 * every DMA method the chipset driver left unset with the generic
 * default implementation.  Bails out silently (leaving DMA unset up)
 * if the region or table cannot be obtained.
 */
void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
{
	if (ide_dma_iobase(hwif, base, num_ports))
		return;

	if (ide_allocate_dma_engine(hwif)) {
		/* release the region claimed by ide_dma_iobase() */
		ide_release_dma(hwif);
		return;
	}

	hwif->dma_base = base;

	/* a secondary channel with a mate uses the mate's base as master */
	if (hwif->mate)
		hwif->dma_master = hwif->channel ? hwif->mate->dma_base : base;
	else
		hwif->dma_master = base;

	/* standard bus-master register layout at base..base+4, unless
	 * the chipset driver already supplied its own addresses */
	if (!(hwif->dma_command))
		hwif->dma_command	= hwif->dma_base;
	if (!(hwif->dma_vendor1))
		hwif->dma_vendor1	= (hwif->dma_base + 1);
	if (!(hwif->dma_status))
		hwif->dma_status	= (hwif->dma_base + 2);
	if (!(hwif->dma_vendor3))
		hwif->dma_vendor3	= (hwif->dma_base + 3);
	if (!(hwif->dma_prdtable))
		hwif->dma_prdtable	= (hwif->dma_base + 4);

	/* install generic defaults for any method left NULL */
	if (!hwif->dma_off_quietly)
		hwif->dma_off_quietly = &ide_dma_off_quietly;
	if (!hwif->dma_host_off)
		hwif->dma_host_off = &ide_dma_host_off;
	if (!hwif->ide_dma_on)
		hwif->ide_dma_on = &__ide_dma_on;
	if (!hwif->dma_host_on)
		hwif->dma_host_on = &ide_dma_host_on;
	if (!hwif->dma_setup)
		hwif->dma_setup = &ide_dma_setup;
	if (!hwif->dma_exec_cmd)
		hwif->dma_exec_cmd = &ide_dma_exec_cmd;
	if (!hwif->dma_start)
		hwif->dma_start = &ide_dma_start;
	if (!hwif->ide_dma_end)
		hwif->ide_dma_end = &__ide_dma_end;
	if (!hwif->ide_dma_test_irq)
		hwif->ide_dma_test_irq = &__ide_dma_test_irq;
	if (!hwif->dma_timeout)
		hwif->dma_timeout = &ide_dma_timeout;
	if (!hwif->dma_lost_irq)
		hwif->dma_lost_irq = &ide_dma_lost_irq;

	/* report the BIOS per-drive DMA enables from status bits 5/6;
	 * the TRM290 is skipped — presumably its status register does
	 * not follow this layout (NOTE(review): confirm vs trm290.c) */
	if (hwif->chipset != ide_trm290) {
		u8 dma_stat = hwif->INB(hwif->dma_status);
		printk(", BIOS settings: %s:%s, %s:%s",
		       hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "pio",
		       hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
	}
	printk("\n");

	BUG_ON(!hwif->dma_master);
}
1020
1021EXPORT_SYMBOL_GPL(ide_setup_dma);
1022#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
1023
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.