linux/drivers/ide/ide-taskfile.c
/*
 *  Copyright (C) 2000-2002        Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002        Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002        Klaus Smolin
 *                                      IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big, the bad and the ugly.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>
#include <asm/io.h>

void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
        printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
                s, tf->feature, tf->nsect, tf->lbal,
                tf->lbam, tf->lbah, tf->device, tf->command);
        printk("%s: hob: nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x\n",
                s, tf->hob_nsect, tf->hob_lbal,
                tf->hob_lbam, tf->hob_lbah);
#endif
}

int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
        ide_task_t args;

        memset(&args, 0, sizeof(ide_task_t));
        args.tf.nsect = 0x01;
        if (drive->media == ide_disk)
                args.tf.command = ATA_CMD_ID_ATA;
        else
                args.tf.command = ATA_CMD_ID_ATAPI;
        args.tf_flags   = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
        args.data_phase = TASKFILE_IN;
        return ide_raw_taskfile(drive, &args, buf, 1);
}
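
/*
 * Illustrative sketch of a hypothetical caller (for illustration only, not
 * a real user of this helper): fetching the raw IDENTIFY data could look
 * roughly like this, given a valid ide_drive_t.  The buffer must be one
 * full sector.
 *
 *      u16 *id = kmalloc(SECTOR_SIZE, GFP_KERNEL);
 *
 *      if (id && taskfile_lib_get_identify(drive, (u8 *)id) == 0)
 *              printk(KERN_INFO "%s: IDENTIFY word 0: 0x%04x\n",
 *                     drive->name, id[0]);
 *      kfree(id);
 */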

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
static ide_startstop_t task_in_intr(ide_drive_t *);

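/*
 * do_rw_taskfile - issue a taskfile to the drive
 *
 * Loads the taskfile registers (unless this is the PIO fallback replay of a
 * DMA command) and then dispatches on the data phase: PIO-out commands wait
 * for DRQ in pre_task_out_intr(), PIO-in and no-data commands install the
 * matching interrupt handler, and anything else is handed to the DMA engine.
 */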
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
        ide_hwif_t *hwif        = HWIF(drive);
        struct ide_taskfile *tf = &task->tf;
        ide_handler_t *handler = NULL;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;

        if (task->data_phase == TASKFILE_MULTI_IN ||
            task->data_phase == TASKFILE_MULTI_OUT) {
                if (!drive->mult_count) {
                        printk(KERN_ERR "%s: multimode not set!\n",
                                        drive->name);
                        return ide_stopped;
                }
        }

        if (task->tf_flags & IDE_TFLAG_FLAGGED)
                task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

        memcpy(&hwif->task, task, sizeof(*task));

        if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
                ide_tf_dump(drive->name, tf);
                tp_ops->set_irq(hwif, 1);
                SELECT_MASK(drive, 0);
                tp_ops->tf_load(drive, task);
        }

        switch (task->data_phase) {
        case TASKFILE_MULTI_OUT:
        case TASKFILE_OUT:
                tp_ops->exec_command(hwif, tf->command);
                ndelay(400);    /* FIXME */
                return pre_task_out_intr(drive, task->rq);
        case TASKFILE_MULTI_IN:
        case TASKFILE_IN:
                handler = task_in_intr;
                /* fall-through */
        case TASKFILE_NO_DATA:
                if (handler == NULL)
                        handler = task_no_data_intr;
                ide_execute_command(drive, tf->command, handler,
                                    WAIT_WORSTCASE, NULL);
                return ide_started;
        default:
                if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
                    dma_ops->dma_setup(drive))
                        return ide_stopped;
                dma_ops->dma_exec_cmd(drive, tf->command);
                dma_ops->dma_start(drive);
                return ide_started;
        }
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        ide_task_t *task = &hwif->task;
        struct ide_taskfile *tf = &task->tf;
        int custom = (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
        int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
        u8 stat;

        local_irq_enable_in_hardirq();

        while (1) {
                stat = hwif->tp_ops->read_status(hwif);
                if ((stat & ATA_BUSY) == 0 || retries-- == 0)
                        break;
                udelay(10);
        }

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
                if (custom && tf->command == ATA_CMD_SET_MULTI) {
                        drive->mult_req = drive->mult_count = 0;
                        drive->special.b.recalibrate = 1;
                        (void)ide_dump_status(drive, __func__, stat);
                        return ide_stopped;
                } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
                        if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
                                ide_set_handler(drive, &task_no_data_intr,
                                                WAIT_WORSTCASE, NULL);
                                return ide_started;
                        }
                }
                return ide_error(drive, "task_no_data_intr", stat);
                /* calls ide_end_drive_cmd */
        }

        if (!custom)
                ide_end_drive_cmd(drive, stat, ide_read_error(drive));
        else if (tf->command == ATA_CMD_IDLEIMMEDIATE) {
                hwif->tp_ops->tf_read(drive, task);
                if (tf->lbal != 0xc4) {
                        printk(KERN_ERR "%s: head unload failed!\n",
                               drive->name);
                        ide_tf_dump(drive->name, tf);
                } else
                        drive->dev_flags |= IDE_DFLAG_PARKED;
                ide_end_drive_cmd(drive, stat, ide_read_error(drive));
        } else if (tf->command == ATA_CMD_SET_MULTI)
                drive->mult_count = drive->mult_req;

        return ide_stopped;
}

static u8 wait_drive_not_busy(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        int retries;
        u8 stat;

        /*
         * Last sector was transferred, wait until device is ready.  This can
         * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
         */
        for (retries = 0; retries < 1000; retries++) {
                stat = hwif->tp_ops->read_status(hwif);

                if (stat & ATA_BUSY)
                        udelay(10);
                else
                        break;
        }

        if (stat & ATA_BUSY)
                printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

        return stat;
}

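/*
 * Transfer one sector between the device and the current scatterlist
 * segment.  hwif->cursg/cursg_ofs track the position inside the sg list,
 * the page is kmapped atomically (with interrupts disabled around the
 * mapping on highmem configs), and hwif->nleft is decremented for every
 * sector moved.
 */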
static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
                           unsigned int write)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        struct scatterlist *cursg = hwif->cursg;
        struct page *page;
#ifdef CONFIG_HIGHMEM
        unsigned long flags;
#endif
        unsigned int offset;
        u8 *buf;

        cursg = hwif->cursg;
        if (!cursg) {
                cursg = sg;
                hwif->cursg = sg;
        }

        page = sg_page(cursg);
        offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
        local_irq_save(flags);
#endif
        buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

        hwif->nleft--;
        hwif->cursg_ofs++;

        if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
                hwif->cursg = sg_next(hwif->cursg);
                hwif->cursg_ofs = 0;
        }

        /* do the actual data transfer */
        if (write)
                hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
        else
                hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);

        kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
        local_irq_restore(flags);
#endif
}

static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
                          unsigned int write)
{
        unsigned int nsect;

        nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
        while (nsect--)
                ide_pio_sector(drive, rq, write);
}

static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
                                     unsigned int write)
{
        u8 saved_io_32bit = drive->io_32bit;

        if (rq->bio)    /* fs request */
                rq->errors = 0;

        if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *task = rq->special;

                if (task->tf_flags & IDE_TFLAG_IO_16BIT)
                        drive->io_32bit = 0;
        }

        touch_softlockup_watchdog();

        switch (drive->hwif->data_phase) {
        case TASKFILE_MULTI_IN:
        case TASKFILE_MULTI_OUT:
                ide_pio_multi(drive, rq, write);
                break;
        default:
                ide_pio_sector(drive, rq, write);
                break;
        }

        drive->io_32bit = saved_io_32bit;
}

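/*
 * A PIO transfer failed.  For fs requests, work out how many sectors are
 * known good (the sector or multi-sector block whose completion was never
 * confirmed is discounted, depending on the data phase), complete that much
 * of the request, and then let ide_error() deal with the failure.
 */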
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
                                  const char *s, u8 stat)
{
        if (rq->bio) {
                ide_hwif_t *hwif = drive->hwif;
                int sectors = hwif->nsect - hwif->nleft;

                switch (hwif->data_phase) {
                case TASKFILE_IN:
                        if (hwif->nleft)
                                break;
                        /* fall through */
                case TASKFILE_OUT:
                        sectors--;
                        break;
                case TASKFILE_MULTI_IN:
                        if (hwif->nleft)
                                break;
                        /* fall through */
                case TASKFILE_MULTI_OUT:
                        sectors -= drive->mult_count;
                default:
                        break;
                }

                if (sectors > 0) {
                        ide_driver_t *drv;

                        drv = *(ide_driver_t **)rq->rq_disk->private_data;
                        drv->end_request(drive, 1, sectors);
                }
        }
        return ide_error(drive, s, stat);
}

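/*
 * Complete a request once its data phase has finished: taskfile requests
 * are finished through ide_end_drive_cmd() so that status and error reach
 * the submitter, while fs requests go through the owning driver's
 * end_request method (or ide_end_request() when no disk is attached).
 */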
void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
        if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                u8 err = ide_read_error(drive);

                ide_end_drive_cmd(drive, stat, err);
                return;
        }

        if (rq->rq_disk) {
                ide_driver_t *drv;

                drv = *(ide_driver_t **)rq->rq_disk->private_data;
                drv->end_request(drive, 1, rq->nr_sectors);
        } else
                ide_end_request(drive, 1, rq->nr_sectors);
}

/*
 * We got an interrupt on a task_in case, but no errors and no DRQ.
 *
 * It might be a spurious irq (shared irq), but it might be a
 * command that had no output.
 */
static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
{
        /* Command all done? */
        if (OK_STAT(stat, ATA_DRDY, ATA_BUSY)) {
                task_end_request(drive, rq, stat);
                return ide_stopped;
        }

        /* Assume it was a spurious irq */
        ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
        return ide_started;
}

/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = hwif->hwgroup->rq;
        u8 stat = hwif->tp_ops->read_status(hwif);

        /* Error? */
        if (stat & ATA_ERR)
                return task_error(drive, rq, __func__, stat);

        /* Didn't want any data? Odd. */
        if ((stat & ATA_DRQ) == 0)
                return task_in_unexpected(drive, rq, stat);

        ide_pio_datablock(drive, rq, 0);

        /* Are we done? Check status and finish transfer. */
        if (!hwif->nleft) {
                stat = wait_drive_not_busy(drive);
                if (!OK_STAT(stat, 0, BAD_STAT))
                        return task_error(drive, rq, __func__, stat);
                task_end_request(drive, rq, stat);
                return ide_stopped;
        }

        /* Still data left to transfer. */
        ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

        return ide_started;
}

/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = HWGROUP(drive)->rq;
        u8 stat = hwif->tp_ops->read_status(hwif);

        if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
                return task_error(drive, rq, __func__, stat);

        /* Deal with unexpected ATA data phase. */
        if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
                return task_error(drive, rq, __func__, stat);

        if (!hwif->nleft) {
                task_end_request(drive, rq, stat);
                return ide_stopped;
        }

        /* Still data left to transfer. */
        ide_pio_datablock(drive, rq, 1);
        ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

        return ide_started;
}

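/*
 * Called right after a PIO-out command has been issued: wait for the drive
 * to assert DRQ, then push the first data block and install task_out_intr()
 * for the remainder of the transfer.
 */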
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
        ide_startstop_t startstop;

        if (ide_wait_stat(&startstop, drive, ATA_DRQ,
                          drive->bad_wstat, WAIT_DRQ)) {
                printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
                        drive->name, drive->hwif->data_phase ? "MULT" : "",
                        (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
                return startstop;
        }

        if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
                local_irq_disable();

        ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
        ide_pio_datablock(drive, rq, 1);

        return ide_started;
}

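/*
 * Execute @task synchronously through the block layer.  @buf, when @nsect
 * is non-zero, supplies (or receives) whole sectors of PIO data; the call
 * waits for the request to complete and returns 0 on success or a negative
 * error code.
 */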
int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
{
        struct request *rq;
        int error;

        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
        rq->buffer = buf;

        /*
         * (ks) We currently transfer only whole sectors.
         * This is sufficient for now.  But it would be great
         * if we could find a solution to transfer any size,
         * to support special commands like READ LONG.
         */
        rq->hard_nr_sectors = rq->nr_sectors = nsect;
        rq->hard_cur_sectors = rq->current_nr_sectors = nsect;

        if (task->tf_flags & IDE_TFLAG_WRITE)
                rq->cmd_flags |= REQ_RW;

        rq->special = task;
        task->rq = rq;

        error = blk_execute_rq(drive->queue, NULL, rq, 0);
        blk_put_request(rq);

        return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);

int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
{
        task->data_phase = TASKFILE_NO_DATA;

        return ide_raw_taskfile(drive, task, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
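
/*
 * Illustrative sketch of a hypothetical caller (for illustration only):
 * issuing a simple non-data command such as FLUSH CACHE might look roughly
 * like this.  Error handling and the LBA48 FLUSH CACHE EXT variant are
 * omitted.
 *
 *      ide_task_t task;
 *
 *      memset(&task, 0, sizeof(task));
 *      task.tf.command = ATA_CMD_FLUSH;
 *      task.tf_flags   = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 *
 *      if (ide_no_data_taskfile(drive, &task))
 *              printk(KERN_ERR "%s: flush failed\n", drive->name);
 */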

#ifdef CONFIG_IDE_TASK_IOCTL
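/*
 * HDIO_DRIVE_TASKFILE: the user argument is a packed buffer consisting of
 * an ide_task_request_t header, followed by out_size bytes of data to
 * write, followed by in_size bytes of space for data to read back.  The
 * header (and any data buffers) are copied back to userspace once the
 * command completes.
 */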
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
        ide_task_request_t      *req_task;
        ide_task_t              args;
        u8 *outbuf              = NULL;
        u8 *inbuf               = NULL;
        u8 *data_buf            = NULL;
        int err                 = 0;
        int tasksize            = sizeof(struct ide_task_request_s);
        unsigned int taskin     = 0;
        unsigned int taskout    = 0;
        u16 nsect               = 0;
        char __user *buf = (char __user *)arg;

//      printk("IDE Taskfile ...\n");

        req_task = kzalloc(tasksize, GFP_KERNEL);
        if (req_task == NULL)
                return -ENOMEM;
        if (copy_from_user(req_task, buf, tasksize)) {
                kfree(req_task);
                return -EFAULT;
        }

        taskout = req_task->out_size;
        taskin  = req_task->in_size;

        if (taskin > 65536 || taskout > 65536) {
                err = -EINVAL;
                goto abort;
        }

        if (taskout) {
                int outtotal = tasksize;
                outbuf = kzalloc(taskout, GFP_KERNEL);
                if (outbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(outbuf, buf + outtotal, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (taskin) {
                int intotal = tasksize + taskout;
                inbuf = kzalloc(taskin, GFP_KERNEL);
                if (inbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(inbuf, buf + intotal, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        memset(&args, 0, sizeof(ide_task_t));

        memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

        args.data_phase = req_task->data_phase;

        args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
                        IDE_TFLAG_IN_TF;
        if (drive->dev_flags & IDE_DFLAG_LBA48)
                args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

        if (req_task->out_flags.all) {
                args.tf_flags |= IDE_TFLAG_FLAGGED;

                if (req_task->out_flags.b.data)
                        args.tf_flags |= IDE_TFLAG_OUT_DATA;

                if (req_task->out_flags.b.nsector_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
                if (req_task->out_flags.b.sector_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
                if (req_task->out_flags.b.lcyl_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
                if (req_task->out_flags.b.hcyl_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

                if (req_task->out_flags.b.error_feature)
                        args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
                if (req_task->out_flags.b.nsector)
                        args.tf_flags |= IDE_TFLAG_OUT_NSECT;
                if (req_task->out_flags.b.sector)
                        args.tf_flags |= IDE_TFLAG_OUT_LBAL;
                if (req_task->out_flags.b.lcyl)
                        args.tf_flags |= IDE_TFLAG_OUT_LBAM;
                if (req_task->out_flags.b.hcyl)
                        args.tf_flags |= IDE_TFLAG_OUT_LBAH;
        } else {
                args.tf_flags |= IDE_TFLAG_OUT_TF;
                if (args.tf_flags & IDE_TFLAG_LBA48)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB;
        }

        if (req_task->in_flags.b.data)
                args.tf_flags |= IDE_TFLAG_IN_DATA;

        switch (req_task->data_phase) {
                case TASKFILE_MULTI_OUT:
                        if (!drive->mult_count) {
                                /* (hs): give up if multcount is not set */
                                printk(KERN_ERR "%s: %s Multimode Write " \
                                        "multcount is not set\n",
                                        drive->name, __func__);
                                err = -EPERM;
                                goto abort;
                        }
                        /* fall through */
                case TASKFILE_OUT:
                        /* fall through */
                case TASKFILE_OUT_DMAQ:
                case TASKFILE_OUT_DMA:
                        nsect = taskout / SECTOR_SIZE;
                        data_buf = outbuf;
                        break;
                case TASKFILE_MULTI_IN:
                        if (!drive->mult_count) {
                                /* (hs): give up if multcount is not set */
                                printk(KERN_ERR "%s: %s Multimode Read failure " \
                                        "multcount is not set\n",
                                        drive->name, __func__);
                                err = -EPERM;
                                goto abort;
                        }
                        /* fall through */
                case TASKFILE_IN:
                        /* fall through */
                case TASKFILE_IN_DMAQ:
                case TASKFILE_IN_DMA:
                        nsect = taskin / SECTOR_SIZE;
                        data_buf = inbuf;
                        break;
                case TASKFILE_NO_DATA:
                        break;
                default:
                        err = -EFAULT;
                        goto abort;
        }

        if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
                nsect = 0;
        else if (!nsect) {
                nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;

                if (!nsect) {
                        printk(KERN_ERR "%s: in/out command without data\n",
                                        drive->name);
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
                args.tf_flags |= IDE_TFLAG_WRITE;

        err = ide_raw_taskfile(drive, &args, data_buf, nsect);

        memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);

        if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
            req_task->in_flags.all == 0) {
                req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
                if (drive->dev_flags & IDE_DFLAG_LBA48)
                        req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
        }

        if (copy_to_user(buf, req_task, tasksize)) {
                err = -EFAULT;
                goto abort;
        }
        if (taskout) {
                int outtotal = tasksize;
                if (copy_to_user(buf + outtotal, outbuf, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
        if (taskin) {
                int intotal = tasksize + taskout;
                if (copy_to_user(buf + intotal, inbuf, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
abort:
        kfree(req_task);
        kfree(outbuf);
        kfree(inbuf);

//      printk("IDE Taskfile ioctl ended. rc = %i\n", err);

        return err;
}
#endif
