linux/drivers/ide/ide-taskfile.c
/*
 *  Copyright (C) 2000-2002        Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002        Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002        Klaus Smolin
 *                                      IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>

#include <asm/io.h>

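/*
 * Read the taskfile registers back into @cmd after command completion.
 * The low order bytes are read first; if the command used LBA48
 * addressing, the HOB (high order byte) registers are read as well.
 */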
void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;

        /* Be sure we're looking at the low order bytes */
        tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

        tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf);

        if (cmd->tf_flags & IDE_TFLAG_LBA48) {
                tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS);

                tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob);
        }
}

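/* Dump the taskfile registers of @cmd to the log (no-op unless DEBUG). */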
void ide_tf_dump(const char *s, struct ide_cmd *cmd)
{
#ifdef DEBUG
        printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
               s, cmd->tf.feature, cmd->tf.nsect,
               cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah,
               cmd->tf.device, cmd->tf.command);
        printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
               s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah);
#endif
}

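/*
 * Issue ATA IDENTIFY DEVICE (or IDENTIFY PACKET DEVICE for non-disk
 * media) via PIO and store the 512 bytes of identify data in @buf.
 */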
int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.nsect = 0x01;
        if (drive->media == ide_disk)
                cmd.tf.command = ATA_CMD_ID_ATA;
        else
                cmd.tf.command = ATA_CMD_ID_ATAPI;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        cmd.protocol = ATA_PROT_PIO;

        return ide_raw_taskfile(drive, &cmd, buf, 1);
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
static ide_startstop_t task_pio_intr(ide_drive_t *);

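/*
 * Issue the taskfile command described by @orig_cmd: copy it into
 * hwif->cmd, load the taskfile registers (unless this is a DMA->PIO
 * fallback retry) and install the completion handler that matches the
 * command protocol (PIO, NODATA or DMA).
 */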
ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        struct ide_taskfile *tf = &cmd->tf;
        ide_handler_t *handler = NULL;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;

        if (orig_cmd->protocol == ATA_PROT_PIO &&
            (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
            drive->mult_count == 0) {
                pr_err("%s: multimode not set!\n", drive->name);
                return ide_stopped;
        }

        if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
                orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;

        memcpy(cmd, orig_cmd, sizeof(*cmd));

        if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
                ide_tf_dump(drive->name, cmd);
                tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

                if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
                        u8 data[2] = { cmd->tf.data, cmd->hob.data };

                        tp_ops->output_data(drive, cmd, data, 2);
                }

                if (cmd->valid.out.tf & IDE_VALID_DEVICE) {
                        u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ?
                                  0xE0 : 0xEF;

                        if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED))
                                cmd->tf.device &= HIHI;
                        cmd->tf.device |= drive->select;
                }

                tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob);
                tp_ops->tf_load(drive, &cmd->tf,  cmd->valid.out.tf);
        }

        switch (cmd->protocol) {
        case ATA_PROT_PIO:
                if (cmd->tf_flags & IDE_TFLAG_WRITE) {
                        tp_ops->exec_command(hwif, tf->command);
                        ndelay(400);    /* FIXME */
                        return pre_task_out_intr(drive, cmd);
                }
                handler = task_pio_intr;
                /* fall-through */
        case ATA_PROT_NODATA:
                if (handler == NULL)
                        handler = task_no_data_intr;
                ide_execute_command(drive, cmd, handler, WAIT_WORSTCASE);
                return ide_started;
        case ATA_PROT_DMA:
                if (ide_dma_prepare(drive, cmd))
                        return ide_stopped;
                hwif->expiry = dma_ops->dma_timer_expiry;
                ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
                dma_ops->dma_start(drive);
        default:
                return ide_started;
        }
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

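/*
 * Completion handler for commands without a data phase: wait briefly
 * for BSY to clear, apply the custom handling needed by SET MULTIPLE
 * MODE and INITIALIZE DEVICE PARAMETERS, and finish the request.
 */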
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        struct ide_taskfile *tf = &cmd->tf;
        int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
        int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
        u8 stat;

        local_irq_enable_in_hardirq();

        while (1) {
                stat = hwif->tp_ops->read_status(hwif);
                if ((stat & ATA_BUSY) == 0 || retries-- == 0)
                        break;
                udelay(10);
        }

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
                if (custom && tf->command == ATA_CMD_SET_MULTI) {
                        drive->mult_req = drive->mult_count = 0;
                        drive->special_flags |= IDE_SFLAG_RECALIBRATE;
                        (void)ide_dump_status(drive, __func__, stat);
                        return ide_stopped;
                } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
                        if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
                                ide_set_handler(drive, &task_no_data_intr,
                                                WAIT_WORSTCASE);
                                return ide_started;
                        }
                }
                return ide_error(drive, "task_no_data_intr", stat);
        }

        if (custom && tf->command == ATA_CMD_SET_MULTI)
                drive->mult_count = drive->mult_req;

        if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
            tf->command == ATA_CMD_CHK_POWER) {
                struct request *rq = hwif->rq;

                if (blk_pm_request(rq))
                        ide_complete_pm_rq(drive, rq);
                else
                        ide_finish_cmd(drive, cmd, stat);
        }

        return ide_stopped;
}

static u8 wait_drive_not_busy(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        int retries;
        u8 stat;

        /*
         * Last sector was transferred, wait until device is ready.  This can
         * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
         */
        for (retries = 0; retries < 1000; retries++) {
                stat = hwif->tp_ops->read_status(hwif);

                if (stat & ATA_BUSY)
                        udelay(10);
                else
                        break;
        }

        if (stat & ATA_BUSY)
                pr_err("%s: drive still BUSY!\n", drive->name);

        return stat;
}

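/*
 * Transfer @len bytes of PIO data between the device and the command's
 * scatterlist, at most one page at a time.  Pages are mapped with
 * kmap_atomic(); for highmem pages interrupts are disabled around the
 * transfer.
 */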
void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
                   unsigned int write, unsigned int len)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        struct scatterlist *cursg = cmd->cursg;
        unsigned long uninitialized_var(flags);
        struct page *page;
        unsigned int offset;
        u8 *buf;

        cursg = cmd->cursg;
        if (cursg == NULL)
                cursg = cmd->cursg = sg;

        while (len) {
                unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);
                int page_is_high;

                if (nr_bytes > PAGE_SIZE)
                        nr_bytes = PAGE_SIZE;

                page = sg_page(cursg);
                offset = cursg->offset + cmd->cursg_ofs;

                /* get the current page and offset */
                page = nth_page(page, (offset >> PAGE_SHIFT));
                offset %= PAGE_SIZE;

                page_is_high = PageHighMem(page);
                if (page_is_high)
                        local_irq_save(flags);

                buf = kmap_atomic(page) + offset;

                cmd->nleft -= nr_bytes;
                cmd->cursg_ofs += nr_bytes;

                if (cmd->cursg_ofs == cursg->length) {
                        cursg = cmd->cursg = sg_next(cmd->cursg);
                        cmd->cursg_ofs = 0;
                }

                /* do the actual data transfer */
                if (write)
                        hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes);
                else
                        hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);

                kunmap_atomic(buf);

                if (page_is_high)
                        local_irq_restore(flags);

                len -= nr_bytes;
        }
}
EXPORT_SYMBOL_GPL(ide_pio_bytes);

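/*
 * Transfer one block of PIO data: a single sector, or up to mult_count
 * sectors for READ/WRITE MULTIPLE commands.  Temporarily drops to
 * 16-bit I/O when the command requires it.
 */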
static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
                              unsigned int write)
{
        unsigned int nr_bytes;

        u8 saved_io_32bit = drive->io_32bit;

        if (cmd->tf_flags & IDE_TFLAG_FS)
                cmd->rq->errors = 0;

        if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
                drive->io_32bit = 0;

        touch_softlockup_watchdog();

        if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
                nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9);
        else
                nr_bytes = SECTOR_SIZE;

        ide_pio_bytes(drive, cmd, write, nr_bytes);

        drive->io_32bit = saved_io_32bit;
}

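/*
 * On error, account for the data transferred before the failure.  For
 * PIO the last block is discounted (for writes, and for reads that had
 * already transferred everything), as its completion was never
 * confirmed by the device.
 */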
static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
{
        if (cmd->tf_flags & IDE_TFLAG_FS) {
                int nr_bytes = cmd->nbytes - cmd->nleft;

                if (cmd->protocol == ATA_PROT_PIO &&
                    ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
                        if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
                                nr_bytes -= drive->mult_count << 9;
                        else
                                nr_bytes -= SECTOR_SIZE;
                }

                if (nr_bytes > 0)
                        ide_complete_rq(drive, 0, nr_bytes);
        }
}

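/*
 * Complete a taskfile request: read the error register, record the
 * result, update the transfer rate after a successful SET FEATURES -
 * XFER MODE command, and end the block layer request.
 */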
void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
{
        struct request *rq = drive->hwif->rq;
        u8 err = ide_read_error(drive), nsect = cmd->tf.nsect;
        u8 set_xfer = !!(cmd->tf_flags & IDE_TFLAG_SET_XFER);

        ide_complete_cmd(drive, cmd, stat, err);
        rq->errors = err;

        if (err == 0 && set_xfer) {
                ide_set_xfer_rate(drive, nsect);
                ide_driveid_update(drive);
        }

        ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
}

/*
 * Handler for command with PIO data phase.
 */
static ide_startstop_t task_pio_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &drive->hwif->cmd;
        u8 stat = hwif->tp_ops->read_status(hwif);
        u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);

        if (write == 0) {
                /* Error? */
                if (stat & ATA_ERR)
                        goto out_err;

                /* Didn't want any data? Odd. */
                if ((stat & ATA_DRQ) == 0) {
                        /* Command all done? */
                        if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
                                goto out_end;

                        /* Assume it was a spurious irq */
                        goto out_wait;
                }
        } else {
                if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
                        goto out_err;

                /* Deal with unexpected ATA data phase. */
                if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
                        goto out_err;
        }

        if (write && cmd->nleft == 0)
                goto out_end;

        /* Still data left to transfer. */
        ide_pio_datablock(drive, cmd, write);

        /* Are we done? Check status and finish transfer. */
        if (write == 0 && cmd->nleft == 0) {
                stat = wait_drive_not_busy(drive);
                if (!OK_STAT(stat, 0, BAD_STAT))
                        goto out_err;

                goto out_end;
        }
out_wait:
        /* Still data left to transfer. */
        ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
        return ide_started;
out_end:
        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                ide_finish_cmd(drive, cmd, stat);
        else
                ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
        return ide_stopped;
out_err:
        ide_error_cmd(drive, cmd);
        return ide_error(drive, __func__, stat);
}

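/*
 * Called right after a PIO-out (write) command has been issued: wait
 * for the device to assert DRQ, install the PIO interrupt handler and
 * send the first data block.
 */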
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
                                         struct ide_cmd *cmd)
{
        ide_startstop_t startstop;

        if (ide_wait_stat(&startstop, drive, ATA_DRQ,
                          drive->bad_wstat, WAIT_DRQ)) {
                pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive->name,
                        (cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
                        (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
                return startstop;
        }

        if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
                local_irq_disable();

        ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);

        ide_pio_datablock(drive, cmd, 1);

        return ide_started;
}

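/*
 * Synchronously execute a taskfile command: allocate a request, map
 * the optional data buffer of @nsect whole sectors into it and run it
 * through the block layer, sleeping until completion.
 */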
int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
                     u16 nsect)
{
        struct request *rq;
        int error;
        int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;

        rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;

        /*
         * (ks) We currently transfer only whole sectors.  This is
         * sufficient for now, but it would be great to find a way to
         * transfer any size, to support special commands like READ LONG.
         */
        if (nsect) {
                error = blk_rq_map_kern(drive->queue, rq, buf,
                                        nsect * SECTOR_SIZE, __GFP_WAIT);
                if (error)
                        goto put_req;
        }

        rq->special = cmd;
        cmd->rq = rq;

        error = blk_execute_rq(drive->queue, NULL, rq, 0);

put_req:
        blk_put_request(rq);
        return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);

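/* Convenience wrapper for taskfile commands without a data phase. */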
int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
{
        cmd->protocol = ATA_PROT_NODATA;

        return ide_raw_taskfile(drive, cmd, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);

#ifdef CONFIG_IDE_TASK_IOCTL
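/*
 * HDIO_DRIVE_TASKFILE ioctl handler: copy the ide_task_request_t and
 * any out/in data buffers from user space, translate them into a
 * struct ide_cmd, execute it and copy the resulting taskfile and data
 * back to user space.
 */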
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
{
        ide_task_request_t      *req_task;
        struct ide_cmd          cmd;
        u8 *outbuf              = NULL;
        u8 *inbuf               = NULL;
        u8 *data_buf            = NULL;
        int err                 = 0;
        int tasksize            = sizeof(struct ide_task_request_s);
        unsigned int taskin     = 0;
        unsigned int taskout    = 0;
        u16 nsect               = 0;
        char __user *buf = (char __user *)arg;

        req_task = memdup_user(buf, tasksize);
        if (IS_ERR(req_task))
                return PTR_ERR(req_task);

        taskout = req_task->out_size;
        taskin  = req_task->in_size;

        if (taskin > 65536 || taskout > 65536) {
                err = -EINVAL;
                goto abort;
        }

        if (taskout) {
                int outtotal = tasksize;
                outbuf = kzalloc(taskout, GFP_KERNEL);
                if (outbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(outbuf, buf + outtotal, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (taskin) {
                int intotal = tasksize + taskout;
                inbuf = kzalloc(taskin, GFP_KERNEL);
                if (inbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(inbuf, buf + intotal, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        memset(&cmd, 0, sizeof(cmd));

        memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(&cmd.tf,  req_task->io_ports,  HDIO_DRIVE_TASK_HDR_SIZE);

        cmd.valid.out.tf = IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_DEVICE | IDE_VALID_IN_TF;
        cmd.tf_flags = IDE_TFLAG_IO_16BIT;

        if (drive->dev_flags & IDE_DFLAG_LBA48) {
                cmd.tf_flags |= IDE_TFLAG_LBA48;
                cmd.valid.in.hob = IDE_VALID_IN_HOB;
        }

        if (req_task->out_flags.all) {
                cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;

                if (req_task->out_flags.b.data)
                        cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;

                if (req_task->out_flags.b.nsector_hob)
                        cmd.valid.out.hob |= IDE_VALID_NSECT;
                if (req_task->out_flags.b.sector_hob)
                        cmd.valid.out.hob |= IDE_VALID_LBAL;
                if (req_task->out_flags.b.lcyl_hob)
                        cmd.valid.out.hob |= IDE_VALID_LBAM;
                if (req_task->out_flags.b.hcyl_hob)
                        cmd.valid.out.hob |= IDE_VALID_LBAH;

                if (req_task->out_flags.b.error_feature)
                        cmd.valid.out.tf  |= IDE_VALID_FEATURE;
                if (req_task->out_flags.b.nsector)
                        cmd.valid.out.tf  |= IDE_VALID_NSECT;
                if (req_task->out_flags.b.sector)
                        cmd.valid.out.tf  |= IDE_VALID_LBAL;
                if (req_task->out_flags.b.lcyl)
                        cmd.valid.out.tf  |= IDE_VALID_LBAM;
                if (req_task->out_flags.b.hcyl)
                        cmd.valid.out.tf  |= IDE_VALID_LBAH;
        } else {
                cmd.valid.out.tf |= IDE_VALID_OUT_TF;
                if (cmd.tf_flags & IDE_TFLAG_LBA48)
                        cmd.valid.out.hob |= IDE_VALID_OUT_HOB;
        }

        if (req_task->in_flags.b.data)
                cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;

        if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
                /* fixup data phase if needed */
                if (req_task->data_phase == TASKFILE_IN_DMAQ ||
                    req_task->data_phase == TASKFILE_IN_DMA)
                        cmd.tf_flags |= IDE_TFLAG_WRITE;
        }

        cmd.protocol = ATA_PROT_DMA;

        switch (req_task->data_phase) {
        case TASKFILE_MULTI_OUT:
                if (!drive->mult_count) {
                        /* (hs): give up if multcount is not set */
                        pr_err("%s: %s Multimode Write multcount is not set\n",
                                drive->name, __func__);
                        err = -EPERM;
                        goto abort;
                }
                cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
                /* fall through */
        case TASKFILE_OUT:
                cmd.protocol = ATA_PROT_PIO;
                /* fall through */
        case TASKFILE_OUT_DMAQ:
        case TASKFILE_OUT_DMA:
                cmd.tf_flags |= IDE_TFLAG_WRITE;
                nsect = taskout / SECTOR_SIZE;
                data_buf = outbuf;
                break;
        case TASKFILE_MULTI_IN:
                if (!drive->mult_count) {
                        /* (hs): give up if multcount is not set */
                        pr_err("%s: %s Multimode Read multcount is not set\n",
                                drive->name, __func__);
                        err = -EPERM;
                        goto abort;
                }
                cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
                /* fall through */
        case TASKFILE_IN:
                cmd.protocol = ATA_PROT_PIO;
                /* fall through */
        case TASKFILE_IN_DMAQ:
        case TASKFILE_IN_DMA:
                nsect = taskin / SECTOR_SIZE;
                data_buf = inbuf;
                break;
        case TASKFILE_NO_DATA:
                cmd.protocol = ATA_PROT_NODATA;
                break;
        default:
                err = -EFAULT;
                goto abort;
        }

        if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
                nsect = 0;
        else if (!nsect) {
                nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect;

                if (!nsect) {
                        pr_err("%s: in/out command without data\n",
                                        drive->name);
                        err = -EFAULT;
                        goto abort;
                }
        }

        err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);

        memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(req_task->io_ports,  &cmd.tf,  HDIO_DRIVE_TASK_HDR_SIZE);

        if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
            req_task->in_flags.all == 0) {
                req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
                if (drive->dev_flags & IDE_DFLAG_LBA48)
                        req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
        }

        if (copy_to_user(buf, req_task, tasksize)) {
                err = -EFAULT;
                goto abort;
        }
        if (taskout) {
                int outtotal = tasksize;
                if (copy_to_user(buf + outtotal, outbuf, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
        if (taskin) {
                int intotal = tasksize + taskout;
                if (copy_to_user(buf + intotal, inbuf, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
abort:
        kfree(req_task);
        kfree(outbuf);
        kfree(inbuf);

        return err;
}
#endif