/* linux/drivers/ide/ide-taskfile.c */
   1/*
   2 *  Copyright (C) 2000-2002        Michael Cornwell <cornwell@acm.org>
   3 *  Copyright (C) 2000-2002        Andre Hedrick <andre@linux-ide.org>
   4 *  Copyright (C) 2001-2002        Klaus Smolin
   5 *                                      IBM Storage Technology Division
   6 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
   7 *
   8 *  The big the bad and the ugly.
   9 */
  10
  11#include <linux/types.h>
  12#include <linux/string.h>
  13#include <linux/kernel.h>
  14#include <linux/export.h>
  15#include <linux/sched.h>
  16#include <linux/interrupt.h>
  17#include <linux/errno.h>
  18#include <linux/slab.h>
  19#include <linux/delay.h>
  20#include <linux/hdreg.h>
  21#include <linux/ide.h>
  22#include <linux/scatterlist.h>
  23#include <linux/uaccess.h>
  24
  25#include <asm/io.h>
  26
  27void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd)
  28{
  29        ide_hwif_t *hwif = drive->hwif;
  30        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
  31
  32        /* Be sure we're looking at the low order bytes */
  33        tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
  34
  35        tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf);
  36
  37        if (cmd->tf_flags & IDE_TFLAG_LBA48) {
  38                tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS);
  39
  40                tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob);
  41        }
  42}
  43
/*
 * Dump the outgoing taskfile registers of @cmd with prefix @s.
 * Compiles to an empty function unless DEBUG is defined.
 */
void ide_tf_dump(const char *s, struct ide_cmd *cmd)
{
#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
	       s, cmd->tf.feature, cmd->tf.nsect,
	       cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah,
	       cmd->tf.device, cmd->tf.command);
	/* High-order (HOB) bytes, used by LBA48 commands. */
	printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
	       s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah);
#endif
}
  56
  57int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
  58{
  59        struct ide_cmd cmd;
  60
  61        memset(&cmd, 0, sizeof(cmd));
  62        cmd.tf.nsect = 0x01;
  63        if (drive->media == ide_disk)
  64                cmd.tf.command = ATA_CMD_ID_ATA;
  65        else
  66                cmd.tf.command = ATA_CMD_ID_ATAPI;
  67        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
  68        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
  69        cmd.protocol = ATA_PROT_PIO;
  70
  71        return ide_raw_taskfile(drive, &cmd, buf, 1);
  72}
  73
  74static ide_startstop_t task_no_data_intr(ide_drive_t *);
  75static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
  76static ide_startstop_t task_pio_intr(ide_drive_t *);
  77
/*
 * Issue the taskfile command described by @orig_cmd on @drive.
 *
 * The command is copied into the port's command slot (hwif->cmd), the
 * taskfile registers are loaded — unless this is a DMA-to-PIO fallback
 * retry, in which case the registers were already set up — and the
 * transfer is started according to the command protocol.
 *
 * Returns ide_started when a completion handler has been installed and
 * the command is in flight, or ide_stopped on immediate failure.
 */
ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	ide_handler_t *handler = NULL;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	/* Multi-sector PIO is only valid after SET MULTIPLE MODE. */
	if (orig_cmd->protocol == ATA_PROT_PIO &&
	    (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
	    drive->mult_count == 0) {
		pr_err("%s: multimode not set!\n", drive->name);
		return ide_stopped;
	}

	/* Flagged taskfiles want their in-flags filled on completion. */
	if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
		orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;

	memcpy(cmd, orig_cmd, sizeof(*cmd));

	if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, cmd);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

		if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
			u8 data[2] = { cmd->tf.data, cmd->hob.data };

			tp_ops->output_data(drive, cmd, data, 2);
		}

		if (cmd->valid.out.tf & IDE_VALID_DEVICE) {
			/* Mask of device-register bits the caller may keep:
			 * 0xE0 for LBA48, 0xEF otherwise. */
			u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ?
				  0xE0 : 0xEF;

			if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED))
				cmd->tf.device &= HIHI;
			cmd->tf.device |= drive->select;
		}

		/* HOB bytes are loaded before the low-order bytes. */
		tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob);
		tp_ops->tf_load(drive, &cmd->tf,  cmd->valid.out.tf);
	}

	switch (cmd->protocol) {
	case ATA_PROT_PIO:
		if (cmd->tf_flags & IDE_TFLAG_WRITE) {
			/* PIO writes: issue the command, then push the
			 * first data block from pre_task_out_intr(). */
			tp_ops->exec_command(hwif, tf->command);
			ndelay(400);	/* FIXME */
			return pre_task_out_intr(drive, cmd);
		}
		handler = task_pio_intr;
		/* fall-through */
	case ATA_PROT_NODATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		ide_execute_command(drive, cmd, handler, WAIT_WORSTCASE);
		return ide_started;
	case ATA_PROT_DMA:
		if (ide_dma_prepare(drive, cmd))
			return ide_stopped;
		hwif->expiry = dma_ops->dma_timer_expiry;
		ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
		dma_ops->dma_start(drive);
	default:
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);
 147
 148static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
 149{
 150        ide_hwif_t *hwif = drive->hwif;
 151        struct ide_cmd *cmd = &hwif->cmd;
 152        struct ide_taskfile *tf = &cmd->tf;
 153        int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
 154        int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
 155        u8 stat;
 156
 157        local_irq_enable_in_hardirq();
 158
 159        while (1) {
 160                stat = hwif->tp_ops->read_status(hwif);
 161                if ((stat & ATA_BUSY) == 0 || retries-- == 0)
 162                        break;
 163                udelay(10);
 164        };
 165
 166        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
 167                if (custom && tf->command == ATA_CMD_SET_MULTI) {
 168                        drive->mult_req = drive->mult_count = 0;
 169                        drive->special_flags |= IDE_SFLAG_RECALIBRATE;
 170                        (void)ide_dump_status(drive, __func__, stat);
 171                        return ide_stopped;
 172                } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
 173                        if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
 174                                ide_set_handler(drive, &task_no_data_intr,
 175                                                WAIT_WORSTCASE);
 176                                return ide_started;
 177                        }
 178                }
 179                return ide_error(drive, "task_no_data_intr", stat);
 180        }
 181
 182        if (custom && tf->command == ATA_CMD_SET_MULTI)
 183                drive->mult_count = drive->mult_req;
 184
 185        if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
 186            tf->command == ATA_CMD_CHK_POWER) {
 187                struct request *rq = hwif->rq;
 188
 189                if (blk_pm_request(rq))
 190                        ide_complete_pm_rq(drive, rq);
 191                else
 192                        ide_finish_cmd(drive, cmd, stat);
 193        }
 194
 195        return ide_stopped;
 196}
 197
 198static u8 wait_drive_not_busy(ide_drive_t *drive)
 199{
 200        ide_hwif_t *hwif = drive->hwif;
 201        int retries;
 202        u8 stat;
 203
 204        /*
 205         * Last sector was transferred, wait until device is ready.  This can
 206         * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
 207         */
 208        for (retries = 0; retries < 1000; retries++) {
 209                stat = hwif->tp_ops->read_status(hwif);
 210
 211                if (stat & ATA_BUSY)
 212                        udelay(10);
 213                else
 214                        break;
 215        }
 216
 217        if (stat & ATA_BUSY)
 218                pr_err("%s: drive still BUSY!\n", drive->name);
 219
 220        return stat;
 221}
 222
 223void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
 224                   unsigned int write, unsigned int len)
 225{
 226        ide_hwif_t *hwif = drive->hwif;
 227        struct scatterlist *sg = hwif->sg_table;
 228        struct scatterlist *cursg = cmd->cursg;
 229        unsigned long uninitialized_var(flags);
 230        struct page *page;
 231        unsigned int offset;
 232        u8 *buf;
 233
 234        cursg = cmd->cursg;
 235        if (cursg == NULL)
 236                cursg = cmd->cursg = sg;
 237
 238        while (len) {
 239                unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);
 240                int page_is_high;
 241
 242                page = sg_page(cursg);
 243                offset = cursg->offset + cmd->cursg_ofs;
 244
 245                /* get the current page and offset */
 246                page = nth_page(page, (offset >> PAGE_SHIFT));
 247                offset %= PAGE_SIZE;
 248
 249                nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset));
 250
 251                page_is_high = PageHighMem(page);
 252                if (page_is_high)
 253                        local_irq_save(flags);
 254
 255                buf = kmap_atomic(page) + offset;
 256
 257                cmd->nleft -= nr_bytes;
 258                cmd->cursg_ofs += nr_bytes;
 259
 260                if (cmd->cursg_ofs == cursg->length) {
 261                        cursg = cmd->cursg = sg_next(cmd->cursg);
 262                        cmd->cursg_ofs = 0;
 263                }
 264
 265                /* do the actual data transfer */
 266                if (write)
 267                        hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes);
 268                else
 269                        hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);
 270
 271                kunmap_atomic(buf);
 272
 273                if (page_is_high)
 274                        local_irq_restore(flags);
 275
 276                len -= nr_bytes;
 277        }
 278}
 279EXPORT_SYMBOL_GPL(ide_pio_bytes);
 280
 281static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
 282                              unsigned int write)
 283{
 284        unsigned int nr_bytes;
 285
 286        u8 saved_io_32bit = drive->io_32bit;
 287
 288        if (cmd->tf_flags & IDE_TFLAG_FS)
 289                cmd->rq->errors = 0;
 290
 291        if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
 292                drive->io_32bit = 0;
 293
 294        touch_softlockup_watchdog();
 295
 296        if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
 297                nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9);
 298        else
 299                nr_bytes = SECTOR_SIZE;
 300
 301        ide_pio_bytes(drive, cmd, write, nr_bytes);
 302
 303        drive->io_32bit = saved_io_32bit;
 304}
 305
 306static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
 307{
 308        if (cmd->tf_flags & IDE_TFLAG_FS) {
 309                int nr_bytes = cmd->nbytes - cmd->nleft;
 310
 311                if (cmd->protocol == ATA_PROT_PIO &&
 312                    ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
 313                        if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
 314                                nr_bytes -= drive->mult_count << 9;
 315                        else
 316                                nr_bytes -= SECTOR_SIZE;
 317                }
 318
 319                if (nr_bytes > 0)
 320                        ide_complete_rq(drive, 0, nr_bytes);
 321        }
 322}
 323
/*
 * Finish a taskfile command: read the error register, record the
 * result on the request, and complete it.
 *
 * tf.nsect is sampled up front because ide_complete_cmd() may
 * overwrite the command's taskfile with readback data.  When the
 * command carried IDE_TFLAG_SET_XFER and succeeded, the saved nsect
 * value (the transfer mode) is applied via ide_set_xfer_rate() and
 * the drive identify data is refreshed.
 */
void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
{
	struct request *rq = drive->hwif->rq;
	u8 err = ide_read_error(drive), nsect = cmd->tf.nsect;
	u8 set_xfer = !!(cmd->tf_flags & IDE_TFLAG_SET_XFER);

	ide_complete_cmd(drive, cmd, stat, err);
	rq->errors = err;

	if (err == 0 && set_xfer) {
		ide_set_xfer_rate(drive, nsect);
		ide_driveid_update(drive);
	}

	ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
}
 340
/*
 * Handler for command with PIO data phase.
 *
 * Called on each data-phase interrupt.  Validates status for the
 * current direction, transfers the next data block, and either
 * re-arms itself (more data pending), completes the request, or
 * bails out through the error path.
 */
static ide_startstop_t task_pio_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &drive->hwif->cmd;
	u8 stat = hwif->tp_ops->read_status(hwif);
	u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);

	if (write == 0) {
		/* Error? */
		if (stat & ATA_ERR)
			goto out_err;

		/* Didn't want any data? Odd. */
		if ((stat & ATA_DRQ) == 0) {
			/* Command all done? */
			if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
				goto out_end;

			/* Assume it was a spurious irq */
			goto out_wait;
		}
	} else {
		if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
			goto out_err;

		/* Deal with unexpected ATA data phase. */
		/* DRQ clear with data left, or DRQ set with none, is fatal. */
		if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
			goto out_err;
	}

	/* Last block of a write was just acknowledged: we're done. */
	if (write && cmd->nleft == 0)
		goto out_end;

	/* Still data left to transfer. */
	ide_pio_datablock(drive, cmd, write);

	/* Are we done? Check status and finish transfer. */
	if (write == 0 && cmd->nleft == 0) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			goto out_err;

		goto out_end;
	}
out_wait:
	/* Still data left to transfer. */
	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
	return ide_started;
out_end:
	if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
		ide_finish_cmd(drive, cmd, stat);
	else
		ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
	return ide_stopped;
out_err:
	ide_error_cmd(drive, cmd);
	return ide_error(drive, __func__, stat);
}
 402
/*
 * Start the data-out (write) phase of a PIO command: wait for the
 * device to assert DRQ, install task_pio_intr() as the irq handler,
 * then push the first data block.  Returns ide_started on success or
 * the error status from ide_wait_stat() when DRQ never shows up.
 */
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
					 struct ide_cmd *cmd)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, ATA_DRQ,
			  drive->bad_wstat, WAIT_DRQ)) {
		pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive->name,
			(cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
			(drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
		return startstop;
	}

	/* Keep irqs masked while sending data unless the drive unmasks. */
	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
		local_irq_disable();

	/* Arm the handler before starting the transfer. */
	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);

	ide_pio_datablock(drive, cmd, 1);

	return ide_started;
}
 425
/*
 * Synchronously execute the taskfile command @cmd on @drive.
 *
 * @buf/@nsect: optional data buffer of @nsect whole sectors, mapped
 * into the request with blk_rq_map_kern().  Direction follows
 * IDE_TFLAG_WRITE in the command.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the blk_get_request() return value is not checked
 * before use — presumably it cannot fail with __GFP_WAIT here, but
 * verify against the block layer version this was written for.
 */
int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
		     u16 nsect)
{
	struct request *rq;
	int error;
	int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;

	rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;

	/*
	 * (ks) We transfer currently only whole sectors.
	 * This is suffient for now.  But, it would be great,
	 * if we would find a solution to transfer any size.
	 * To support special commands like READ LONG.
	 */
	if (nsect) {
		error = blk_rq_map_kern(drive->queue, rq, buf,
					nsect * SECTOR_SIZE, __GFP_WAIT);
		if (error)
			goto put_req;
	}

	rq->special = cmd;
	cmd->rq = rq;

	error = blk_execute_rq(drive->queue, NULL, rq, 0);

put_req:
	blk_put_request(rq);
	return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);
 459
/*
 * Execute @cmd on @drive as a no-data (ATA_PROT_NODATA) command.
 * Convenience wrapper around ide_raw_taskfile() with no buffer.
 * Returns 0 on success or a negative errno.
 */
int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
{
	cmd->protocol = ATA_PROT_NODATA;

	return ide_raw_taskfile(drive, cmd, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
 467
#ifdef CONFIG_IDE_TASK_IOCTL
/*
 * HDIO_DRIVE_TASKFILE ioctl handler.
 *
 * @arg points to a userspace buffer laid out as:
 *   ide_task_request_t header
 *   [out_size bytes of output data]
 *   [in_size bytes of input data]
 *
 * The header's taskfile registers, flags and data phase are translated
 * into a struct ide_cmd, the command is executed synchronously via
 * ide_raw_taskfile(), and the (possibly updated) header and data
 * buffers are copied back to userspace.
 *
 * Returns 0 on success or a negative errno.
 */
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
{
	ide_task_request_t	*req_task;
	struct ide_cmd		cmd;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	u8 *data_buf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u16 nsect		= 0;
	char __user *buf = (char __user *)arg;

	req_task = memdup_user(buf, tasksize);
	if (IS_ERR(req_task))
		return PTR_ERR(req_task);

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	/* Cap each direction at 64 KiB. */
	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	/* Output data follows the header in the user buffer. */
	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	/* Input data follows the output data in the user buffer. */
	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&cmd, 0, sizeof(cmd));

	/* Copy the HOB and low-order register sets from the request. */
	memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&cmd.tf,  req_task->io_ports,  HDIO_DRIVE_TASK_HDR_SIZE);

	cmd.valid.out.tf = IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_DEVICE | IDE_VALID_IN_TF;
	cmd.tf_flags = IDE_TFLAG_IO_16BIT;

	if (drive->dev_flags & IDE_DFLAG_LBA48) {
		cmd.tf_flags |= IDE_TFLAG_LBA48;
		cmd.valid.in.hob = IDE_VALID_IN_HOB;
	}

	if (req_task->out_flags.all) {
		/* Caller flagged individual registers: translate each
		 * out-flag bit into the matching valid-register bit. */
		cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			cmd.valid.out.hob |= IDE_VALID_NSECT;
		if (req_task->out_flags.b.sector_hob)
			cmd.valid.out.hob |= IDE_VALID_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			cmd.valid.out.hob |= IDE_VALID_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			cmd.valid.out.hob |= IDE_VALID_LBAH;

		if (req_task->out_flags.b.error_feature)
			cmd.valid.out.tf  |= IDE_VALID_FEATURE;
		if (req_task->out_flags.b.nsector)
			cmd.valid.out.tf  |= IDE_VALID_NSECT;
		if (req_task->out_flags.b.sector)
			cmd.valid.out.tf  |= IDE_VALID_LBAL;
		if (req_task->out_flags.b.lcyl)
			cmd.valid.out.tf  |= IDE_VALID_LBAM;
		if (req_task->out_flags.b.hcyl)
			cmd.valid.out.tf  |= IDE_VALID_LBAH;
	} else {
		/* No individual flags: load the full standard set. */
		cmd.valid.out.tf |= IDE_VALID_OUT_TF;
		if (cmd.tf_flags & IDE_TFLAG_LBA48)
			cmd.valid.out.hob |= IDE_VALID_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
		/* fixup data phase if needed */
		if (req_task->data_phase == TASKFILE_IN_DMAQ ||
		    req_task->data_phase == TASKFILE_IN_DMA)
			cmd.tf_flags |= IDE_TFLAG_WRITE;
	}

	cmd.protocol = ATA_PROT_DMA;

	/* Map the requested data phase onto protocol, direction,
	 * sector count and data buffer. */
	switch (req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			pr_err("%s: %s Multimode Write multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
		/* fall through */
	case TASKFILE_OUT:
		cmd.protocol = ATA_PROT_PIO;
		/* fall through */
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		cmd.tf_flags |= IDE_TFLAG_WRITE;
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			pr_err("%s: %s Multimode Read multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
		/* fall through */
	case TASKFILE_IN:
		cmd.protocol = ATA_PROT_PIO;
		/* fall through */
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		cmd.protocol = ATA_PROT_NODATA;
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		/* Fall back to the sector count from the taskfile. */
		nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect;

		if (!nsect) {
			pr_err("%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);

	/* Copy the (possibly updated) registers back into the header. */
	memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports,  &cmd.tf,  HDIO_DRIVE_TASK_HDR_SIZE);

	if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->dev_flags & IDE_DFLAG_LBA48)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

	return err;
}
#endif
 675