/*
 * linux/drivers/ide/ide-pm.c — IDE device power-management (suspend/resume)
 */
   1#include <linux/kernel.h>
   2#include <linux/gfp.h>
   3#include <linux/ide.h>
   4
   5int generic_ide_suspend(struct device *dev, pm_message_t mesg)
   6{
   7        ide_drive_t *drive = to_ide_device(dev);
   8        ide_drive_t *pair = ide_get_pair_dev(drive);
   9        ide_hwif_t *hwif = drive->hwif;
  10        struct request *rq;
  11        struct request_pm_state rqpm;
  12        int ret;
  13
  14        if (ide_port_acpi(hwif)) {
  15                /* call ACPI _GTM only once */
  16                if ((drive->dn & 1) == 0 || pair == NULL)
  17                        ide_acpi_get_timing(hwif);
  18        }
  19
  20        memset(&rqpm, 0, sizeof(rqpm));
  21        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
  22        rq->cmd_type = REQ_TYPE_PM_SUSPEND;
  23        rq->special = &rqpm;
  24        rqpm.pm_step = IDE_PM_START_SUSPEND;
  25        if (mesg.event == PM_EVENT_PRETHAW)
  26                mesg.event = PM_EVENT_FREEZE;
  27        rqpm.pm_state = mesg.event;
  28
  29        ret = blk_execute_rq(drive->queue, NULL, rq, 0);
  30        blk_put_request(rq);
  31
  32        if (ret == 0 && ide_port_acpi(hwif)) {
  33                /* call ACPI _PS3 only after both devices are suspended */
  34                if ((drive->dn & 1) || pair == NULL)
  35                        ide_acpi_set_state(hwif, 0);
  36        }
  37
  38        return ret;
  39}
  40
  41int generic_ide_resume(struct device *dev)
  42{
  43        ide_drive_t *drive = to_ide_device(dev);
  44        ide_drive_t *pair = ide_get_pair_dev(drive);
  45        ide_hwif_t *hwif = drive->hwif;
  46        struct request *rq;
  47        struct request_pm_state rqpm;
  48        int err;
  49
  50        if (ide_port_acpi(hwif)) {
  51                /* call ACPI _PS0 / _STM only once */
  52                if ((drive->dn & 1) == 0 || pair == NULL) {
  53                        ide_acpi_set_state(hwif, 1);
  54                        ide_acpi_push_timing(hwif);
  55                }
  56
  57                ide_acpi_exec_tfs(drive);
  58        }
  59
  60        memset(&rqpm, 0, sizeof(rqpm));
  61        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
  62        rq->cmd_type = REQ_TYPE_PM_RESUME;
  63        rq->cmd_flags |= REQ_PREEMPT;
  64        rq->special = &rqpm;
  65        rqpm.pm_step = IDE_PM_START_RESUME;
  66        rqpm.pm_state = PM_EVENT_ON;
  67
  68        err = blk_execute_rq(drive->queue, NULL, rq, 1);
  69        blk_put_request(rq);
  70
  71        if (err == 0 && dev->driver) {
  72                struct ide_driver *drv = to_ide_driver(dev->driver);
  73
  74                if (drv->resume)
  75                        drv->resume(drive);
  76        }
  77
  78        return err;
  79}
  80
  81void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
  82{
  83        struct request_pm_state *pm = rq->special;
  84
  85#ifdef DEBUG_PM
  86        printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
  87                drive->name, pm->pm_step);
  88#endif
  89        if (drive->media != ide_disk)
  90                return;
  91
  92        switch (pm->pm_step) {
  93        case IDE_PM_FLUSH_CACHE:        /* Suspend step 1 (flush cache) */
  94                if (pm->pm_state == PM_EVENT_FREEZE)
  95                        pm->pm_step = IDE_PM_COMPLETED;
  96                else
  97                        pm->pm_step = IDE_PM_STANDBY;
  98                break;
  99        case IDE_PM_STANDBY:            /* Suspend step 2 (standby) */
 100                pm->pm_step = IDE_PM_COMPLETED;
 101                break;
 102        case IDE_PM_RESTORE_PIO:        /* Resume step 1 (restore PIO) */
 103                pm->pm_step = IDE_PM_IDLE;
 104                break;
 105        case IDE_PM_IDLE:               /* Resume step 2 (idle)*/
 106                pm->pm_step = IDE_PM_RESTORE_DMA;
 107                break;
 108        }
 109}
 110
/*
 * ide_start_power_step - issue the hardware command for the current PM step
 * @drive: target drive
 * @rq: PM request carrying the request_pm_state in rq->special
 *
 * Dispatches on pm->pm_step: cache-flush and standby for suspend,
 * PIO/idle/DMA restoration for resume.  Steps that need an ATA command
 * build a no-data taskfile and hand it to do_rw_taskfile(); steps with
 * nothing to send (or unsupported ones) fall through, mark the request
 * IDE_PM_COMPLETED and return ide_stopped.
 */
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
        struct request_pm_state *pm = rq->special;
        struct ide_cmd cmd = { };

        switch (pm->pm_step) {
        case IDE_PM_FLUSH_CACHE:        /* Suspend step 1 (flush cache) */
                /* Only disks have a write cache to flush. */
                if (drive->media != ide_disk)
                        break;
                /* Not supported? Switch to next step now. */
                if (ata_id_flush_enabled(drive->id) == 0 ||
                    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
                        ide_complete_power_step(drive, rq);
                        return ide_stopped;
                }
                /* Prefer the 48-bit flush when the device advertises it. */
                if (ata_id_flush_ext_enabled(drive->id))
                        cmd.tf.command = ATA_CMD_FLUSH_EXT;
                else
                        cmd.tf.command = ATA_CMD_FLUSH;
                goto out_do_tf;
        case IDE_PM_STANDBY:            /* Suspend step 2 (standby) */
                cmd.tf.command = ATA_CMD_STANDBYNOW1;
                goto out_do_tf;
        case IDE_PM_RESTORE_PIO:        /* Resume step 1 (restore PIO) */
                ide_set_max_pio(drive);
                /*
                 * skip IDE_PM_IDLE for ATAPI devices
                 */
                if (drive->media != ide_disk)
                        pm->pm_step = IDE_PM_RESTORE_DMA;
                else
                        ide_complete_power_step(drive, rq);
                return ide_stopped;
        case IDE_PM_IDLE:               /* Resume step 2 (idle) */
                cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
                goto out_do_tf;
        case IDE_PM_RESTORE_DMA:        /* Resume step 3 (restore DMA) */
                /*
                 * Right now, all we do is call ide_set_dma(drive),
                 * we could be smarter and check for current xfer_speed
                 * in struct drive etc...
                 */
                if (drive->hwif->dma_ops == NULL)
                        break;
                /*
                 * TODO: respect IDE_DFLAG_USING_DMA
                 */
                ide_set_dma(drive);
                break;
        }

        /* Nothing (more) to issue for this request. */
        pm->pm_step = IDE_PM_COMPLETED;

        return ide_stopped;

out_do_tf:
        /* No-data taskfile: write TF + device, read them back on completion. */
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        cmd.protocol = ATA_PROT_NODATA;

        return do_rw_taskfile(drive, &cmd);
}
 173
/**
 *      ide_complete_pm_rq - end the current Power Management request
 *      @drive: target drive
 *      @rq: request
 *
 *      This function cleans up the current PM request and stops the queue
 *      if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
        struct request_queue *q = drive->queue;
        struct request_pm_state *pm = rq->special;
        unsigned long flags;

        /* Advance the state machine; bail out until it reaches the end. */
        ide_complete_power_step(drive, rq);
        if (pm->pm_step != IDE_PM_COMPLETED)
                return;

#ifdef DEBUG_PM
        printk("%s: completing PM request, %s\n", drive->name,
               (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume");
#endif
        /* Suspend parks the queue; resume lifts the drive's blocked flag. */
        spin_lock_irqsave(q->queue_lock, flags);
        if (rq->cmd_type == REQ_TYPE_PM_SUSPEND)
                blk_stop_queue(q);
        else
                drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* The port no longer owns an in-flight request. */
        drive->hwif->rq = NULL;

        /* A zero-byte completion must fully finish the PM request. */
        if (blk_end_request(rq, 0, 0))
                BUG();
}
 208
/*
 * ide_check_pm_state - react to the first step of a PM request
 * @drive: target drive
 * @rq: request being started
 *
 * At the start of a suspend sequence the drive is marked blocked; at
 * the start of a resume sequence the hardware is woken up (wait for
 * !BSY, select the device, re-enable interrupts) and the request
 * queue is restarted.
 */
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
        struct request_pm_state *pm = rq->special;

        if (rq->cmd_type == REQ_TYPE_PM_SUSPEND &&
            pm->pm_step == IDE_PM_START_SUSPEND)
                /* Mark drive blocked when starting the suspend sequence. */
                drive->dev_flags |= IDE_DFLAG_BLOCKED;
        else if (rq->cmd_type == REQ_TYPE_PM_RESUME &&
                 pm->pm_step == IDE_PM_START_RESUME) {
                /*
                 * The first thing we do on wakeup is to wait for BSY bit to
                 * go away (with a looong timeout) as a drive on this hwif may
                 * just be POSTing itself.
                 * We do that before even selecting as the "other" device on
                 * the bus may be broken enough to walk on our toes at this
                 * point.
                 */
                ide_hwif_t *hwif = drive->hwif;
                const struct ide_tp_ops *tp_ops = hwif->tp_ops;
                struct request_queue *q = drive->queue;
                unsigned long flags;
                int rc;
#ifdef DEBUG_PM
                printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
                /* Port-wide wait first: any device may still be POSTing. */
                rc = ide_wait_not_busy(hwif, 35000);
                if (rc)
                        printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
                tp_ops->dev_select(drive);
                /* Restore the obsolete-bits-only devctl (nIEN clear). */
                tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
                /* Now wait for the selected drive itself, longer still. */
                rc = ide_wait_not_busy(hwif, 100000);
                if (rc)
                        printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

                /* Let normal requests flow again. */
                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}
 249
/* Source browsed via lxr.linux.no, hosted by Redpill Linpro AS. */