linux/drivers/clocksource/sh_cmt.c
<<
>>
Prefs
   1/*
   2 * SuperH Timer Support - CMT
   3 *
   4 *  Copyright (C) 2008 Magnus Damm
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#include <linux/init.h>
  21#include <linux/platform_device.h>
  22#include <linux/spinlock.h>
  23#include <linux/interrupt.h>
  24#include <linux/ioport.h>
  25#include <linux/io.h>
  26#include <linux/clk.h>
  27#include <linux/irq.h>
  28#include <linux/err.h>
  29#include <linux/delay.h>
  30#include <linux/clocksource.h>
  31#include <linux/clockchips.h>
  32#include <linux/sh_timer.h>
  33#include <linux/slab.h>
  34#include <linux/module.h>
  35#include <linux/pm_domain.h>
  36#include <linux/pm_runtime.h>
  37
/* Per-channel driver state for one CMT timer channel. */
struct sh_cmt_priv {
	void __iomem *mapbase;		/* channel register base (ioremapped) */
	struct clk *clk;		/* functional clock "cmt_fck" */
	unsigned long width; /* 16 or 32 bit version of hardware block */
	unsigned long overflow_bit;	/* CMCSR overflow flag for this width */
	unsigned long clear_bits;	/* mask ANDed into CMCSR by the isr */
	struct irqaction irqaction;	/* pre-allocated for setup_irq() */
	struct platform_device *pdev;	/* owning platform device */

	unsigned long flags;		/* FLAG_* bits, guarded by lock */
	unsigned long match_value;	/* value currently programmed in CMCOR */
	unsigned long next_match_value;	/* value to program on next reprogram */
	unsigned long max_match_value;	/* all-ones for the counter width */
	unsigned long rate;		/* counter input rate in Hz */
	raw_spinlock_t lock;		/* protects flags/match bookkeeping */
	struct clock_event_device ced;	/* clockevent interface */
	struct clocksource cs;		/* clocksource interface */
	unsigned long total_cycles;	/* cycles accumulated at each match irq */
	bool cs_enabled;		/* clocksource currently enabled? */
};
  58
/* Serializes access to the CMSTR start/stop register, which is shared
 * between all channels of one CMT block. */
static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

/* Register indices passed to sh_cmt_read()/sh_cmt_write(); translated
 * into byte offsets there according to the channel width. */
#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
  65
  66static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
  67{
  68        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
  69        void __iomem *base = p->mapbase;
  70        unsigned long offs;
  71
  72        if (reg_nr == CMSTR) {
  73                offs = 0;
  74                base -= cfg->channel_offset;
  75        } else
  76                offs = reg_nr;
  77
  78        if (p->width == 16)
  79                offs <<= 1;
  80        else {
  81                offs <<= 2;
  82                if ((reg_nr == CMCNT) || (reg_nr == CMCOR))
  83                        return ioread32(base + offs);
  84        }
  85
  86        return ioread16(base + offs);
  87}
  88
  89static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
  90                                unsigned long value)
  91{
  92        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
  93        void __iomem *base = p->mapbase;
  94        unsigned long offs;
  95
  96        if (reg_nr == CMSTR) {
  97                offs = 0;
  98                base -= cfg->channel_offset;
  99        } else
 100                offs = reg_nr;
 101
 102        if (p->width == 16)
 103                offs <<= 1;
 104        else {
 105                offs <<= 2;
 106                if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) {
 107                        iowrite32(value, base + offs);
 108                        return;
 109                }
 110        }
 111
 112        iowrite16(value, base + offs);
 113}
 114
/*
 * Return a stable snapshot of the counter together with the overflow
 * state.  *has_wrapped is set to the (non-zero) CMCSR overflow bit if
 * an overflow was pending around the read.
 *
 * The counter can change between bus accesses and the overflow bit can
 * flip mid-sequence, so read the counter three times and the flag twice
 * and retry until the values are monotonically consistent.
 */
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read(p, CMCNT);
		v2 = sh_cmt_read(p, CMCNT);
		v3 = sh_cmt_read(p, CMCNT);
		o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
		/* retry if the flag changed or the three reads are not
		 * in a plausible (possibly wrapping) order */
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2; /* middle read is the trustworthy one */
}
 136
 137
 138static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 139{
 140        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 141        unsigned long flags, value;
 142
 143        /* start stop register shared by multiple timer channels */
 144        raw_spin_lock_irqsave(&sh_cmt_lock, flags);
 145        value = sh_cmt_read(p, CMSTR);
 146
 147        if (start)
 148                value |= 1 << cfg->timer_bit;
 149        else
 150                value &= ~(1 << cfg->timer_bit);
 151
 152        sh_cmt_write(p, CMSTR, value);
 153        raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
 154}
 155
 156static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 157{
 158        int k, ret;
 159
 160        pm_runtime_get_sync(&p->pdev->dev);
 161        dev_pm_syscore_device(&p->pdev->dev, true);
 162
 163        /* enable clock */
 164        ret = clk_enable(p->clk);
 165        if (ret) {
 166                dev_err(&p->pdev->dev, "cannot enable clock\n");
 167                goto err0;
 168        }
 169
 170        /* make sure channel is disabled */
 171        sh_cmt_start_stop_ch(p, 0);
 172
 173        /* configure channel, periodic mode and maximum timeout */
 174        if (p->width == 16) {
 175                *rate = clk_get_rate(p->clk) / 512;
 176                sh_cmt_write(p, CMCSR, 0x43);
 177        } else {
 178                *rate = clk_get_rate(p->clk) / 8;
 179                sh_cmt_write(p, CMCSR, 0x01a4);
 180        }
 181
 182        sh_cmt_write(p, CMCOR, 0xffffffff);
 183        sh_cmt_write(p, CMCNT, 0);
 184
 185        /*
 186         * According to the sh73a0 user's manual, as CMCNT can be operated
 187         * only by the RCLK (Pseudo 32 KHz), there's one restriction on
 188         * modifying CMCNT register; two RCLK cycles are necessary before
 189         * this register is either read or any modification of the value
 190         * it holds is reflected in the LSI's actual operation.
 191         *
 192         * While at it, we're supposed to clear out the CMCNT as of this
 193         * moment, so make sure it's processed properly here.  This will
 194         * take RCLKx2 at maximum.
 195         */
 196        for (k = 0; k < 100; k++) {
 197                if (!sh_cmt_read(p, CMCNT))
 198                        break;
 199                udelay(1);
 200        }
 201
 202        if (sh_cmt_read(p, CMCNT)) {
 203                dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
 204                ret = -ETIMEDOUT;
 205                goto err1;
 206        }
 207
 208        /* enable channel */
 209        sh_cmt_start_stop_ch(p, 1);
 210        return 0;
 211 err1:
 212        /* stop clock */
 213        clk_disable(p->clk);
 214
 215 err0:
 216        return ret;
 217}
 218
 219static void sh_cmt_disable(struct sh_cmt_priv *p)
 220{
 221        /* disable channel */
 222        sh_cmt_start_stop_ch(p, 0);
 223
 224        /* disable interrupts in CMT block */
 225        sh_cmt_write(p, CMCSR, 0);
 226
 227        /* stop clock */
 228        clk_disable(p->clk);
 229
 230        dev_pm_syscore_device(&p->pdev->dev, false);
 231        pm_runtime_put(&p->pdev->dev);
 232}
 233
/* private flags, kept in sh_cmt_priv::flags under sh_cmt_priv::lock */
#define FLAG_CLOCKEVENT (1 << 0)	/* channel used as clockevent */
#define FLAG_CLOCKSOURCE (1 << 1)	/* channel used as clocksource */
#define FLAG_REPROGRAM (1 << 2)		/* isr must reprogram the match */
#define FLAG_SKIPEVENT (1 << 3)		/* isr must swallow the next event */
#define FLAG_IRQCONTEXT (1 << 4)	/* currently inside sh_cmt_interrupt() */
 240
/*
 * Program CMCOR with next_match_value and verify, against the live
 * counter, that the match cannot have been missed.  Retries with an
 * exponentially growing safety delay if the counter overtook the
 * freshly programmed match value.
 *
 * @absolute: non-zero means next_match_value is relative to counter
 *            zero (used from the isr), otherwise relative to "now".
 *
 * Caller must hold p->lock or be the interrupt handler.
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = p->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(p, &has_wrapped);
	p->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		p->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > p->max_match_value)
			new_match = p->max_match_value;

		sh_cmt_write(p, CMCOR, new_match);

		now = sh_cmt_get_counter(p, &has_wrapped);
		if (has_wrapped && (new_match > p->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			p->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay wrapped to zero after doubling: give up, warn */
		if (!delay)
			dev_warn(&p->pdev->dev, "too long delay\n");

	} while (delay);
}
 325
 326static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 327{
 328        if (delta > p->max_match_value)
 329                dev_warn(&p->pdev->dev, "delta out of range\n");
 330
 331        p->next_match_value = delta;
 332        sh_cmt_clock_event_program_verify(p, 0);
 333}
 334
 335static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 336{
 337        unsigned long flags;
 338
 339        raw_spin_lock_irqsave(&p->lock, flags);
 340        __sh_cmt_set_next(p, delta);
 341        raw_spin_unlock_irqrestore(&p->lock, flags);
 342}
 343
/*
 * Match interrupt handler.  Acknowledges the CMCSR flags, accounts the
 * elapsed cycles for the clocksource, delivers the clockevent and, if
 * requested via FLAG_REPROGRAM, reprograms the match register.  The
 * FLAG_* choreography with sh_cmt_clock_event_program_verify() resolves
 * races between reprogramming and the counter wrapping.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_priv *p = dev_id;

	/* clear flags */
	sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (p->flags & FLAG_CLOCKSOURCE)
		p->total_cycles += p->match_value + 1;

	if (!(p->flags & FLAG_REPROGRAM))
		p->next_match_value = p->max_match_value;

	/* lets set_next_event() store the match value directly instead
	 * of reprogramming the hardware from within the handler */
	p->flags |= FLAG_IRQCONTEXT;

	if (p->flags & FLAG_CLOCKEVENT) {
		if (!(p->flags & FLAG_SKIPEVENT)) {
			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				/* oneshot fired: disarm until the next
				 * set_next_event() supplies a new delta */
				p->next_match_value = p->max_match_value;
				p->flags |= FLAG_REPROGRAM;
			}

			p->ced.event_handler(&p->ced);
		}
	}

	p->flags &= ~FLAG_SKIPEVENT;

	if (p->flags & FLAG_REPROGRAM) {
		p->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(p, 1);

		if (p->flags & FLAG_CLOCKEVENT)
			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (p->match_value == p->next_match_value))
				p->flags &= ~FLAG_REPROGRAM;
	}

	p->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
 390
 391static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
 392{
 393        int ret = 0;
 394        unsigned long flags;
 395
 396        raw_spin_lock_irqsave(&p->lock, flags);
 397
 398        if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
 399                ret = sh_cmt_enable(p, &p->rate);
 400
 401        if (ret)
 402                goto out;
 403        p->flags |= flag;
 404
 405        /* setup timeout if no clockevent */
 406        if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
 407                __sh_cmt_set_next(p, p->max_match_value);
 408 out:
 409        raw_spin_unlock_irqrestore(&p->lock, flags);
 410
 411        return ret;
 412}
 413
 414static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
 415{
 416        unsigned long flags;
 417        unsigned long f;
 418
 419        raw_spin_lock_irqsave(&p->lock, flags);
 420
 421        f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
 422        p->flags &= ~flag;
 423
 424        if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
 425                sh_cmt_disable(p);
 426
 427        /* adjust the timeout to maximum if only clocksource left */
 428        if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
 429                __sh_cmt_set_next(p, p->max_match_value);
 430
 431        raw_spin_unlock_irqrestore(&p->lock, flags);
 432}
 433
/* Map an embedded clocksource back to its owning sh_cmt_priv. */
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_priv, cs);
}
 438
 439static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
 440{
 441        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 442        unsigned long flags, raw;
 443        unsigned long value;
 444        int has_wrapped;
 445
 446        raw_spin_lock_irqsave(&p->lock, flags);
 447        value = p->total_cycles;
 448        raw = sh_cmt_get_counter(p, &has_wrapped);
 449
 450        if (unlikely(has_wrapped))
 451                raw += p->match_value + 1;
 452        raw_spin_unlock_irqrestore(&p->lock, flags);
 453
 454        return value + raw;
 455}
 456
 457static int sh_cmt_clocksource_enable(struct clocksource *cs)
 458{
 459        int ret;
 460        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 461
 462        WARN_ON(p->cs_enabled);
 463
 464        p->total_cycles = 0;
 465
 466        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
 467        if (!ret) {
 468                __clocksource_updatefreq_hz(cs, p->rate);
 469                p->cs_enabled = true;
 470        }
 471        return ret;
 472}
 473
 474static void sh_cmt_clocksource_disable(struct clocksource *cs)
 475{
 476        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 477
 478        WARN_ON(!p->cs_enabled);
 479
 480        sh_cmt_stop(p, FLAG_CLOCKSOURCE);
 481        p->cs_enabled = false;
 482}
 483
 484static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 485{
 486        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 487
 488        sh_cmt_stop(p, FLAG_CLOCKSOURCE);
 489        pm_genpd_syscore_poweroff(&p->pdev->dev);
 490}
 491
 492static void sh_cmt_clocksource_resume(struct clocksource *cs)
 493{
 494        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 495
 496        pm_genpd_syscore_poweron(&p->pdev->dev);
 497        sh_cmt_start(p, FLAG_CLOCKSOURCE);
 498}
 499
 500static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
 501                                       char *name, unsigned long rating)
 502{
 503        struct clocksource *cs = &p->cs;
 504
 505        cs->name = name;
 506        cs->rating = rating;
 507        cs->read = sh_cmt_clocksource_read;
 508        cs->enable = sh_cmt_clocksource_enable;
 509        cs->disable = sh_cmt_clocksource_disable;
 510        cs->suspend = sh_cmt_clocksource_suspend;
 511        cs->resume = sh_cmt_clocksource_resume;
 512        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
 513        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 514
 515        dev_info(&p->pdev->dev, "used as clock source\n");
 516
 517        /* Register with dummy 1 Hz value, gets updated in ->enable() */
 518        clocksource_register_hz(cs, 1);
 519        return 0;
 520}
 521
/* Map an embedded clock_event_device back to its owning sh_cmt_priv. */
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_priv, ced);
}
 526
 527static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
 528{
 529        struct clock_event_device *ced = &p->ced;
 530
 531        sh_cmt_start(p, FLAG_CLOCKEVENT);
 532
 533        /* TODO: calculate good shift from rate and counter bit width */
 534
 535        ced->shift = 32;
 536        ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
 537        ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
 538        ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
 539
 540        if (periodic)
 541                sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
 542        else
 543                sh_cmt_set_next(p, p->max_match_value);
 544}
 545
 546static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
 547                                    struct clock_event_device *ced)
 548{
 549        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
 550
 551        /* deal with old setting first */
 552        switch (ced->mode) {
 553        case CLOCK_EVT_MODE_PERIODIC:
 554        case CLOCK_EVT_MODE_ONESHOT:
 555                sh_cmt_stop(p, FLAG_CLOCKEVENT);
 556                break;
 557        default:
 558                break;
 559        }
 560
 561        switch (mode) {
 562        case CLOCK_EVT_MODE_PERIODIC:
 563                dev_info(&p->pdev->dev, "used for periodic clock events\n");
 564                sh_cmt_clock_event_start(p, 1);
 565                break;
 566        case CLOCK_EVT_MODE_ONESHOT:
 567                dev_info(&p->pdev->dev, "used for oneshot clock events\n");
 568                sh_cmt_clock_event_start(p, 0);
 569                break;
 570        case CLOCK_EVT_MODE_SHUTDOWN:
 571        case CLOCK_EVT_MODE_UNUSED:
 572                sh_cmt_stop(p, FLAG_CLOCKEVENT);
 573                break;
 574        default:
 575                break;
 576        }
 577}
 578
 579static int sh_cmt_clock_event_next(unsigned long delta,
 580                                   struct clock_event_device *ced)
 581{
 582        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
 583
 584        BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
 585        if (likely(p->flags & FLAG_IRQCONTEXT))
 586                p->next_match_value = delta - 1;
 587        else
 588                sh_cmt_set_next(p, delta - 1);
 589
 590        return 0;
 591}
 592
 593static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
 594{
 595        pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
 596}
 597
 598static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
 599{
 600        pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
 601}
 602
 603static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 604                                       char *name, unsigned long rating)
 605{
 606        struct clock_event_device *ced = &p->ced;
 607
 608        memset(ced, 0, sizeof(*ced));
 609
 610        ced->name = name;
 611        ced->features = CLOCK_EVT_FEAT_PERIODIC;
 612        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
 613        ced->rating = rating;
 614        ced->cpumask = cpumask_of(0);
 615        ced->set_next_event = sh_cmt_clock_event_next;
 616        ced->set_mode = sh_cmt_clock_event_mode;
 617        ced->suspend = sh_cmt_clock_event_suspend;
 618        ced->resume = sh_cmt_clock_event_resume;
 619
 620        dev_info(&p->pdev->dev, "used for clock events\n");
 621        clockevents_register_device(ced);
 622}
 623
 624static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
 625                           unsigned long clockevent_rating,
 626                           unsigned long clocksource_rating)
 627{
 628        if (p->width == (sizeof(p->max_match_value) * 8))
 629                p->max_match_value = ~0;
 630        else
 631                p->max_match_value = (1 << p->width) - 1;
 632
 633        p->match_value = p->max_match_value;
 634        raw_spin_lock_init(&p->lock);
 635
 636        if (clockevent_rating)
 637                sh_cmt_register_clockevent(p, name, clockevent_rating);
 638
 639        if (clocksource_rating)
 640                sh_cmt_register_clocksource(p, name, clocksource_rating);
 641
 642        return 0;
 643}
 644
 645static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 646{
 647        struct sh_timer_config *cfg = pdev->dev.platform_data;
 648        struct resource *res;
 649        int irq, ret;
 650        ret = -ENXIO;
 651
 652        memset(p, 0, sizeof(*p));
 653        p->pdev = pdev;
 654
 655        if (!cfg) {
 656                dev_err(&p->pdev->dev, "missing platform data\n");
 657                goto err0;
 658        }
 659
 660        platform_set_drvdata(pdev, p);
 661
 662        res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
 663        if (!res) {
 664                dev_err(&p->pdev->dev, "failed to get I/O memory\n");
 665                goto err0;
 666        }
 667
 668        irq = platform_get_irq(p->pdev, 0);
 669        if (irq < 0) {
 670                dev_err(&p->pdev->dev, "failed to get irq\n");
 671                goto err0;
 672        }
 673
 674        /* map memory, let mapbase point to our channel */
 675        p->mapbase = ioremap_nocache(res->start, resource_size(res));
 676        if (p->mapbase == NULL) {
 677                dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
 678                goto err0;
 679        }
 680
 681        /* request irq using setup_irq() (too early for request_irq()) */
 682        p->irqaction.name = dev_name(&p->pdev->dev);
 683        p->irqaction.handler = sh_cmt_interrupt;
 684        p->irqaction.dev_id = p;
 685        p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
 686                             IRQF_IRQPOLL  | IRQF_NOBALANCING;
 687
 688        /* get hold of clock */
 689        p->clk = clk_get(&p->pdev->dev, "cmt_fck");
 690        if (IS_ERR(p->clk)) {
 691                dev_err(&p->pdev->dev, "cannot get clock\n");
 692                ret = PTR_ERR(p->clk);
 693                goto err1;
 694        }
 695
 696        if (resource_size(res) == 6) {
 697                p->width = 16;
 698                p->overflow_bit = 0x80;
 699                p->clear_bits = ~0x80;
 700        } else {
 701                p->width = 32;
 702                p->overflow_bit = 0x8000;
 703                p->clear_bits = ~0xc000;
 704        }
 705
 706        ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
 707                              cfg->clockevent_rating,
 708                              cfg->clocksource_rating);
 709        if (ret) {
 710                dev_err(&p->pdev->dev, "registration failed\n");
 711                goto err1;
 712        }
 713        p->cs_enabled = false;
 714
 715        ret = setup_irq(irq, &p->irqaction);
 716        if (ret) {
 717                dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
 718                goto err1;
 719        }
 720
 721        return 0;
 722
 723err1:
 724        iounmap(p->mapbase);
 725err0:
 726        return ret;
 727}
 728
/*
 * Platform driver probe.  The device may have been probed once already
 * as an early platform device ("earlytimer"); in that case drvdata is
 * already populated and setup is skipped.  Runtime PM is only touched
 * for the regular (non-early) probe.
 */
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		/* second (regular) probe of an earlytimer device */
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(p, pdev);
	if (ret) {
		kfree(p);
		platform_set_drvdata(pdev, NULL);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* NOTE(review): cfg is dereferenced here without a NULL check;
	 * sh_cmt_setup() has validated it on the path that reaches this
	 * point, but the earlytimer re-probe relies on the same platform
	 * data still being present — confirm */
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
 769
/* Removal is refused: registered clockevents/clocksources cannot be
 * torn down safely. */
static int __devexit sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
 774
/* Platform driver matching "sh_cmt" platform devices. */
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= __devexit_p(sh_cmt_remove),
	.driver		= {
		.name	= "sh_cmt",
	}
};
 782
/* Module init: register the platform driver. */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}
 787
/* Module exit: unregister the platform driver. */
static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}
 792
/* Also register for the "earlytimer" early platform pass so the timer
 * is available before regular device probing. */
early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");
 800
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.