linux/drivers/clocksource/sh_cmt.c
<<
>>
Prefs
   1/*
   2 * SuperH Timer Support - CMT
   3 *
   4 *  Copyright (C) 2008 Magnus Damm
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#include <linux/init.h>
  21#include <linux/platform_device.h>
  22#include <linux/spinlock.h>
  23#include <linux/interrupt.h>
  24#include <linux/ioport.h>
  25#include <linux/io.h>
  26#include <linux/clk.h>
  27#include <linux/irq.h>
  28#include <linux/err.h>
  29#include <linux/delay.h>
  30#include <linux/clocksource.h>
  31#include <linux/clockchips.h>
  32#include <linux/sh_timer.h>
  33#include <linux/slab.h>
  34#include <linux/module.h>
  35#include <linux/pm_domain.h>
  36#include <linux/pm_runtime.h>
  37
/*
 * Per-channel driver state for one CMT timer channel.  A single channel
 * can serve as a clock event device, a clocksource, or both at once.
 */
struct sh_cmt_priv {
	void __iomem *mapbase;		/* channel registers: CMCSR/CMCNT/CMCOR */
	void __iomem *mapbase_str;	/* shared start/stop register: CMSTR */
	struct clk *clk;		/* "cmt_fck" functional clock */
	unsigned long width; /* 16 or 32 bit version of hardware block */
	unsigned long overflow_bit;	/* CMCSR bit that flags a counter wrap */
	unsigned long clear_bits;	/* mask ANDed into CMCSR to ack the IRQ */
	struct irqaction irqaction;	/* registered via setup_irq() */
	struct platform_device *pdev;

	unsigned long flags;		/* FLAG_* bits, guarded by ->lock */
	unsigned long match_value;	/* match value currently in CMCOR */
	unsigned long next_match_value;	/* match value to program next */
	unsigned long max_match_value;	/* all-ones for the counter width */
	unsigned long rate;		/* counter rate in Hz after prescaler */
	raw_spinlock_t lock;		/* protects flags/match bookkeeping */
	struct clock_event_device ced;
	struct clocksource cs;
	unsigned long total_cycles;	/* cycles accumulated across wraps */
	bool cs_enabled;		/* clocksource currently enabled */

	/* callbacks for CMSTR and CMCSR access */
	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      unsigned long value);

	/* callbacks for CMCNT and CMCOR access */
	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs,
			    unsigned long value);
};
  69
  70/* Examples of supported CMT timer register layouts and I/O access widths:
  71 *
  72 * "16-bit counter and 16-bit control" as found on sh7263:
  73 * CMSTR 0xfffec000 16-bit
  74 * CMCSR 0xfffec002 16-bit
  75 * CMCNT 0xfffec004 16-bit
  76 * CMCOR 0xfffec006 16-bit
  77 *
  78 * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
  79 * CMSTR 0xffca0000 16-bit
  80 * CMCSR 0xffca0060 16-bit
  81 * CMCNT 0xffca0064 32-bit
  82 * CMCOR 0xffca0068 32-bit
  83 *
  84 * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
  85 * CMSTR 0xffca0500 32-bit
  86 * CMCSR 0xffca0510 32-bit
  87 * CMCNT 0xffca0514 32-bit
  88 * CMCOR 0xffca0518 32-bit
  89 */
  90
  91static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
  92{
  93        return ioread16(base + (offs << 1));
  94}
  95
  96static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
  97{
  98        return ioread32(base + (offs << 2));
  99}
 100
 101static void sh_cmt_write16(void __iomem *base, unsigned long offs,
 102                           unsigned long value)
 103{
 104        iowrite16(value, base + (offs << 1));
 105}
 106
 107static void sh_cmt_write32(void __iomem *base, unsigned long offs,
 108                           unsigned long value)
 109{
 110        iowrite32(value, base + (offs << 2));
 111}
 112
 113#define CMCSR 0 /* channel register */
 114#define CMCNT 1 /* channel register */
 115#define CMCOR 2 /* channel register */
 116
/* Read the shared CMSTR start/stop register (index 0 of mapbase_str). */
static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
	return p->read_control(p->mapbase_str, 0);
}
 121
/* Read this channel's CMCSR control/status register. */
static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
{
	return p->read_control(p->mapbase, CMCSR);
}
 126
/* Read this channel's free-running CMCNT counter register. */
static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
{
	return p->read_count(p->mapbase, CMCNT);
}
 131
/* Write the shared CMSTR start/stop register (index 0 of mapbase_str). */
static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
				      unsigned long value)
{
	p->write_control(p->mapbase_str, 0, value);
}
 137
/* Write this channel's CMCSR control/status register. */
static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
				      unsigned long value)
{
	p->write_control(p->mapbase, CMCSR, value);
}
 143
/* Write this channel's CMCNT counter register. */
static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
				      unsigned long value)
{
	p->write_count(p->mapbase, CMCNT, value);
}
 149
/* Write this channel's CMCOR compare-match register. */
static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
				      unsigned long value)
{
	p->write_count(p->mapbase, CMCOR, value);
}
 155
/*
 * Read a consistent snapshot of the counter, storing the overflow flag
 * through @has_wrapped.  The counter runs asynchronously to the CPU, so
 * CMCNT is read three times with the overflow bit sampled before and
 * after; the loop repeats until the three values are monotonic and the
 * overflow bit did not change mid-sequence.
 */
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(p);
		v2 = sh_cmt_read_cmcnt(p);
		v3 = sh_cmt_read_cmcnt(p);
		o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	/* middle read is the stable sample once the loop exits */
	*has_wrapped = o1;
	return v2;
}
 177
 178static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
 179
 180static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 181{
 182        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 183        unsigned long flags, value;
 184
 185        /* start stop register shared by multiple timer channels */
 186        raw_spin_lock_irqsave(&sh_cmt_lock, flags);
 187        value = sh_cmt_read_cmstr(p);
 188
 189        if (start)
 190                value |= 1 << cfg->timer_bit;
 191        else
 192                value &= ~(1 << cfg->timer_bit);
 193
 194        sh_cmt_write_cmstr(p, value);
 195        raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
 196}
 197
/*
 * Power up and start the channel's counter, storing the resulting
 * counter rate (Hz) through @rate.
 *
 * Returns 0 on success, a clk_enable() error, or -ETIMEDOUT if CMCNT
 * refuses to clear.  On success the channel is running with CMCOR set
 * to the maximum timeout.
 */
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
	int k, ret;

	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(p, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (p->width == 16) {
		/* NOTE(review): 0x43 presumably selects the /512 prescaler
		 * plus compare-match interrupt enable — confirm against the
		 * hardware manual for the SoC in question. */
		*rate = clk_get_rate(p->clk) / 512;
		sh_cmt_write_cmcsr(p, 0x43);
	} else {
		/* NOTE(review): 0x01a4 is assumed to select the /8 prescaler
		 * and interrupt enable for the 32-bit variant — confirm. */
		*rate = clk_get_rate(p->clk) / 8;
		sh_cmt_write_cmcsr(p, 0x01a4);
	}

	sh_cmt_write_cmcor(p, 0xffffffff);
	sh_cmt_write_cmcnt(p, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here.  This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(p))
			break;
		udelay(1);
	}

	/* re-check after the polling loop: counter must now read zero */
	if (sh_cmt_read_cmcnt(p)) {
		dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(p, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(p->clk);

 err0:
	return ret;
}
 260
/*
 * Stop the channel and release its power/clock references — the exact
 * inverse of sh_cmt_enable(), performed in reverse order.
 */
static void sh_cmt_disable(struct sh_cmt_priv *p)
{
	/* disable channel */
	sh_cmt_start_stop_ch(p, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(p, 0);

	/* stop clock */
	clk_disable(p->clk);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}
 275
 276/* private flags */
 277#define FLAG_CLOCKEVENT (1 << 0)
 278#define FLAG_CLOCKSOURCE (1 << 1)
 279#define FLAG_REPROGRAM (1 << 2)
 280#define FLAG_SKIPEVENT (1 << 3)
 281#define FLAG_IRQCONTEXT (1 << 4)
 282
/*
 * Program CMCOR so that the next compare-match fires p->next_match_value
 * cycles from now (or from zero when @absolute).  After writing the
 * match value the counter is re-read to verify the event was not set in
 * the past; if it was, the write is retried with an exponentially
 * growing safety delay.  Caller must hold p->lock.
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = p->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(p, &has_wrapped);
	p->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		p->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > p->max_match_value)
			new_match = p->max_match_value;

		sh_cmt_write_cmcor(p, new_match);

		now = sh_cmt_get_counter(p, &has_wrapped);
		if (has_wrapped && (new_match > p->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			p->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay only becomes 0 again if the shift overflowed */
		if (!delay)
			dev_warn(&p->pdev->dev, "too long delay\n");

	} while (delay);
}
 367
/*
 * Arm a new relative timeout of @delta counter cycles.  Lockless
 * variant; caller must hold p->lock.  Out-of-range deltas are clamped
 * by the reprogramming logic, so only warn here.
 */
static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
	if (delta > p->max_match_value)
		dev_warn(&p->pdev->dev, "delta out of range\n");

	p->next_match_value = delta;
	sh_cmt_clock_event_program_verify(p, 0);
}
 376
/* Locked wrapper around __sh_cmt_set_next(). */
static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->lock, flags);
	__sh_cmt_set_next(p, delta);
	raw_spin_unlock_irqrestore(&p->lock, flags);
}
 385
/*
 * Compare-match interrupt handler: acknowledge the hardware, account
 * the elapsed cycles for the clocksource, deliver the clockevent, and
 * reprogram the match register if a new event is pending.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_priv *p = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (p->flags & FLAG_CLOCKSOURCE)
		p->total_cycles += p->match_value + 1;

	if (!(p->flags & FLAG_REPROGRAM))
		p->next_match_value = p->max_match_value;

	/* lets sh_cmt_clock_event_next() store the next match value
	 * directly instead of re-taking p->lock (we already run with
	 * the per-channel state effectively owned by this handler)
	 */
	p->flags |= FLAG_IRQCONTEXT;

	if (p->flags & FLAG_CLOCKEVENT) {
		if (!(p->flags & FLAG_SKIPEVENT)) {
			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				/* oneshot: disarm until set_next_event() */
				p->next_match_value = p->max_match_value;
				p->flags |= FLAG_REPROGRAM;
			}

			p->ced.event_handler(&p->ced);
		}
	}

	p->flags &= ~FLAG_SKIPEVENT;

	if (p->flags & FLAG_REPROGRAM) {
		p->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(p, 1);

		/* keep FLAG_REPROGRAM only if another pass is needed */
		if (p->flags & FLAG_CLOCKEVENT)
			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (p->match_value == p->next_match_value))
				p->flags &= ~FLAG_REPROGRAM;
	}

	p->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
 432
/*
 * Claim the channel for @flag (FLAG_CLOCKEVENT or FLAG_CLOCKSOURCE).
 * The hardware is only brought up on the first user; p->rate is valid
 * after a successful first start.  Returns 0 or an sh_cmt_enable()
 * error.
 */
static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&p->lock, flags);

	/* enable hardware only when no role was active before */
	if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(p, &p->rate);

	if (ret)
		goto out;
	p->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(p, p->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&p->lock, flags);

	return ret;
}
 455
/*
 * Release the channel for @flag.  The hardware is shut down only when
 * the last role (clockevent or clocksource) goes away.
 */
static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&p->lock, flags);

	/* remember whether any role was active before clearing @flag */
	f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	p->flags &= ~flag;

	if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(p);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(p, p->max_match_value);

	raw_spin_unlock_irqrestore(&p->lock, flags);
}
 475
/* Map a struct clocksource back to its embedding sh_cmt_priv. */
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_priv, cs);
}
 480
 481static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
 482{
 483        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 484        unsigned long flags, raw;
 485        unsigned long value;
 486        int has_wrapped;
 487
 488        raw_spin_lock_irqsave(&p->lock, flags);
 489        value = p->total_cycles;
 490        raw = sh_cmt_get_counter(p, &has_wrapped);
 491
 492        if (unlikely(has_wrapped))
 493                raw += p->match_value + 1;
 494        raw_spin_unlock_irqrestore(&p->lock, flags);
 495
 496        return value + raw;
 497}
 498
 499static int sh_cmt_clocksource_enable(struct clocksource *cs)
 500{
 501        int ret;
 502        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 503
 504        WARN_ON(p->cs_enabled);
 505
 506        p->total_cycles = 0;
 507
 508        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
 509        if (!ret) {
 510                __clocksource_updatefreq_hz(cs, p->rate);
 511                p->cs_enabled = true;
 512        }
 513        return ret;
 514}
 515
/* clocksource ->disable(): release the channel's clocksource role. */
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	WARN_ON(!p->cs_enabled);

	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
	p->cs_enabled = false;
}
 525
/* clocksource ->suspend(): stop the timer, then power off the domain. */
static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&p->pdev->dev);
}
 533
/* clocksource ->resume(): power the domain back on, then restart. */
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	pm_genpd_syscore_poweron(&p->pdev->dev);
	sh_cmt_start(p, FLAG_CLOCKSOURCE);
}
 541
 542static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
 543                                       char *name, unsigned long rating)
 544{
 545        struct clocksource *cs = &p->cs;
 546
 547        cs->name = name;
 548        cs->rating = rating;
 549        cs->read = sh_cmt_clocksource_read;
 550        cs->enable = sh_cmt_clocksource_enable;
 551        cs->disable = sh_cmt_clocksource_disable;
 552        cs->suspend = sh_cmt_clocksource_suspend;
 553        cs->resume = sh_cmt_clocksource_resume;
 554        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
 555        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 556
 557        dev_info(&p->pdev->dev, "used as clock source\n");
 558
 559        /* Register with dummy 1 Hz value, gets updated in ->enable() */
 560        clocksource_register_hz(cs, 1);
 561        return 0;
 562}
 563
/* Map a struct clock_event_device back to its embedding sh_cmt_priv. */
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_priv, ced);
}
 568
/*
 * Start the channel in clockevent mode (@periodic selects periodic vs
 * oneshot) and derive the mult/shift conversion from the now-known
 * counter rate.
 *
 * NOTE(review): the sh_cmt_start() return value is ignored here; on
 * failure p->rate may be stale — worth confirming this cannot happen
 * in practice.
 */
static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_cmt_start(p, FLAG_CLOCKEVENT);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

	if (periodic)
		/* rounded cycles-per-tick, minus one since match is inclusive */
		sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(p, p->max_match_value);
}
 587
/*
 * clockevent ->set_mode(): tear down the previous mode if it was
 * running, then start or stop the channel for the new mode.
 */
static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_cmt_stop(p, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_cmt_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_cmt_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sh_cmt_stop(p, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}
}
 620
/*
 * clockevent ->set_next_event(): arm an event @delta cycles ahead.
 * When called from our own interrupt handler (FLAG_IRQCONTEXT), only
 * the next match value is stored — the handler's reprogram pass will
 * write it to the hardware; otherwise program it immediately.
 */
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
	if (likely(p->flags & FLAG_IRQCONTEXT))
		p->next_match_value = delta - 1;
	else
		sh_cmt_set_next(p, delta - 1);

	return 0;
}
 634
/* clockevent ->suspend(): power off the channel's PM domain. */
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
}
 639
/* clockevent ->resume(): power the channel's PM domain back on. */
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
}
 644
 645static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 646                                       char *name, unsigned long rating)
 647{
 648        struct clock_event_device *ced = &p->ced;
 649
 650        memset(ced, 0, sizeof(*ced));
 651
 652        ced->name = name;
 653        ced->features = CLOCK_EVT_FEAT_PERIODIC;
 654        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
 655        ced->rating = rating;
 656        ced->cpumask = cpumask_of(0);
 657        ced->set_next_event = sh_cmt_clock_event_next;
 658        ced->set_mode = sh_cmt_clock_event_mode;
 659        ced->suspend = sh_cmt_clock_event_suspend;
 660        ced->resume = sh_cmt_clock_event_resume;
 661
 662        dev_info(&p->pdev->dev, "used for clock events\n");
 663        clockevents_register_device(ced);
 664}
 665
 666static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
 667                           unsigned long clockevent_rating,
 668                           unsigned long clocksource_rating)
 669{
 670        if (clockevent_rating)
 671                sh_cmt_register_clockevent(p, name, clockevent_rating);
 672
 673        if (clocksource_rating)
 674                sh_cmt_register_clocksource(p, name, clocksource_rating);
 675
 676        return 0;
 677}
 678
/*
 * One-time channel initialization: map registers, pick 16/32-bit access
 * helpers based on resource sizes, grab the functional clock, register
 * the clockevent/clocksource roles and install the interrupt handler.
 * Cleanup on failure unwinds in reverse order via the goto chain.
 *
 * Returns 0 on success or a negative error code.
 */
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res, *res2;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	/* optional resource for the shared timer start/stop register */
	res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* map second resource for CMSTR; without a dedicated resource,
	 * CMSTR sits channel_offset bytes below the channel registers */
	p->mapbase_str = ioremap_nocache(res2 ? res2->start :
					 res->start - cfg->channel_offset,
					 res2 ? resource_size(res2) : 2);
	if (p->mapbase_str == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
		goto err1;
	}

	/* request irq using setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_cmt_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
			     IRQF_IRQPOLL  | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err2;
	}

	/* control register width is inferred from the CMSTR resource size */
	if (res2 && (resource_size(res2) == 4)) {
		/* assume both CMSTR and CMCSR to be 32-bit */
		p->read_control = sh_cmt_read32;
		p->write_control = sh_cmt_write32;
	} else {
		p->read_control = sh_cmt_read16;
		p->write_control = sh_cmt_write16;
	}

	/* counter width is inferred from the channel resource size:
	 * 6 bytes = three 16-bit registers, otherwise 32-bit counters */
	if (resource_size(res) == 6) {
		p->width = 16;
		p->read_count = sh_cmt_read16;
		p->write_count = sh_cmt_write16;
		p->overflow_bit = 0x80;
		p->clear_bits = ~0x80;
	} else {
		p->width = 32;
		p->read_count = sh_cmt_read32;
		p->write_count = sh_cmt_write32;
		p->overflow_bit = 0x8000;
		p->clear_bits = ~0xc000;
	}

	/* avoid shifting by the full word width, which would be undefined */
	if (p->width == (sizeof(p->max_match_value) * 8))
		p->max_match_value = ~0;
	else
		p->max_match_value = (1 << p->width) - 1;

	p->match_value = p->max_match_value;
	raw_spin_lock_init(&p->lock);

	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
		dev_err(&p->pdev->dev, "registration failed\n");
		goto err3;
	}
	p->cs_enabled = false;

	ret = setup_irq(irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
		goto err3;
	}

	platform_set_drvdata(pdev, p);

	return 0;
err3:
	clk_put(p->clk);
err2:
	iounmap(p->mapbase_str);
err1:
	iounmap(p->mapbase);
err0:
	return ret;
}
 798
/*
 * Platform probe.  The device may be probed twice: once very early via
 * the "earlytimer" early platform mechanism, and again during normal
 * driver registration — in the second pass drvdata is already set and
 * the existing state is kept.
 */
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* non-NULL drvdata means the early pass already did the setup */
	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(p, pdev);
	if (ret) {
		kfree(p);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	/* runtime PM was not enabled yet in the early pass; skip it */
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* a channel used as timekeeping hardware must stay irq-safe */
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
 838
/* Removal is refused: registered timekeeping devices cannot go away. */
static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
 843
/* Platform driver glue; matches devices named "sh_cmt". */
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
	}
};
 851
/* Module init: register the platform driver (normal, non-early path). */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}
 856
/* Module exit: unregister the platform driver. */
static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}
 861
 862early_platform_init("earlytimer", &sh_cmt_device_driver);
 863subsys_initcall(sh_cmt_init);
 864module_exit(sh_cmt_exit);
 865
 866MODULE_AUTHOR("Magnus Damm");
 867MODULE_DESCRIPTION("SuperH CMT Timer Driver");
 868MODULE_LICENSE("GPL v2");
 869
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.