linux/drivers/clocksource/sh_tmu.c
/*
 * SuperH Timer Support - TMU
 *
 *  Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_tmu_priv {
	void __iomem *mapbase;		/* this channel's register window */
	struct clk *clk;		/* input clock for the TMU block */
	struct irqaction irqaction;
	struct platform_device *pdev;
	unsigned long rate;		/* counter frequency, clk rate / 4 */
	unsigned long periodic;		/* reload value for periodic mode */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;		/* clocksource currently enabled? */
	unsigned int enable_count;	/* nested sh_tmu_enable() calls */
};

static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

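/*
 * Register layout: each channel has three registers at 4-byte strides
 * from its mapbase (TCOR, TCNT and the 16-bit TCR), while the
 * start/stop register TSTR is shared by all channels in the block and
 * sits below the channel at mapbase - channel_offset.  On typical SH
 * TMU implementations, TCR bit 8 is the underflow flag (UNF), bit 5
 * the underflow interrupt enable (UNIE), and bits 2:0 the input clock
 * prescaler select (0 = peripheral clock / 4); this is where the raw
 * 0x0020 and 0x0000 TCR values used below come from.
 */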
static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(base - cfg->channel_offset);

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(base + offs);
	else
		return ioread32(base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
				unsigned long value)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR) {
		iowrite8(value, base - cfg->channel_offset);
		return;
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, base + offs);
	else
		iowrite32(value, base + offs);
}

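/*
 * TSTR holds one start bit per channel (cfg->timer_bit) and is shared
 * by all channels in the TMU block, so the read-modify-write below is
 * serialized against the other channels with sh_tmu_lock.
 */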
static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(p, TSTR);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_tmu_write(p, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

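/*
 * The TMU channel is a 32-bit down-counter: TCNT counts towards zero
 * and is reloaded from TCOR on underflow.  Loading both registers with
 * 0xffffffff therefore gives the longest possible free-running period,
 * and TCR = 0 selects clock / 4 input with the interrupt disabled.
 */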
static int __sh_tmu_enable(struct sh_tmu_priv *p)
{
	int ret;

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(p, 0);

	/* maximum timeout */
	sh_tmu_write(p, TCOR, 0xffffffff);
	sh_tmu_write(p, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	p->rate = clk_get_rate(p->clk) / 4;
	sh_tmu_write(p, TCR, 0x0000);

	/* enable channel */
	sh_tmu_start_stop_ch(p, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_priv *p)
{
	if (p->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);

	return __sh_tmu_enable(p);
}

static void __sh_tmu_disable(struct sh_tmu_priv *p)
{
	/* disable channel */
	sh_tmu_start_stop_ch(p, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(p, TCR, 0x0000);

	/* stop clock */
	clk_disable(p->clk);
}

static void sh_tmu_disable(struct sh_tmu_priv *p)
{
	if (WARN_ON(p->enable_count == 0))
		return;

	if (--p->enable_count > 0)
		return;

	__sh_tmu_disable(p);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}

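/*
 * Reprogram the channel: stop it, acknowledge any pending underflow
 * (read TCR, then rewrite it with UNF clear and UNIE set), load the
 * new deadline and restart.  In periodic mode TCOR holds the reload
 * value so the hardware retriggers on its own; in oneshot mode TCOR
 * is parked at the maximum so the counter simply keeps running after
 * the event.
 */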
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(p, 0);

	/* acknowledge interrupt */
	sh_tmu_read(p, TCR);

	/* enable interrupt */
	sh_tmu_write(p, TCR, 0x0020);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(p, TCOR, delta);
	else
		sh_tmu_write(p, TCOR, 0xffffffff);

	sh_tmu_write(p, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(p, 1);
}

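/*
 * Underflow interrupt handler.  A oneshot event fires only once, so
 * UNIE is cleared to mask further interrupts; in periodic mode UNIE
 * stays set and only the UNF flag is cleared before the event is
 * passed on to the clockevent core.
 */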
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_priv *p = dev_id;

	/* disable or acknowledge interrupt */
	if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(p, TCR, 0x0000);
	else
		sh_tmu_write(p, TCR, 0x0020);

	/* notify clockevent layer */
	p->ced.event_handler(&p->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_priv, cs);
}

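/*
 * The hardware counts down from 0xffffffff while the clocksource core
 * expects an up-counting value; XOR with 0xffffffff (the same as
 * 0xffffffff - TCNT for a 32-bit value) converts between the two.
 */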
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	return sh_tmu_read(p, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(p->cs_enabled))
		return 0;

	ret = sh_tmu_enable(p);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, p->rate);
		p->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (WARN_ON(!p->cs_enabled))
		return;

	sh_tmu_disable(p);
	p->cs_enabled = false;
}

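/*
 * The syscore suspend/resume hooks deliberately bypass
 * sh_tmu_disable()/sh_tmu_enable() and adjust enable_count by hand:
 * runtime PM must not be called this late in the suspend path, so only
 * the hardware and the generic PM domain are touched here.
 */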
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (!p->cs_enabled)
		return;

	if (--p->enable_count == 0) {
		__sh_tmu_disable(p);
		pm_genpd_syscore_poweroff(&p->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	if (!p->cs_enabled)
		return;

	if (p->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&p->pdev->dev);
		__sh_tmu_enable(p);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clocksource *cs = &p->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&p->pdev->dev, "used as clock source\n");

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_priv, ced);
}

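/*
 * For periodic operation the reload value is the channel rate divided
 * by HZ, rounded to nearest, i.e. the number of counter ticks per
 * jiffy (the open-coded equivalent of DIV_ROUND_CLOSEST(p->rate, HZ)).
 */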
static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_tmu_enable(p);

	clockevents_config(ced, p->rate);

	if (periodic) {
		p->periodic = (p->rate + HZ/2) / HZ;
		sh_tmu_set_next(p, p->periodic, 1);
	}
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(p);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_tmu_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_tmu_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(p);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(p, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
}

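/*
 * The clockevent is registered with a dummy 1 Hz frequency and a
 * minimum delta of 0x300 ticks; the real frequency is only known once
 * the clock is enabled, and is filled in through clockevents_config()
 * in sh_tmu_clock_event_start().
 */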
static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;
	int ret;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&p->pdev->dev, "used for clock events\n");

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n",
			p->irqaction.irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
		    unsigned long clockevent_rating,
		    unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_tmu_register_clockevent(p, name, clockevent_rating);
	else if (clocksource_rating)
		sh_tmu_register_clocksource(p, name, clocksource_rating);

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_tmu_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER |
			     IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}
	p->cs_enabled = false;
	p->enable_count = 0;

	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
			       cfg->clockevent_rating,
			       cfg->clocksource_rating);
 err1:
	iounmap(p->mapbase);
 err0:
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(p, pdev);
	if (ret) {
		kfree(p);
		platform_set_drvdata(pdev, NULL);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	}
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

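/*
 * The driver is registered twice: early_platform_init() makes the TMU
 * available as an "earlytimer" before the driver core is up, while
 * subsys_initcall() covers the regular platform device path.
 */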
early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");