linux/drivers/clocksource/arm_arch_timer.c
<<
>>
Prefs
   1/*
   2 *  linux/drivers/clocksource/arm_arch_timer.c
   3 *
   4 *  Copyright (C) 2011 ARM Ltd.
   5 *  All Rights Reserved
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#include <linux/init.h>
  12#include <linux/kernel.h>
  13#include <linux/device.h>
  14#include <linux/smp.h>
  15#include <linux/cpu.h>
  16#include <linux/clockchips.h>
  17#include <linux/interrupt.h>
  18#include <linux/of_irq.h>
  19#include <linux/of_address.h>
  20#include <linux/io.h>
  21#include <linux/slab.h>
  22
  23#include <asm/arch_timer.h>
  24#include <asm/virt.h>
  25
  26#include <clocksource/arm_arch_timer.h>
  27
/*
 * Memory-mapped timer frame register offsets, relative to the CNTCTLBase
 * (CNTTIDR) or per-frame base (the rest).
 */
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))	/* frame n has a virtual timer */

#define CNTVCT_LO	0x08	/* virtual counter, low 32 bits */
#define CNTVCT_HI	0x0c	/* virtual counter, high 32 bits */
#define CNTFRQ		0x10	/* counter frequency */
#define CNTP_TVAL	0x28	/* physical timer value */
#define CNTP_CTL	0x2c	/* physical timer control */
#define CNTV_TVAL	0x38	/* virtual timer value */
#define CNTV_CTL	0x3c	/* virtual timer control */

/* Which timer flavours have been probed from the device tree so far. */
#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

/* Mapped frame used for memory-mapped counter reads (CNTVCT_LO/HI). */
static void __iomem *arch_counter_base;

/* Per-frame state for one memory-mapped timer. */
struct arch_timer {
	void __iomem *base;		/* mapped frame registers */
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

/* System counter frequency in Hz; 0 until detected. */
static u32 arch_timer_rate;

/* Indices into arch_timer_ppi[]; must match the DT interrupt order. */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

/* Whether the CP15 / memory-mapped paths drive the virtual timer. */
static bool arch_timer_use_virtual = true;
static bool arch_timer_mem_use_virtual;
  68
  69/*
  70 * Architected system timer support.
  71 */
  72
  73static __always_inline
  74void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
  75                          struct clock_event_device *clk)
  76{
  77        if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
  78                struct arch_timer *timer = to_arch_timer(clk);
  79                switch (reg) {
  80                case ARCH_TIMER_REG_CTRL:
  81                        writel_relaxed(val, timer->base + CNTP_CTL);
  82                        break;
  83                case ARCH_TIMER_REG_TVAL:
  84                        writel_relaxed(val, timer->base + CNTP_TVAL);
  85                        break;
  86                }
  87        } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
  88                struct arch_timer *timer = to_arch_timer(clk);
  89                switch (reg) {
  90                case ARCH_TIMER_REG_CTRL:
  91                        writel_relaxed(val, timer->base + CNTV_CTL);
  92                        break;
  93                case ARCH_TIMER_REG_TVAL:
  94                        writel_relaxed(val, timer->base + CNTV_TVAL);
  95                        break;
  96                }
  97        } else {
  98                arch_timer_reg_write_cp15(access, reg, val);
  99        }
 100}
 101
 102static __always_inline
 103u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
 104                        struct clock_event_device *clk)
 105{
 106        u32 val;
 107
 108        if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
 109                struct arch_timer *timer = to_arch_timer(clk);
 110                switch (reg) {
 111                case ARCH_TIMER_REG_CTRL:
 112                        val = readl_relaxed(timer->base + CNTP_CTL);
 113                        break;
 114                case ARCH_TIMER_REG_TVAL:
 115                        val = readl_relaxed(timer->base + CNTP_TVAL);
 116                        break;
 117                }
 118        } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
 119                struct arch_timer *timer = to_arch_timer(clk);
 120                switch (reg) {
 121                case ARCH_TIMER_REG_CTRL:
 122                        val = readl_relaxed(timer->base + CNTV_CTL);
 123                        break;
 124                case ARCH_TIMER_REG_TVAL:
 125                        val = readl_relaxed(timer->base + CNTV_TVAL);
 126                        break;
 127                }
 128        } else {
 129                val = arch_timer_reg_read_cp15(access, reg);
 130        }
 131
 132        return val;
 133}
 134
 135static __always_inline irqreturn_t timer_handler(const int access,
 136                                        struct clock_event_device *evt)
 137{
 138        unsigned long ctrl;
 139
 140        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
 141        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
 142                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
 143                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
 144                evt->event_handler(evt);
 145                return IRQ_HANDLED;
 146        }
 147
 148        return IRQ_NONE;
 149}
 150
 151static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
 152{
 153        struct clock_event_device *evt = dev_id;
 154
 155        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
 156}
 157
 158static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
 159{
 160        struct clock_event_device *evt = dev_id;
 161
 162        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
 163}
 164
 165static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
 166{
 167        struct clock_event_device *evt = dev_id;
 168
 169        return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
 170}
 171
 172static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
 173{
 174        struct clock_event_device *evt = dev_id;
 175
 176        return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
 177}
 178
 179static __always_inline void timer_set_mode(const int access, int mode,
 180                                  struct clock_event_device *clk)
 181{
 182        unsigned long ctrl;
 183        switch (mode) {
 184        case CLOCK_EVT_MODE_UNUSED:
 185        case CLOCK_EVT_MODE_SHUTDOWN:
 186                ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
 187                ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
 188                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 189                break;
 190        default:
 191                break;
 192        }
 193}
 194
/* Clockevent set_mode trampolines, one per access method. */
static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
}

static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
					 struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
					 struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
}
 218
 219static __always_inline void set_next_event(const int access, unsigned long evt,
 220                                           struct clock_event_device *clk)
 221{
 222        unsigned long ctrl;
 223        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
 224        ctrl |= ARCH_TIMER_CTRL_ENABLE;
 225        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
 226        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
 227        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 228}
 229
/* Clockevent set_next_event trampolines, one per access method. */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
 257
 258static void __arch_timer_setup(unsigned type,
 259                               struct clock_event_device *clk)
 260{
 261        clk->features = CLOCK_EVT_FEAT_ONESHOT;
 262
 263        if (type == ARCH_CP15_TIMER) {
 264                clk->features |= CLOCK_EVT_FEAT_C3STOP;
 265                clk->name = "arch_sys_timer";
 266                clk->rating = 450;
 267                clk->cpumask = cpumask_of(smp_processor_id());
 268                if (arch_timer_use_virtual) {
 269                        clk->irq = arch_timer_ppi[VIRT_PPI];
 270                        clk->set_mode = arch_timer_set_mode_virt;
 271                        clk->set_next_event = arch_timer_set_next_event_virt;
 272                } else {
 273                        clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
 274                        clk->set_mode = arch_timer_set_mode_phys;
 275                        clk->set_next_event = arch_timer_set_next_event_phys;
 276                }
 277        } else {
 278                clk->name = "arch_mem_timer";
 279                clk->rating = 400;
 280                clk->cpumask = cpu_all_mask;
 281                if (arch_timer_mem_use_virtual) {
 282                        clk->set_mode = arch_timer_set_mode_virt_mem;
 283                        clk->set_next_event =
 284                                arch_timer_set_next_event_virt_mem;
 285                } else {
 286                        clk->set_mode = arch_timer_set_mode_phys_mem;
 287                        clk->set_next_event =
 288                                arch_timer_set_next_event_phys_mem;
 289                }
 290        }
 291
 292        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
 293
 294        clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
 295}
 296
 297static int arch_timer_setup(struct clock_event_device *clk)
 298{
 299        __arch_timer_setup(ARCH_CP15_TIMER, clk);
 300
 301        if (arch_timer_use_virtual)
 302                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
 303        else {
 304                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
 305                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
 306                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
 307        }
 308
 309        arch_counter_set_user_access();
 310
 311        return 0;
 312}
 313
 314static void
 315arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
 316{
 317        /* Who has more than one independent system counter? */
 318        if (arch_timer_rate)
 319                return;
 320
 321        /* Try to determine the frequency from the device tree or CNTFRQ */
 322        if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
 323                if (cntbase)
 324                        arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
 325                else
 326                        arch_timer_rate = arch_timer_get_cntfrq();
 327        }
 328
 329        /* Check the timer frequency. */
 330        if (arch_timer_rate == 0)
 331                pr_warn("Architected timer frequency not available\n");
 332}
 333
/*
 * Boot banner: which timer flavours were found, the counter rate in MHz,
 * and whether each uses virtual or physical access.  The nested ternaries
 * collapse the absent flavour's fields to empty strings.
 */
static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		     type & ARCH_CP15_TIMER ? "cp15" : "",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  " and " : "",
		     type & ARCH_MEM_TIMER ? "mmio" : "",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     type & ARCH_CP15_TIMER ?
			arch_timer_use_virtual ? "virt" : "phys" :
			"",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  "/" : "",
		     type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}
 350
/* Accessor for other subsystems: counter frequency in Hz (0 if unknown). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
 355
/*
 * Read the 64-bit virtual counter from the memory-mapped frame.  The
 * counter is exposed as two 32-bit registers, so re-read the high word
 * until it is stable: a carry between the two halves during the read
 * would otherwise produce a torn value.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
 368
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
/* Retargeted to the MMIO reader in arch_counter_register() if needed. */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
 376
/* clocksource read hook; indirects through arch_timer_read_counter. */
static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read hook; same underlying counter as the clocksource. */
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
 386
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Mirrors the clocksource (mult/shift copied in arch_counter_register). */
static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

/* Exported accessor for the timecounter built on the system counter. */
struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}
 406
 407static void __init arch_counter_register(unsigned type)
 408{
 409        u64 start_count;
 410
 411        /* Register the CP15 based counter if we have one */
 412        if (type & ARCH_CP15_TIMER)
 413                arch_timer_read_counter = arch_counter_get_cntvct;
 414        else
 415                arch_timer_read_counter = arch_counter_get_cntvct_mem;
 416
 417        start_count = arch_timer_read_counter();
 418        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
 419        cyclecounter.mult = clocksource_counter.mult;
 420        cyclecounter.shift = clocksource_counter.shift;
 421        timecounter_init(&timecounter, &cyclecounter, start_count);
 422}
 423
 424static void arch_timer_stop(struct clock_event_device *clk)
 425{
 426        pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
 427                 clk->irq, smp_processor_id());
 428
 429        if (arch_timer_use_virtual)
 430                disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
 431        else {
 432                disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
 433                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
 434                        disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
 435        }
 436
 437        clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
 438}
 439
 440static int arch_timer_cpu_notify(struct notifier_block *self,
 441                                           unsigned long action, void *hcpu)
 442{
 443        /*
 444         * Grab cpu pointer in each case to avoid spurious
 445         * preemptible warnings
 446         */
 447        switch (action & ~CPU_TASKS_FROZEN) {
 448        case CPU_STARTING:
 449                arch_timer_setup(this_cpu_ptr(arch_timer_evt));
 450                break;
 451        case CPU_DYING:
 452                arch_timer_stop(this_cpu_ptr(arch_timer_evt));
 453                break;
 454        }
 455
 456        return NOTIFY_OK;
 457}
 458
 459static struct notifier_block arch_timer_cpu_nb = {
 460        .notifier_call = arch_timer_cpu_notify,
 461};
 462
/*
 * Allocate the per-cpu clockevent, request the chosen per-cpu timer
 * interrupt(s), hook the CPU hotplug notifier and configure the timer
 * on the boot CPU.  Returns 0 or a negative errno, unwinding any
 * partially-acquired resources via the goto chain.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		/* Secure PPI is mandatory; the non-secure one is optional. */
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			/* Undo the secure IRQ if the non-secure one failed. */
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
 523
/*
 * Register the clockevent for a memory-mapped timer frame and request
 * its interrupt.  Returns 0 or a negative errno.
 */
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		/*
		 * NOTE(review): t->evt was already registered with the
		 * clockevents core by __arch_timer_setup() above, so freeing
		 * t here leaves a dangling registration behind — confirm
		 * whether this error path can cause a use-after-free.
		 */
		kfree(t);
	}

	return ret;
}
 551
/* DT match table for the CP15 timer nodes... */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{ .compatible	= "arm,armv8-timer",	},
	{},
};

/* ...and for the memory-mapped timer node. */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer-mem", },
	{},
};
 562
/*
 * Final registration step shared by both probe paths: print the banner,
 * register the clocksource and do the arch-specific hookup.  If the DT
 * describes both timer flavours, defer until the second one has probed
 * so the combined state is reported/registered only once.
 */
static void __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (of_find_matching_node(NULL, arch_timer_mem_of_match) &&
				!(arch_timers_present & ARCH_MEM_TIMER))
			return;
		if (of_find_matching_node(NULL, arch_timer_of_match) &&
				!(arch_timers_present & ARCH_CP15_TIMER))
			return;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	arch_timer_arch_init();
}
 581
/*
 * Probe the CP15 timer node: parse the four PPIs (order matches
 * enum ppi_nr), detect the counter rate, choose virtual vs physical
 * access, then register the per-cpu clockevent.
 */
static void __init arch_timer_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
	arch_timer_detect_rate(NULL, np);

	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		/*
		 * NOTE(review): this insists on BOTH the secure and
		 * non-secure PPIs — confirm a missing non-secure PPI
		 * really is meant to be fatal here.
		 */
		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return;
		}
	}

	arch_timer_register();
	arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
 619
 620static void __init arch_timer_mem_init(struct device_node *np)
 621{
 622        struct device_node *frame, *best_frame = NULL;
 623        void __iomem *cntctlbase, *base;
 624        unsigned int irq;
 625        u32 cnttidr;
 626
 627        arch_timers_present |= ARCH_MEM_TIMER;
 628        cntctlbase = of_iomap(np, 0);
 629        if (!cntctlbase) {
 630                pr_err("arch_timer: Can't find CNTCTLBase\n");
 631                return;
 632        }
 633
 634        cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
 635        iounmap(cntctlbase);
 636
 637        /*
 638         * Try to find a virtual capable frame. Otherwise fall back to a
 639         * physical capable frame.
 640         */
 641        for_each_available_child_of_node(np, frame) {
 642                int n;
 643
 644                if (of_property_read_u32(frame, "frame-number", &n)) {
 645                        pr_err("arch_timer: Missing frame-number\n");
 646                        of_node_put(best_frame);
 647                        of_node_put(frame);
 648                        return;
 649                }
 650
 651                if (cnttidr & CNTTIDR_VIRT(n)) {
 652                        of_node_put(best_frame);
 653                        best_frame = frame;
 654                        arch_timer_mem_use_virtual = true;
 655                        break;
 656                }
 657                of_node_put(best_frame);
 658                best_frame = of_node_get(frame);
 659        }
 660
 661        base = arch_counter_base = of_iomap(best_frame, 0);
 662        if (!base) {
 663                pr_err("arch_timer: Can't map frame's registers\n");
 664                of_node_put(best_frame);
 665                return;
 666        }
 667
 668        if (arch_timer_mem_use_virtual)
 669                irq = irq_of_parse_and_map(best_frame, 1);
 670        else
 671                irq = irq_of_parse_and_map(best_frame, 0);
 672        of_node_put(best_frame);
 673        if (!irq) {
 674                pr_err("arch_timer: Frame missing %s irq",
 675                       arch_timer_mem_use_virtual ? "virt" : "phys");
 676                return;
 677        }
 678
 679        arch_timer_detect_rate(base, np);
 680        arch_timer_mem_register(base, irq);
 681        arch_timer_common_init();
 682}
 683CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
 684                       arch_timer_mem_init);
 685
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.