/* linux/drivers/gpio/gpio-omap.c */
   1/*
   2 * Support functions for OMAP GPIO
   3 *
   4 * Copyright (C) 2003-2005 Nokia Corporation
   5 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
   6 *
   7 * Copyright (C) 2009 Texas Instruments
   8 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#include <linux/init.h>
  16#include <linux/module.h>
  17#include <linux/interrupt.h>
  18#include <linux/syscore_ops.h>
  19#include <linux/err.h>
  20#include <linux/clk.h>
  21#include <linux/io.h>
  22#include <linux/device.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/pm.h>
  25#include <linux/of.h>
  26#include <linux/of_device.h>
  27#include <linux/irqdomain.h>
  28#include <linux/irqchip/chained_irq.h>
  29#include <linux/gpio.h>
  30#include <linux/platform_data/gpio-omap.h>
  31
  32#define OFF_MODE        1
  33
  34static LIST_HEAD(omap_gpio_list);
  35
/*
 * Snapshot of one GPIO bank's registers, used to save and restore
 * hardware state across power transitions (context save/restore).
 */
struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};
  50
/*
 * Per-bank driver state: MMIO base, irq plumbing, saved register
 * context for power transitions, and debounce-clock bookkeeping.
 */
struct gpio_bank {
	struct list_head node;		/* entry in omap_gpio_list */
	void __iomem *base;		/* MMIO base of this bank */
	u16 irq;			/* bank's chained interrupt line */
	struct irq_domain *domain;	/* maps bank offsets to Linux irqs */
	u32 non_wakeup_gpios;		/* GPIOs that cannot wake the system */
	u32 enabled_non_wakeup_gpios;	/* edge GPIOs tracked for idle workaround */
	struct gpio_regs context;	/* register snapshot for restore */
	u32 saved_datain;		/* datain snapshot; written outside this chunk - TODO confirm */
	u32 level_mask;			/* GPIOs configured for level detection */
	u32 toggle_mask;		/* GPIOs needing SW edge toggling (OMAP1) */
	spinlock_t lock;		/* protects registers and context */
	struct gpio_chip chip;		/* gpiolib interface */
	struct clk *dbck;		/* debounce functional clock */
	u32 mod_usage;			/* bitmap of requested GPIOs */
	u32 dbck_enable_mask;		/* GPIOs with debounce enabled */
	bool dbck_enabled;		/* dbck currently running */
	struct device *dev;
	bool is_mpuio;			/* NOTE(review): presumably set for the MPUIO bank */
	bool dbck_flag;			/* bank supports debounce at all */
	bool loses_context;
	bool context_valid;
	int stride;			/* register stride divisor (see MPUIO PM code) */
	u32 width;			/* number of GPIO lines in this bank */
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	/* write one output level; RMW variant or set/clear-register variant */
	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;	/* SoC-specific register offsets */
};
  84
/* Bank-relative index of a (global) GPIO number */
#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
/* Register bit mask for a (global) GPIO number within its bank */
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
#define GPIO_MOD_CTRL_BIT	BIT(0)
  88
/* Translate a bank-relative hwirq number into the global GPIO number. */
static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
	return bank->chip.base + gpio_irq;
}
  93
  94static int omap_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
  95{
  96        struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
  97
  98        return irq_find_mapping(bank->domain, offset);
  99}
 100
 101static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
 102{
 103        void __iomem *reg = bank->base;
 104        u32 l;
 105
 106        reg += bank->regs->direction;
 107        l = __raw_readl(reg);
 108        if (is_input)
 109                l |= 1 << gpio;
 110        else
 111                l &= ~(1 << gpio);
 112        __raw_writel(l, reg);
 113        bank->context.oe = l;
 114}
 115
 116
 117/* set data out value using dedicate set/clear register */
 118static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
 119{
 120        void __iomem *reg = bank->base;
 121        u32 l = GPIO_BIT(bank, gpio);
 122
 123        if (enable) {
 124                reg += bank->regs->set_dataout;
 125                bank->context.dataout |= l;
 126        } else {
 127                reg += bank->regs->clr_dataout;
 128                bank->context.dataout &= ~l;
 129        }
 130
 131        __raw_writel(l, reg);
 132}
 133
 134/* set data out value using mask register */
 135static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
 136{
 137        void __iomem *reg = bank->base + bank->regs->dataout;
 138        u32 gpio_bit = GPIO_BIT(bank, gpio);
 139        u32 l;
 140
 141        l = __raw_readl(reg);
 142        if (enable)
 143                l |= gpio_bit;
 144        else
 145                l &= ~gpio_bit;
 146        __raw_writel(l, reg);
 147        bank->context.dataout = l;
 148}
 149
 150static int _get_gpio_datain(struct gpio_bank *bank, int offset)
 151{
 152        void __iomem *reg = bank->base + bank->regs->datain;
 153
 154        return (__raw_readl(reg) & (1 << offset)) != 0;
 155}
 156
 157static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
 158{
 159        void __iomem *reg = bank->base + bank->regs->dataout;
 160
 161        return (__raw_readl(reg) & (1 << offset)) != 0;
 162}
 163
 164static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
 165{
 166        int l = __raw_readl(base + reg);
 167
 168        if (set)
 169                l |= mask;
 170        else
 171                l &= ~mask;
 172
 173        __raw_writel(l, base + reg);
 174}
 175
 176static inline void _gpio_dbck_enable(struct gpio_bank *bank)
 177{
 178        if (bank->dbck_enable_mask && !bank->dbck_enabled) {
 179                clk_enable(bank->dbck);
 180                bank->dbck_enabled = true;
 181
 182                __raw_writel(bank->dbck_enable_mask,
 183                             bank->base + bank->regs->debounce_en);
 184        }
 185}
 186
 187static inline void _gpio_dbck_disable(struct gpio_bank *bank)
 188{
 189        if (bank->dbck_enable_mask && bank->dbck_enabled) {
 190                /*
 191                 * Disable debounce before cutting it's clock. If debounce is
 192                 * enabled but the clock is not, GPIO module seems to be unable
 193                 * to detect events and generate interrupts at least on OMAP3.
 194                 */
 195                __raw_writel(0, bank->base + bank->regs->debounce_en);
 196
 197                clk_disable(bank->dbck);
 198                bank->dbck_enabled = false;
 199        }
 200}
 201
/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @gpio
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;

	/* Nothing to do on banks without debounce support */
	if (!bank->dbck_flag)
		return;

	/* Convert to 31us hardware units, clamped to the register range */
	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = GPIO_BIT(bank, gpio);

	/* The debounce clock must be running while these registers are written */
	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called,  _gpio_dbck_enable() within
	 * runtime callbck fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		/* Save so the values survive an off-mode context restore */
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}
 259
/**
 * _clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @gpio
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this function
 * if this is the only gpio in the bank using debounce.
 */
static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);

	if (!bank->dbck_flag)
		return;

	/* Nothing to clear if debounce was never enabled for this line */
	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	__raw_writel(bank->context.debounce_en,
		     bank->base + bank->regs->debounce_en);

	/* Last user gone: zero the debounce time and gate the clock */
	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		__raw_writel(bank->context.debounce, bank->base +
			     bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}
 293
/*
 * Program level/edge detection for one GPIO according to the IRQ_TYPE_*
 * flags in @trigger, keep the saved register context in sync, and update
 * the bank's wakeup and level-detect bookkeeping.
 */
static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	/* Each detect register gets this GPIO's bit iff that type was requested */
	_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_LOW);
	_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_HIGH);
	_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_RISING);
	_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_FALLING);

	/* Re-read all four so the saved context matches the hardware */
	bank->context.leveldetect0 =
			__raw_readl(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			__raw_readl(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			__raw_readl(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			__raw_readl(bank->base + bank->regs->fallingdetect);

	/* Arm wakeup for any trigger; disarm when trigger is NONE */
	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	/* Cache which lines are level-sensitive for the irq handler */
	bank->level_mask =
		__raw_readl(bank->base + bank->regs->leveldetect0) |
		__raw_readl(bank->base + bank->regs->leveldetect1);
}
 349
#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	/* Banks with proper level/edge detect registers need no toggling */
	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	/* Flip the edge-select bit so the opposite edge is caught next */
	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#else
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif
 376
/*
 * Configure the interrupt trigger for one GPIO, dispatching on which
 * register set this bank provides:
 *  - leveldetect0 + wkup_en: full level/edge support (set_gpio_trigger)
 *  - irqctrl: one edge direction at a time; both-edge requests are
 *    recorded in toggle_mask and flipped in software per interrupt
 *  - edgectrl1/edgectrl2: two bits per GPIO, eight GPIOs per register
 * Returns 0 on success or -EINVAL for an unsupported irqctrl trigger.
 */
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
							unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = __raw_readl(reg);
		/* Hardware does one edge only; remember to toggle in software */
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			return -EINVAL;

		__raw_writel(l, reg);
	} else if (bank->regs->edgectrl1) {
		/* GPIOs 0-7 live in edgectrl1, 8-15 in edgectrl2 */
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = __raw_readl(reg);
		/* Two bits per GPIO: bit1 = rising, bit0 = falling */
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
		__raw_writel(l, reg);
	}
	return 0;
}
 422
/*
 * irq_chip .irq_set_type hook: validate and program the requested
 * trigger, then select the level or edge flow handler accordingly.
 */
static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned gpio = 0;
	int retval;
	unsigned long flags;

	/* The GPIO must have been requested before its irq is configured */
	if (WARN_ON(!bank->mod_usage))
		return -EINVAL;

#ifdef CONFIG_ARCH_OMAP1
	/* MPUIO irqs carry their own numbering scheme */
	if (d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
#endif

	if (!gpio)
		gpio = irq_to_gpio(bank, d->hwirq);

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* Banks without level-detect registers support only edge triggers */
	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}
 459
 460static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 461{
 462        void __iomem *reg = bank->base;
 463
 464        reg += bank->regs->irqstatus;
 465        __raw_writel(gpio_mask, reg);
 466
 467        /* Workaround for clearing DSP GPIO interrupts to allow retention */
 468        if (bank->regs->irqstatus2) {
 469                reg = bank->base + bank->regs->irqstatus2;
 470                __raw_writel(gpio_mask, reg);
 471        }
 472
 473        /* Flush posted write for the irq status to avoid spurious interrupts */
 474        __raw_readl(reg);
 475}
 476
/* Clear the pending interrupt status of a single GPIO. */
static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
 481
 482static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
 483{
 484        void __iomem *reg = bank->base;
 485        u32 l;
 486        u32 mask = (1 << bank->width) - 1;
 487
 488        reg += bank->regs->irqenable;
 489        l = __raw_readl(reg);
 490        if (bank->regs->irqenable_inv)
 491                l = ~l;
 492        l &= mask;
 493        return l;
 494}
 495
 496static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 497{
 498        void __iomem *reg = bank->base;
 499        u32 l;
 500
 501        if (bank->regs->set_irqenable) {
 502                reg += bank->regs->set_irqenable;
 503                l = gpio_mask;
 504                bank->context.irqenable1 |= gpio_mask;
 505        } else {
 506                reg += bank->regs->irqenable;
 507                l = __raw_readl(reg);
 508                if (bank->regs->irqenable_inv)
 509                        l &= ~gpio_mask;
 510                else
 511                        l |= gpio_mask;
 512                bank->context.irqenable1 = l;
 513        }
 514
 515        __raw_writel(l, reg);
 516}
 517
 518static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 519{
 520        void __iomem *reg = bank->base;
 521        u32 l;
 522
 523        if (bank->regs->clr_irqenable) {
 524                reg += bank->regs->clr_irqenable;
 525                l = gpio_mask;
 526                bank->context.irqenable1 &= ~gpio_mask;
 527        } else {
 528                reg += bank->regs->irqenable;
 529                l = __raw_readl(reg);
 530                if (bank->regs->irqenable_inv)
 531                        l |= gpio_mask;
 532                else
 533                        l &= ~gpio_mask;
 534                bank->context.irqenable1 = l;
 535        }
 536
 537        __raw_writel(l, reg);
 538}
 539
 540static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
 541{
 542        if (enable)
 543                _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 544        else
 545                _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 546}
 547
/*
 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
 * 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, system will wake up always on GPIO events. While
 * system is running all registered GPIO interrupts need to have wake-up
 * enabled. When system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	unsigned long flags;

	/* Some lines simply cannot act as wakeup sources */
	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
		return -EINVAL;
	}

	spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->context.wake_en |= gpio_bit;
	else
		bank->context.wake_en &= ~gpio_bit;

	/* context.wake_en is the master copy; push it to the hardware */
	__raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
 578
/*
 * Return one GPIO to a safe default: input direction, interrupt disabled
 * and acknowledged, trigger removed, debounce cleared.
 */
static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	_clear_gpio_debounce(bank, gpio);
}
 587
 588/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
 589static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
 590{
 591        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 592        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 593
 594        return _set_gpio_wakeup(bank, gpio, enable);
 595}
 596
/*
 * gpio_chip .request hook: power the bank up on first use, clear any
 * stale trigger on the pin, claim it for the MPU where the hardware
 * requires it, and ungate the module on first use.  Always returns 0.
 */
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	/* Record this line as in use (also keeps the bank powered) */
	bank->mod_usage |= 1 << offset;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
 639
/*
 * gpio_chip .free hook: disarm wakeup for the pin, reset it to a safe
 * state, gate the module when the last line is freed, and drop the
 * runtime-PM reference taken in omap_gpio_request().
 */
static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	if (bank->regs->wkup_en) {
		/* Disable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	bank->mod_usage &= ~(1 << offset);

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	/* _reset_gpio() expects a global GPIO number, not a bank offset */
	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_put(bank->dev);
}
 678
/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int bit;
	struct gpio_bank *bank;
	int unmasked = 0;	/* set once the chained parent irq is re-enabled */
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;
	/* Keep the bank powered while we service it */
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	/* Loop until a status read shows no more pending (enabled) lines */
	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		/* Only consider lines whose interrupt is actually enabled */
		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before handler(s) are
		called so that we don't miss any interrupt occurred while
		executing them */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if there is only edge sensitive GPIO pin interrupts
		configured, we could unmask GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		if (!isr)
			break;

		/* Dispatch each pending line to its per-GPIO handler */
		while (isr) {
			bit = __ffs(isr);
			isr &= ~(1 << bit);

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << bit))
				_toggle_gpio_edge_triggering(bank, bit);

			generic_handle_irq(irq_find_mapping(bank->domain, bit));
		}
	}
	/* if bank has any level sensitive GPIO pin interrupt
	configured, we must unmask the bank interrupt only after
	handler(s) are executed in order to avoid spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
	pm_runtime_put(bank->dev);
}
 759
 760static void gpio_irq_shutdown(struct irq_data *d)
 761{
 762        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 763        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 764        unsigned long flags;
 765
 766        spin_lock_irqsave(&bank->lock, flags);
 767        _reset_gpio(bank, gpio);
 768        spin_unlock_irqrestore(&bank->lock, flags);
 769}
 770
 771static void gpio_ack_irq(struct irq_data *d)
 772{
 773        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 774        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 775
 776        _clear_gpio_irqstatus(bank, gpio);
 777}
 778
/* irq_chip .irq_mask hook: disable the interrupt and drop its trigger. */
static void gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	/* Remove the trigger; gpio_unmask_irq() reprograms it from irqd */
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}
 790
/*
 * irq_chip .irq_unmask hook: restore the trigger removed by
 * gpio_mask_irq() and re-enable the line's interrupt.
 */
static void gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}
 813
/* irq_chip operations backing every per-GPIO interrupt of this driver. */
static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};
 823
 824/*---------------------------------------------------------------------*/
 825
/* On suspend, mask every MPUIO interrupt not armed as a wakeup source. */
static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	/* A set mask bit disables the line; keep only wake_en lines active */
	__raw_writel(0xffff & ~bank->context.wake_en, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
 840
/* On resume, restore the MPUIO interrupt mask from the saved context. */
static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->context.wake_en, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
 855
/* noirq-phase PM callbacks for the MPUIO pseudo-device. */
static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};
 860
/* use platform_driver for this, to hook into the MPUIO PM callbacks. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};
 868
/* Singleton pseudo-device bound to omap_mpuio_driver by mpuio_init(). */
static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};
 877
 878static inline void mpuio_init(struct gpio_bank *bank)
 879{
 880        platform_set_drvdata(&omap_mpuio_device, bank);
 881
 882        if (platform_driver_register(&omap_mpuio_driver) == 0)
 883                (void) platform_device_register(&omap_mpuio_device);
 884}
 885
 886/*---------------------------------------------------------------------*/
 887
 888static int gpio_input(struct gpio_chip *chip, unsigned offset)
 889{
 890        struct gpio_bank *bank;
 891        unsigned long flags;
 892
 893        bank = container_of(chip, struct gpio_bank, chip);
 894        spin_lock_irqsave(&bank->lock, flags);
 895        _set_gpio_direction(bank, offset, 1);
 896        spin_unlock_irqrestore(&bank->lock, flags);
 897        return 0;
 898}
 899
 900static int gpio_is_input(struct gpio_bank *bank, int mask)
 901{
 902        void __iomem *reg = bank->base + bank->regs->direction;
 903
 904        return __raw_readl(reg) & mask;
 905}
 906
 907static int gpio_get(struct gpio_chip *chip, unsigned offset)
 908{
 909        struct gpio_bank *bank;
 910        u32 mask;
 911
 912        bank = container_of(chip, struct gpio_bank, chip);
 913        mask = (1 << offset);
 914
 915        if (gpio_is_input(bank, mask))
 916                return _get_gpio_datain(bank, offset);
 917        else
 918                return _get_gpio_dataout(bank, offset);
 919}
 920
 921static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
 922{
 923        struct gpio_bank *bank;
 924        unsigned long flags;
 925
 926        bank = container_of(chip, struct gpio_bank, chip);
 927        spin_lock_irqsave(&bank->lock, flags);
 928        bank->set_dataout(bank, offset, value);
 929        _set_gpio_direction(bank, offset, 0);
 930        spin_unlock_irqrestore(&bank->lock, flags);
 931        return 0;
 932}
 933
 934static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
 935                unsigned debounce)
 936{
 937        struct gpio_bank *bank;
 938        unsigned long flags;
 939
 940        bank = container_of(chip, struct gpio_bank, chip);
 941
 942        spin_lock_irqsave(&bank->lock, flags);
 943        _set_gpio_debounce(bank, offset, debounce);
 944        spin_unlock_irqrestore(&bank->lock, flags);
 945
 946        return 0;
 947}
 948
 949static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 950{
 951        struct gpio_bank *bank;
 952        unsigned long flags;
 953
 954        bank = container_of(chip, struct gpio_bank, chip);
 955        spin_lock_irqsave(&bank->lock, flags);
 956        bank->set_dataout(bank, offset, value);
 957        spin_unlock_irqrestore(&bank->lock, flags);
 958}
 959
 960/*---------------------------------------------------------------------*/
 961
 962static void __init omap_gpio_show_rev(struct gpio_bank *bank)
 963{
 964        static bool called;
 965        u32 rev;
 966
 967        if (called || bank->regs->revision == USHRT_MAX)
 968                return;
 969
 970        rev = __raw_readw(bank->base + bank->regs->revision);
 971        pr_info("OMAP GPIO hardware version %d.%d\n",
 972                (rev >> 4) & 0x0f, rev & 0x0f);
 973
 974        called = true;
 975}
 976
/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 * Applied to every per-line IRQ in omap_gpio_chip_init().
 */
static struct lock_class_key gpio_lock_class;
 981
 982static void omap_gpio_mod_init(struct gpio_bank *bank)
 983{
 984        void __iomem *base = bank->base;
 985        u32 l = 0xffffffff;
 986
 987        if (bank->width == 16)
 988                l = 0xffff;
 989
 990        if (bank->is_mpuio) {
 991                __raw_writel(l, bank->base + bank->regs->irqenable);
 992                return;
 993        }
 994
 995        _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
 996        _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
 997        if (bank->regs->debounce_en)
 998                __raw_writel(0, base + bank->regs->debounce_en);
 999
1000        /* Save OE default value (0xffffffff) in the context */
1001        bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
1002         /* Initialize interface clk ungated, module enabled */
1003        if (bank->regs->ctrl)
1004                __raw_writel(0, base + bank->regs->ctrl);
1005
1006        bank->dbck = clk_get(bank->dev, "dbclk");
1007        if (IS_ERR(bank->dbck))
1008                dev_err(bank->dev, "Could not get gpio dbck\n");
1009}
1010
1011static void
1012omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1013                    unsigned int num)
1014{
1015        struct irq_chip_generic *gc;
1016        struct irq_chip_type *ct;
1017
1018        gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
1019                                    handle_simple_irq);
1020        if (!gc) {
1021                dev_err(bank->dev, "Memory alloc failed for gc\n");
1022                return;
1023        }
1024
1025        ct = gc->chip_types;
1026
1027        /* NOTE: No ack required, reading IRQ status clears it. */
1028        ct->chip.irq_mask = irq_gc_mask_set_bit;
1029        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
1030        ct->chip.irq_set_type = gpio_irq_type;
1031
1032        if (bank->regs->wkup_en)
1033                ct->chip.irq_set_wake = gpio_wake_enable,
1034
1035        ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
1036        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
1037                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
1038}
1039
/*
 * Wire up the gpio_chip callbacks for this bank, register the chip
 * with gpiolib, create an IRQ mapping for every line, and finally
 * chain the bank's upstream interrupt to gpio_irq_handler.
 */
static void omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;	/* next free GPIO number, shared across banks */

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = omap_gpio_to_irq;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;	/* sequential numbering per bank */
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	/* NOTE(review): gpiochip_add() return value is ignored — confirm */
	gpiochip_add(&bank->chip);

	for (j = 0; j < bank->width; j++) {
		int irq = irq_create_mapping(bank->domain, j);
		irq_set_lockdep_class(irq, &gpio_lock_class);
		irq_set_chip_data(irq, bank);
		if (bank->is_mpuio) {
			/*
			 * NOTE(review): this runs once per line; looks like
			 * it is meant to run once per bank — confirm.
			 */
			omap_mpuio_alloc_gc(bank, irq, bank->width);
		} else {
			irq_set_chip_and_handler(irq, &gpio_irq_chip,
						 handle_simple_irq);
			set_irq_flags(irq, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}
1086
1087static const struct of_device_id omap_gpio_match[];
1088
/*
 * Probe one GPIO bank: resolve platform data (DT match data or board
 * pdata), fill in the gpio_bank from it, create the bank's IRQ domain,
 * map its register space, then initialise the module, the gpio_chip
 * and per-line IRQs under a runtime-PM get/put pair, and finally add
 * the bank to the global list used by the idle helpers below.
 */
static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	const struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
#ifdef CONFIG_ARCH_OMAP1
	int irq_base;
#endif

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	/* DT match data takes precedence over legacy board-file pdata */
	pdata = match ? match->data : dev->platform_data;
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->dev = dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif
	if (node) {
		/* DT banks lose context unless explicitly always-on */
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

#ifdef CONFIG_ARCH_OMAP1
	/*
	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
	 * irq_alloc_descs() and irq_domain_add_legacy() and just use a
	 * linear IRQ domain mapping for all OMAP platforms.
	 */
	irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (irq_base < 0) {
		dev_err(dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}

	/*
	 * NOTE(review): if the legacy domain add fails, the irq descs
	 * allocated above are never freed — confirm/fix separately.
	 */
	bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
					     0, &irq_domain_simple_ops, NULL);
#else
	bank->domain = irq_domain_add_linear(node, bank->width,
					     &irq_domain_simple_ops, NULL);
#endif
	if (!bank->domain) {
		dev_err(dev, "Couldn't register an IRQ domain\n");
		return -ENODEV;
	}

	/* prefer dedicated set/clear registers when the layout has them */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid mem resource\n");
		irq_domain_remove(bank->domain);
		return -ENODEV;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     pdev->name)) {
		dev_err(dev, "Region already claimed\n");
		irq_domain_remove(bank->domain);
		return -EBUSY;
	}

	bank->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!bank->base) {
		dev_err(dev, "Could not ioremap\n");
		irq_domain_remove(bank->domain);
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, bank);

	/* keep the bank powered while we poke its registers below */
	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		mpuio_init(bank);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return 0;
}
1212
1213#ifdef CONFIG_ARCH_OMAP2PLUS
1214
1215#if defined(CONFIG_PM_RUNTIME)
1216static void omap_gpio_restore_context(struct gpio_bank *bank);
1217
/*
 * Runtime-PM suspend: prepare the bank for idle.  Level-triggered
 * wakeup-enabled lines temporarily get edge detection added (only
 * edges wake the PRCM), and when heading to OFF mode the OMAP2420
 * erratum 1.101 workaround strips triggering from all non-wakeup
 * GPIOs after snapshotting datain for the resume-side comparison.
 */
static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		__raw_writel(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		__raw_writel(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* workaround below only applies when actually entering OFF mode */
	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = __raw_readl(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	/* tells ->runtime_resume() to run the datain-diff IRQ replay */
	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
1283
1284static void omap_gpio_init_context(struct gpio_bank *p);
1285
/*
 * Runtime-PM resume: undo everything ->runtime_suspend() did.  Lazily
 * initialises the saved context on first resume, restores the original
 * edge-detect settings, restores full context if it was lost, and —
 * when the OFF-mode workaround was armed — replays interrupts for any
 * non-wakeup GPIO whose level changed while triggering was removed.
 */
static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);
	}

	_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	__raw_writel(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			/* no way to tell — restore unconditionally */
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(bank->dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				/* context survived; nothing more to do */
				spin_unlock_irqrestore(&bank->lock, flags);
				return 0;
			}
		}
	}

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		/*
		 * Pulse level detection on the changed lines to latch an
		 * IRQ in hardware, then restore the old settings.  The
		 * mask used differs per register layout generation.
		 */
		if (!bank->regs->irqstatus_raw0) {
			__raw_writel(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			__raw_writel(old0 | l, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
1396#endif /* CONFIG_PM_RUNTIME */
1397
1398void omap2_gpio_prepare_for_idle(int pwr_mode)
1399{
1400        struct gpio_bank *bank;
1401
1402        list_for_each_entry(bank, &omap_gpio_list, node) {
1403                if (!bank->mod_usage || !bank->loses_context)
1404                        continue;
1405
1406                bank->power_mode = pwr_mode;
1407
1408                pm_runtime_put_sync_suspend(bank->dev);
1409        }
1410}
1411
1412void omap2_gpio_resume_after_idle(void)
1413{
1414        struct gpio_bank *bank;
1415
1416        list_for_each_entry(bank, &omap_gpio_list, node) {
1417                if (!bank->mod_usage || !bank->loses_context)
1418                        continue;
1419
1420                pm_runtime_get_sync(bank->dev);
1421        }
1422}
1423
1424#if defined(CONFIG_PM_RUNTIME)
1425static void omap_gpio_init_context(struct gpio_bank *p)
1426{
1427        struct omap_gpio_reg_offs *regs = p->regs;
1428        void __iomem *base = p->base;
1429
1430        p->context.ctrl         = __raw_readl(base + regs->ctrl);
1431        p->context.oe           = __raw_readl(base + regs->direction);
1432        p->context.wake_en      = __raw_readl(base + regs->wkup_en);
1433        p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0);
1434        p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1);
1435        p->context.risingdetect = __raw_readl(base + regs->risingdetect);
1436        p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
1437        p->context.irqenable1   = __raw_readl(base + regs->irqenable);
1438        p->context.irqenable2   = __raw_readl(base + regs->irqenable2);
1439
1440        if (regs->set_dataout && p->regs->clr_dataout)
1441                p->context.dataout = __raw_readl(base + regs->set_dataout);
1442        else
1443                p->context.dataout = __raw_readl(base + regs->dataout);
1444
1445        p->context_valid = true;
1446}
1447
/*
 * Write the saved bank->context back to the hardware after a context
 * loss.  The write order is deliberate: detect/wakeup setup first,
 * dataout before OE (presumably so outputs come up already driving the
 * saved level — confirm), and interrupt enables last.
 */
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	__raw_writel(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
	__raw_writel(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	__raw_writel(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	__raw_writel(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	__raw_writel(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->dataout);
	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);

	/* debounce registers only matter if any line had debounce enabled */
	if (bank->dbck_enable_mask) {
		__raw_writel(bank->context.debounce, bank->base +
					bank->regs->debounce);
		__raw_writel(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	__raw_writel(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	__raw_writel(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
1481#endif /* CONFIG_PM_RUNTIME */
1482#else
1483#define omap_gpio_runtime_suspend NULL
1484#define omap_gpio_runtime_resume NULL
1485static void omap_gpio_init_context(struct gpio_bank *p) {}
1486#endif
1487
/* Runtime PM only; system sleep is handled via the idle helpers above. */
static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};
1492
1493#if defined(CONFIG_OF)
/* Register offsets for OMAP2/3-era banks (shared by omap2/omap3 pdata). */
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};
1516
/*
 * Register offsets for OMAP4-era banks.  Note irqenable points at the
 * IRQSTATUSSET register (enable is done by setting bits there).
 */
static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};
1539
/*
 * Per-SoC-generation platform data used as DT match data below.  The
 * generations differ only in register layout and whether the banks
 * have a debounce functional clock (dbck_flag).
 */
static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};
1557
/* DT match table; .data supplies the per-generation platform data. */
static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
1574#endif
1575
/* Main platform driver; no .remove — banks stay registered for life. */
static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};
1584
1585/*
1586 * gpio driver register needs to be done before
1587 * machine_init functions access gpio APIs.
1588 * Hence omap_gpio_drv_reg() is a postcore_initcall.
1589 */
/* Registered at postcore so GPIOs exist before machine_init runs. */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);
1595
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.