/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
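
/*
 * Example: a minimal sketch of how an interrupt controller driver might
 * define a chip and attach it. The chip name, callbacks and MY_IRQ are
 * hypothetical, not defined in this file:
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "my-pic",
 *		.irq_mask	= my_pic_mask,
 *		.irq_unmask	= my_pic_unmask,
 *		.irq_ack	= my_pic_ack,
 *	};
 *
 *	irq_set_chip(MY_IRQ, &my_chip);
 */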

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
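
/*
 * Example: configuring the trigger type, typically done from a driver
 * before or while requesting the line ("gpio_irq" is a hypothetical
 * irq number):
 *
 *	int ret = irq_set_irq_type(gpio_irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		pr_err("could not set trigger type\n");
 */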

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
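
/*
 * Example: chip_data is the usual way for a chip callback to find its
 * driver state. A sketch, all names and registers hypothetical, after
 * setup code has done irq_set_chip_data(irq, pic):
 *
 *	static void my_pic_mask(struct irq_data *d)
 *	{
 *		struct my_pic *pic = irq_data_get_irq_chip_data(d);
 *
 *		writel(1 << (d->irq - pic->irq_base),
 *		       pic->base + MY_PIC_MASK_REG);
 *	}
 */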

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
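
/*
 * Example: a sketch of the nested pattern. An I2C GPIO expander's
 * threaded handler demultiplexes its child interrupts, which were set
 * up with irq_set_nested_thread(). All names here are hypothetical:
 *
 *	static irqreturn_t expander_irq_thread(int irq, void *data)
 *	{
 *		struct expander *chip = data;
 *		unsigned long pending = expander_read_status(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->ngpio)
 *			handle_nested_irq(chip->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */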

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
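
/*
 * Example: a hard-irq demultiplexer typically installs handle_simple_irq
 * on its child interrupts and dispatches to them via generic_handle_irq().
 * A sketch, with hypothetical names:
 *
 *	for (i = 0; i < chip->nirqs; i++)
 *		irq_set_chip_and_handler(chip->irq_base + i,
 *					 &dummy_irq_chip, handle_simple_irq);
 *
 *	static irqreturn_t demux_handler(int irq, void *data)
 *	{
 *		struct chip *chip = data;
 *		unsigned long pending = chip_read_pending(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nirqs)
 *			generic_handle_irq(chip->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */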

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
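
/*
 * Example: a level-triggered line needs at least mask/unmask callbacks,
 * since the flow above masks the line for the duration of the handler.
 * A sketch, with hypothetical names:
 *
 *	static struct irq_chip my_level_chip = {
 *		.name		= "my-level-pic",
 *		.irq_mask	= my_pic_mask,
 *		.irq_mask_ack	= my_pic_mask_ack,
 *		.irq_unmask	= my_pic_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(MY_IRQ, &my_level_chip, handle_level_irq);
 */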

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
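
/*
 * Example: a chip for handle_fasteoi_irq() only needs ->irq_eoi() in the
 * normal path; mask/unmask are used for the oneshot and error cases.
 * A sketch, with hypothetical names:
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name		= "my-eoi-pic",
 *		.irq_mask	= my_pic_mask,
 *		.irq_unmask	= my_pic_unmask,
 *		.irq_eoi	= my_pic_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(MY_IRQ, &my_eoi_chip, handle_fasteoi_irq);
 */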

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be re-enabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
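
/*
 * Example: an edge chip must provide ->irq_ack() (called unconditionally
 * above) in addition to mask/unmask. A sketch, with hypothetical names:
 *
 *	static struct irq_chip my_edge_chip = {
 *		.name		= "my-edge-pic",
 *		.irq_ack	= my_pic_ack,
 *		.irq_mask	= my_pic_mask,
 *		.irq_unmask	= my_pic_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(MY_IRQ, &my_edge_chip, handle_edge_irq);
 */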

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
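
/*
 * Example: the per-cpu-devid flow pairs with request_percpu_irq(). A
 * sketch of how a per cpu timer might use it (names hypothetical):
 *
 *	static DEFINE_PER_CPU(struct my_timer, my_timers);
 *
 *	irq_set_percpu_devid(TIMER_IRQ);
 *	irq_set_chip_and_handler(TIMER_IRQ, &my_chip,
 *				 handle_percpu_devid_irq);
 *	err = request_percpu_irq(TIMER_IRQ, my_timer_handler,
 *				 "my-timer", &my_timers);
 *
 * my_timer_handler() then receives this cpu's struct my_timer as dev_id.
 */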

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
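
/*
 * Example: drivers normally reach this through the wrappers in
 * <linux/irq.h>. A chained handler for a cascaded parent interrupt
 * ("parent_irq" and gpio_demux_handler() are hypothetical) would be
 * installed with:
 *
 *	irq_set_chained_handler(parent_irq, gpio_demux_handler);
 *
 * which marks the parent noprobe/norequest/nothread and starts it up,
 * exactly as the is_chained path above does.
 */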

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
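
/*
 * Example: callers usually use the <linux/irq.h> wrappers built on top
 * of irq_modify_status(). Marking an irq as not requestable by drivers:
 *
 *	irq_set_status_flags(irq, IRQ_NOREQUEST);
 *
 * and making it requestable again:
 *
 *	irq_clear_status_flags(irq, IRQ_NOREQUEST);
 */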

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}