   1/*
   2 * linux/kernel/irq/manage.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006 Thomas Gleixner
   6 *
   7 * This file contains driver APIs to the irq subsystem.
   8 */
   9
  10#define pr_fmt(fmt) "genirq: " fmt
  11
  12#include <linux/irq.h>
  13#include <linux/kthread.h>
  14#include <linux/module.h>
  15#include <linux/random.h>
  16#include <linux/interrupt.h>
  17#include <linux/slab.h>
  18#include <linux/sched.h>
  19#include <linux/task_work.h>
  20
  21#include "internals.h"
  22
  23#ifdef CONFIG_IRQ_FORCED_THREADING
  24__read_mostly bool force_irqthreads;
  25
  26static int __init setup_forced_irqthreads(char *arg)
  27{
  28        force_irqthreads = true;
  29        return 0;
  30}
  31early_param("threadirqs", setup_forced_irqthreads);
  32#endif
  33
  34/**
  35 *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  36 *      @irq: interrupt number to wait for
  37 *
  38 *      This function waits for any pending IRQ handlers for this interrupt
  39 *      to complete before returning. If you use this function while
  40 *      holding a resource the IRQ handler may need, you will deadlock.
  41 *
  42 *      This function may be called - with care - from IRQ context.
  43 */
  44void synchronize_irq(unsigned int irq)
  45{
  46        struct irq_desc *desc = irq_to_desc(irq);
  47        bool inprogress;
  48
  49        if (!desc)
  50                return;
  51
  52        do {
  53                unsigned long flags;
  54
  55                /*
  56                 * Wait until we're out of the critical section.  This might
  57                 * give the wrong answer due to the lack of memory barriers.
  58                 */
  59                while (irqd_irq_inprogress(&desc->irq_data))
  60                        cpu_relax();
  61
  62                /* Ok, that indicated we're done: double-check carefully. */
  63                raw_spin_lock_irqsave(&desc->lock, flags);
  64                inprogress = irqd_irq_inprogress(&desc->irq_data);
  65                raw_spin_unlock_irqrestore(&desc->lock, flags);
  66
  67                /* Oops, that failed? */
  68        } while (inprogress);
  69
  70        /*
  71         * We made sure that no hardirq handler is running. Now verify
  72         * that no threaded handlers are active.
  73         */
  74        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
  75}
  76EXPORT_SYMBOL(synchronize_irq);
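
/*
 * A minimal usage sketch (kept out of the build), assuming a hypothetical
 * driver whose handler touches foo->ring: before tearing the ring down, the
 * driver stops the device from raising new interrupts and then waits for
 * handlers already in flight. The foo_* names are placeholders; as documented
 * above, the caller must not hold any lock the handler might take.
 */
#if 0
#include <linux/interrupt.h>

static void foo_teardown_ring(struct foo_dev *foo)
{
        /* Tell the hardware to stop generating interrupts. */
        foo_hw_irq_disable(foo);

        /* Wait for a handler that may still be running on another CPU. */
        synchronize_irq(foo->irq);

        /* Nothing touches foo->ring concurrently any more; safe to free. */
        foo_free_ring(foo);
}
#endif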
  77
  78#ifdef CONFIG_SMP
  79cpumask_var_t irq_default_affinity;
  80
  81/**
  82 *      irq_can_set_affinity - Check if the affinity of a given irq can be set
  83 *      @irq:           Interrupt to check
  84 *
  85 */
  86int irq_can_set_affinity(unsigned int irq)
  87{
  88        struct irq_desc *desc = irq_to_desc(irq);
  89
  90        if (!desc || !irqd_can_balance(&desc->irq_data) ||
  91            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
  92                return 0;
  93
  94        return 1;
  95}
  96
  97/**
  98 *      irq_set_thread_affinity - Notify irq threads to adjust affinity
  99 *      @desc:          irq descriptor which has affinity changed
 100 *
 101 *      We just set IRQTF_AFFINITY and delegate the affinity setting
 102 *      to the interrupt thread itself. We can not call
 103 *      set_cpus_allowed_ptr() here as we hold desc->lock and this
 104 *      code can be called from hard interrupt context.
 105 */
 106void irq_set_thread_affinity(struct irq_desc *desc)
 107{
 108        struct irqaction *action = desc->action;
 109
 110        while (action) {
 111                if (action->thread)
 112                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
 113                action = action->next;
 114        }
 115}
 116
 117#ifdef CONFIG_GENERIC_PENDING_IRQ
 118static inline bool irq_can_move_pcntxt(struct irq_data *data)
 119{
 120        return irqd_can_move_in_process_context(data);
 121}
 122static inline bool irq_move_pending(struct irq_data *data)
 123{
 124        return irqd_is_setaffinity_pending(data);
 125}
 126static inline void
 127irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
 128{
 129        cpumask_copy(desc->pending_mask, mask);
 130}
 131static inline void
 132irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 133{
 134        cpumask_copy(mask, desc->pending_mask);
 135}
 136#else
 137static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
 138static inline bool irq_move_pending(struct irq_data *data) { return false; }
 139static inline void
 140irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 141static inline void
 142irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 143#endif
 144
 145int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 146                        bool force)
 147{
 148        struct irq_desc *desc = irq_data_to_desc(data);
 149        struct irq_chip *chip = irq_data_get_irq_chip(data);
 150        int ret;
 151
 152        ret = chip->irq_set_affinity(data, mask, false);
 153        switch (ret) {
 154        case IRQ_SET_MASK_OK:
 155                cpumask_copy(data->affinity, mask);
 156        case IRQ_SET_MASK_OK_NOCOPY:
 157                irq_set_thread_affinity(desc);
 158                ret = 0;
 159        }
 160
 161        return ret;
 162}
 163
 164int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 165{
 166        struct irq_chip *chip = irq_data_get_irq_chip(data);
 167        struct irq_desc *desc = irq_data_to_desc(data);
 168        int ret = 0;
 169
 170        if (!chip || !chip->irq_set_affinity)
 171                return -EINVAL;
 172
 173        if (irq_can_move_pcntxt(data)) {
 174                ret = irq_do_set_affinity(data, mask, false);
 175        } else {
 176                irqd_set_move_pending(data);
 177                irq_copy_pending(desc, mask);
 178        }
 179
 180        if (desc->affinity_notify) {
 181                kref_get(&desc->affinity_notify->kref);
 182                schedule_work(&desc->affinity_notify->work);
 183        }
 184        irqd_set(data, IRQD_AFFINITY_SET);
 185
 186        return ret;
 187}
 188
 189/**
 190 *      irq_set_affinity - Set the irq affinity of a given irq
 191 *      @irq:           Interrupt to set affinity
 192 *      @mask:          cpumask
 193 *
 194 */
 195int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 196{
 197        struct irq_desc *desc = irq_to_desc(irq);
 198        unsigned long flags;
 199        int ret;
 200
 201        if (!desc)
 202                return -EINVAL;
 203
 204        raw_spin_lock_irqsave(&desc->lock, flags);
 205        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
 206        raw_spin_unlock_irqrestore(&desc->lock, flags);
 207        return ret;
 208}
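
/*
 * A minimal sketch (kept out of the build), assuming a caller that wants to
 * pin an interrupt to a single CPU, e.g. the CPU that consumes its data.
 * cpumask_of() builds the single-CPU mask; the return value is 0 on success
 * or a negative errno. foo_pin_irq() is an illustrative name.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int foo_pin_irq(unsigned int irq, int cpu)
{
        /* Move the hard irq (and any irq thread) to @cpu. */
        return irq_set_affinity(irq, cpumask_of(cpu));
}
#endif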
 209
 210int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 211{
 212        unsigned long flags;
 213        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 214
 215        if (!desc)
 216                return -EINVAL;
 217        desc->affinity_hint = m;
 218        irq_put_desc_unlock(desc, flags);
 219        return 0;
 220}
 221EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
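
/*
 * A minimal sketch (kept out of the build), assuming a multiqueue driver that
 * spreads its per-queue vectors across CPUs. The hint is advisory only and is
 * exported via /proc/irq/<n>/affinity_hint for tools such as irqbalance; it
 * must be cleared with a NULL mask before the irq is freed. The foo_* and
 * msix names are placeholders.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static void foo_set_queue_hints(struct foo_dev *foo)
{
        int i;

        for (i = 0; i < foo->nr_queues; i++)
                irq_set_affinity_hint(foo->msix[i].vector,
                                      cpumask_of(i % num_online_cpus()));
}

static void foo_clear_queue_hints(struct foo_dev *foo)
{
        int i;

        /* Must be done before free_irq() on each vector. */
        for (i = 0; i < foo->nr_queues; i++)
                irq_set_affinity_hint(foo->msix[i].vector, NULL);
}
#endif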
 222
 223static void irq_affinity_notify(struct work_struct *work)
 224{
 225        struct irq_affinity_notify *notify =
 226                container_of(work, struct irq_affinity_notify, work);
 227        struct irq_desc *desc = irq_to_desc(notify->irq);
 228        cpumask_var_t cpumask;
 229        unsigned long flags;
 230
 231        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 232                goto out;
 233
 234        raw_spin_lock_irqsave(&desc->lock, flags);
 235        if (irq_move_pending(&desc->irq_data))
 236                irq_get_pending(cpumask, desc);
 237        else
 238                cpumask_copy(cpumask, desc->irq_data.affinity);
 239        raw_spin_unlock_irqrestore(&desc->lock, flags);
 240
 241        notify->notify(notify, cpumask);
 242
 243        free_cpumask_var(cpumask);
 244out:
 245        kref_put(&notify->kref, notify->release);
 246}
 247
 248/**
 249 *      irq_set_affinity_notifier - control notification of IRQ affinity changes
 250 *      @irq:           Interrupt for which to enable/disable notification
 251 *      @notify:        Context for notification, or %NULL to disable
 252 *                      notification.  Function pointers must be initialised;
 253 *                      the other fields will be initialised by this function.
 254 *
 255 *      Must be called in process context.  Notification may only be enabled
 256 *      after the IRQ is allocated and must be disabled before the IRQ is
 257 *      freed using free_irq().
 258 */
 259int
 260irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 261{
 262        struct irq_desc *desc = irq_to_desc(irq);
 263        struct irq_affinity_notify *old_notify;
 264        unsigned long flags;
 265
 266        /* The release function is promised process context */
 267        might_sleep();
 268
 269        if (!desc)
 270                return -EINVAL;
 271
 272        /* Complete initialisation of *notify */
 273        if (notify) {
 274                notify->irq = irq;
 275                kref_init(&notify->kref);
 276                INIT_WORK(&notify->work, irq_affinity_notify);
 277        }
 278
 279        raw_spin_lock_irqsave(&desc->lock, flags);
 280        old_notify = desc->affinity_notify;
 281        desc->affinity_notify = notify;
 282        raw_spin_unlock_irqrestore(&desc->lock, flags);
 283
 284        if (old_notify)
 285                kref_put(&old_notify->kref, old_notify->release);
 286
 287        return 0;
 288}
 289EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
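
/*
 * A minimal sketch of a notifier user (kept out of the build), assuming a
 * driver that caches the CPU its irq currently targets. Both callbacks are
 * required: notify() runs from a workqueue, and release() is the kref release
 * callback that frees the containing object once the last reference is
 * dropped, e.g. after the notifier has been replaced or cleared. All foo_*
 * names are placeholders.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo_notify {
        struct irq_affinity_notify n;   /* embedded notifier */
        struct foo_dev *foo;
};

static void foo_affinity_notify(struct irq_affinity_notify *n,
                                const cpumask_t *mask)
{
        struct foo_notify *fn = container_of(n, struct foo_notify, n);

        fn->foo->irq_cpu = cpumask_first(mask);
}

static void foo_affinity_release(struct kref *ref)
{
        struct irq_affinity_notify *n =
                container_of(ref, struct irq_affinity_notify, kref);

        kfree(container_of(n, struct foo_notify, n));
}

static int foo_register_notify(struct foo_dev *foo)
{
        struct foo_notify *fn = kzalloc(sizeof(*fn), GFP_KERNEL);

        if (!fn)
                return -ENOMEM;
        fn->foo = foo;
        fn->n.notify = foo_affinity_notify;
        fn->n.release = foo_affinity_release;
        return irq_set_affinity_notifier(foo->irq, &fn->n);
}
#endif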
 290
 291#ifndef CONFIG_AUTO_IRQ_AFFINITY
 292/*
 293 * Generic version of the affinity autoselector.
 294 */
 295static int
 296setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 297{
 298        struct cpumask *set = irq_default_affinity;
 299        int node = desc->irq_data.node;
 300
 301        /* Excludes PER_CPU and NO_BALANCE interrupts */
 302        if (!irq_can_set_affinity(irq))
 303                return 0;
 304
 305        /*
 306         * Preserve a userspace affinity setup, but make sure that
 307         * one of the targets is online.
 308         */
 309        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 310                if (cpumask_intersects(desc->irq_data.affinity,
 311                                       cpu_online_mask))
 312                        set = desc->irq_data.affinity;
 313                else
 314                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 315        }
 316
 317        cpumask_and(mask, cpu_online_mask, set);
 318        if (node != NUMA_NO_NODE) {
 319                const struct cpumask *nodemask = cpumask_of_node(node);
 320
 321                /* make sure at least one of the cpus in nodemask is online */
 322                if (cpumask_intersects(mask, nodemask))
 323                        cpumask_and(mask, mask, nodemask);
 324        }
 325        irq_do_set_affinity(&desc->irq_data, mask, false);
 326        return 0;
 327}
 328#else
 329static inline int
 330setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 331{
 332        return irq_select_affinity(irq);
 333}
 334#endif
 335
 336/*
 337 * Called when affinity is set via /proc/irq
 338 */
 339int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 340{
 341        struct irq_desc *desc = irq_to_desc(irq);
 342        unsigned long flags;
 343        int ret;
 344
 345        raw_spin_lock_irqsave(&desc->lock, flags);
 346        ret = setup_affinity(irq, desc, mask);
 347        raw_spin_unlock_irqrestore(&desc->lock, flags);
 348        return ret;
 349}
 350
 351#else
 352static inline int
 353setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 354{
 355        return 0;
 356}
 357#endif
 358
 359void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 360{
 361        if (suspend) {
 362                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
 363                        return;
 364                desc->istate |= IRQS_SUSPENDED;
 365        }
 366
 367        if (!desc->depth++)
 368                irq_disable(desc);
 369}
 370
 371static int __disable_irq_nosync(unsigned int irq)
 372{
 373        unsigned long flags;
 374        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 375
 376        if (!desc)
 377                return -EINVAL;
 378        __disable_irq(desc, irq, false);
 379        irq_put_desc_busunlock(desc, flags);
 380        return 0;
 381}
 382
 383/**
 384 *      disable_irq_nosync - disable an irq without waiting
 385 *      @irq: Interrupt to disable
 386 *
 387 *      Disable the selected interrupt line.  Disables and Enables are
 388 *      nested.
 389 *      Unlike disable_irq(), this function does not ensure existing
 390 *      instances of the IRQ handler have completed before returning.
 391 *
 392 *      This function may be called from IRQ context.
 393 */
 394void disable_irq_nosync(unsigned int irq)
 395{
 396        __disable_irq_nosync(irq);
 397}
 398EXPORT_SYMBOL(disable_irq_nosync);
 399
 400/**
 401 *      disable_irq - disable an irq and wait for completion
 402 *      @irq: Interrupt to disable
 403 *
 404 *      Disable the selected interrupt line.  Enables and Disables are
 405 *      nested.
 406 *      This function waits for any pending IRQ handlers for this interrupt
 407 *      to complete before returning. If you use this function while
 408 *      holding a resource the IRQ handler may need, you will deadlock.
 409 *
 410 *      This function may be called - with care - from IRQ context.
 411 */
 412void disable_irq(unsigned int irq)
 413{
 414        if (!__disable_irq_nosync(irq))
 415                synchronize_irq(irq);
 416}
 417EXPORT_SYMBOL(disable_irq);
 418
 419void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 420{
 421        if (resume) {
 422                if (!(desc->istate & IRQS_SUSPENDED)) {
 423                        if (!desc->action)
 424                                return;
 425                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
 426                                return;
 427                        /* Pretend that it got disabled ! */
 428                        desc->depth++;
 429                }
 430                desc->istate &= ~IRQS_SUSPENDED;
 431        }
 432
 433        switch (desc->depth) {
 434        case 0:
 435 err_out:
 436                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 437                break;
 438        case 1: {
 439                if (desc->istate & IRQS_SUSPENDED)
 440                        goto err_out;
 441                /* Prevent probing on this irq: */
 442                irq_settings_set_noprobe(desc);
 443                irq_enable(desc);
 444                check_irq_resend(desc, irq);
 445                /* fall-through */
 446        }
 447        default:
 448                desc->depth--;
 449        }
 450}
 451
 452/**
 453 *      enable_irq - enable handling of an irq
 454 *      @irq: Interrupt to enable
 455 *
 456 *      Undoes the effect of one call to disable_irq().  If this
 457 *      matches the last disable, processing of interrupts on this
 458 *      IRQ line is re-enabled.
 459 *
 460 *      This function may be called from IRQ context only when
 461 *      desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
 462 */
 463void enable_irq(unsigned int irq)
 464{
 465        unsigned long flags;
 466        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 467
 468        if (!desc)
 469                return;
 470        if (WARN(!desc->irq_data.chip,
 471                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 472                goto out;
 473
 474        __enable_irq(desc, irq, false);
 475out:
 476        irq_put_desc_busunlock(desc, flags);
 477}
 478EXPORT_SYMBOL(enable_irq);
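
/*
 * A minimal sketch of the nested disable/enable pairing (kept out of the
 * build), assuming a driver that must keep its handler quiescent while
 * reprogramming the device. disable_irq() waits for running handlers, so it
 * must not be called while holding a lock the handler takes, and every
 * disable_irq() needs a matching enable_irq(). The foo_* names are
 * placeholders.
 */
#if 0
#include <linux/interrupt.h>

static void foo_reprogram(struct foo_dev *foo)
{
        disable_irq(foo->irq);          /* waits for handlers to finish */
        foo_hw_reconfigure(foo);        /* handler cannot run here */
        enable_irq(foo->irq);           /* re-enabled once depth drops to 0 */
}
#endif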
 479
 480static int set_irq_wake_real(unsigned int irq, unsigned int on)
 481{
 482        struct irq_desc *desc = irq_to_desc(irq);
 483        int ret = -ENXIO;
 484
 485        if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
 486                return 0;
 487
 488        if (desc->irq_data.chip->irq_set_wake)
 489                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 490
 491        return ret;
 492}
 493
 494/**
 495 *      irq_set_irq_wake - control irq power management wakeup
 496 *      @irq:   interrupt to control
 497 *      @on:    enable/disable power management wakeup
 498 *
 499 *      Enable/disable power management wakeup mode, which is
 500 *      disabled by default.  Enables and disables must match,
 501 *      just as they match for non-wakeup mode support.
 502 *
 503 *      Wakeup mode lets this IRQ wake the system from sleep
 504 *      states like "suspend to RAM".
 505 */
 506int irq_set_irq_wake(unsigned int irq, unsigned int on)
 507{
 508        unsigned long flags;
 509        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 510        int ret = 0;
 511
 512        if (!desc)
 513                return -EINVAL;
 514
 515        /* wakeup-capable irqs can be shared between drivers that
 516         * don't need to have the same sleep mode behaviors.
 517         */
 518        if (on) {
 519                if (desc->wake_depth++ == 0) {
 520                        ret = set_irq_wake_real(irq, on);
 521                        if (ret)
 522                                desc->wake_depth = 0;
 523                        else
 524                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 525                }
 526        } else {
 527                if (desc->wake_depth == 0) {
 528                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 529                } else if (--desc->wake_depth == 0) {
 530                        ret = set_irq_wake_real(irq, on);
 531                        if (ret)
 532                                desc->wake_depth = 1;
 533                        else
 534                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 535                }
 536        }
 537        irq_put_desc_busunlock(desc, flags);
 538        return ret;
 539}
 540EXPORT_SYMBOL(irq_set_irq_wake);
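
/*
 * A minimal sketch (kept out of the build), assuming a driver whose interrupt
 * should wake the system from suspend when userspace has enabled wakeup for
 * the device. The calls must balance: every irq_set_irq_wake(irq, 1) needs a
 * matching irq_set_irq_wake(irq, 0) on resume. foo_* names are placeholders.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_wakeup.h>

static int foo_suspend(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                irq_set_irq_wake(foo->irq, 1);
        return 0;
}

static int foo_resume(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                irq_set_irq_wake(foo->irq, 0);
        return 0;
}
#endif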
 541
 542/*
 543 * Internal function that tells the architecture code whether a
 544 * particular irq has been exclusively allocated or is available
 545 * for driver use.
 546 */
 547int can_request_irq(unsigned int irq, unsigned long irqflags)
 548{
 549        unsigned long flags;
 550        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 551        int canrequest = 0;
 552
 553        if (!desc)
 554                return 0;
 555
 556        if (irq_settings_can_request(desc)) {
 557                if (desc->action)
 558                        if (irqflags & desc->action->flags & IRQF_SHARED)
 559                                canrequest = 1;
 560        }
 561        irq_put_desc_unlock(desc, flags);
 562        return canrequest;
 563}
 564
 565int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 566                      unsigned long flags)
 567{
 568        struct irq_chip *chip = desc->irq_data.chip;
 569        int ret, unmask = 0;
 570
 571        if (!chip || !chip->irq_set_type) {
 572                /*
 573                 * IRQF_TRIGGER_* but the PIC does not support multiple
 574                 * flow-types?
 575                 */
 576                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 577                         chip ? (chip->name ? : "unknown") : "unknown");
 578                return 0;
 579        }
 580
 581        flags &= IRQ_TYPE_SENSE_MASK;
 582
 583        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 584                if (!irqd_irq_masked(&desc->irq_data))
 585                        mask_irq(desc);
 586                if (!irqd_irq_disabled(&desc->irq_data))
 587                        unmask = 1;
 588        }
 589
 590        /* caller masked out all except trigger mode flags */
 591        ret = chip->irq_set_type(&desc->irq_data, flags);
 592
 593        switch (ret) {
 594        case IRQ_SET_MASK_OK:
 595                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 596                irqd_set(&desc->irq_data, flags);
 597
 598        case IRQ_SET_MASK_OK_NOCOPY:
 599                flags = irqd_get_trigger_type(&desc->irq_data);
 600                irq_settings_set_trigger_mask(desc, flags);
 601                irqd_clear(&desc->irq_data, IRQD_LEVEL);
 602                irq_settings_clr_level(desc);
 603                if (flags & IRQ_TYPE_LEVEL_MASK) {
 604                        irq_settings_set_level(desc);
 605                        irqd_set(&desc->irq_data, IRQD_LEVEL);
 606                }
 607
 608                ret = 0;
 609                break;
 610        default:
 611                pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 612                       flags, irq, chip->irq_set_type);
 613        }
 614        if (unmask)
 615                unmask_irq(desc);
 616        return ret;
 617}
 618
 619/*
 620 * Default primary interrupt handler for threaded interrupts. Is
 621 * assigned as primary handler when request_threaded_irq is called
 622 * with handler == NULL. Useful for oneshot interrupts.
 623 */
 624static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 625{
 626        return IRQ_WAKE_THREAD;
 627}
 628
 629/*
 630 * Primary handler for nested threaded interrupts. Should never be
 631 * called.
 632 */
 633static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 634{
 635        WARN(1, "Primary handler called for nested irq %d\n", irq);
 636        return IRQ_NONE;
 637}
 638
 639static int irq_wait_for_interrupt(struct irqaction *action)
 640{
 641        set_current_state(TASK_INTERRUPTIBLE);
 642
 643        while (!kthread_should_stop()) {
 644
 645                if (test_and_clear_bit(IRQTF_RUNTHREAD,
 646                                       &action->thread_flags)) {
 647                        __set_current_state(TASK_RUNNING);
 648                        return 0;
 649                }
 650                schedule();
 651                set_current_state(TASK_INTERRUPTIBLE);
 652        }
 653        __set_current_state(TASK_RUNNING);
 654        return -1;
 655}
 656
 657/*
 658 * Oneshot interrupts keep the irq line masked until the threaded
 659 * handler finished. unmask if the interrupt has not been disabled and
 660 * is marked MASKED.
 661 */
 662static void irq_finalize_oneshot(struct irq_desc *desc,
 663                                 struct irqaction *action)
 664{
 665        if (!(desc->istate & IRQS_ONESHOT))
 666                return;
 667again:
 668        chip_bus_lock(desc);
 669        raw_spin_lock_irq(&desc->lock);
 670
 671        /*
 672         * Implausible though it may be, we need to protect ourselves
 673         * against the following scenario:
 674         *
 675         * The thread finishes before the hard interrupt handler on the
 676         * other CPU. If we unmask the irq line then the interrupt can
 677         * come in again, mask the line and leave due to
 678         * IRQS_INPROGRESS, so the irq line stays masked forever.
 679         *
 680         * This also serializes the state of shared oneshot handlers
 681         * versus "desc->threads_oneshot |= action->thread_mask;" in
 682         * irq_wake_thread(). See the comment there which explains the
 683         * serialization.
 684         */
 685        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 686                raw_spin_unlock_irq(&desc->lock);
 687                chip_bus_sync_unlock(desc);
 688                cpu_relax();
 689                goto again;
 690        }
 691
 692        /*
 693         * Now check again, whether the thread should run. Otherwise
 694         * we would clear the threads_oneshot bit of this thread which
 695         * was just set.
 696         */
 697        if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 698                goto out_unlock;
 699
 700        desc->threads_oneshot &= ~action->thread_mask;
 701
 702        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 703            irqd_irq_masked(&desc->irq_data))
 704                unmask_irq(desc);
 705
 706out_unlock:
 707        raw_spin_unlock_irq(&desc->lock);
 708        chip_bus_sync_unlock(desc);
 709}
 710
 711#ifdef CONFIG_SMP
 712/*
 713 * Check whether we need to change the affinity of the interrupt thread.
 714 */
 715static void
 716irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 717{
 718        cpumask_var_t mask;
 719        bool valid = true;
 720
 721        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 722                return;
 723
 724        /*
 725         * In case we are out of memory we set IRQTF_AFFINITY again and
 726         * try again next time
 727         */
 728        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 729                set_bit(IRQTF_AFFINITY, &action->thread_flags);
 730                return;
 731        }
 732
 733        raw_spin_lock_irq(&desc->lock);
 734        /*
 735         * This code is triggered unconditionally. Check the affinity
 736         * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
 737         */
 738        if (desc->irq_data.affinity)
 739                cpumask_copy(mask, desc->irq_data.affinity);
 740        else
 741                valid = false;
 742        raw_spin_unlock_irq(&desc->lock);
 743
 744        if (valid)
 745                set_cpus_allowed_ptr(current, mask);
 746        free_cpumask_var(mask);
 747}
 748#else
 749static inline void
 750irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 751#endif
 752
 753/*
 754 * Interrupts which are not explicitly requested as threaded
 755 * interrupts rely on the implicit bh/preempt disable of the hard irq
 756 * context. So we need to disable bh here to avoid deadlocks and other
 757 * side effects.
 758 */
 759static irqreturn_t
 760irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 761{
 762        irqreturn_t ret;
 763
 764        local_bh_disable();
 765        ret = action->thread_fn(action->irq, action->dev_id);
 766        irq_finalize_oneshot(desc, action);
 767        local_bh_enable();
 768        return ret;
 769}
 770
 771/*
 772 * Interrupts explicitly requested as threaded interrupts want to be
 773 * preemptible - many of them need to sleep and wait for slow buses to
 774 * complete.
 775 */
 776static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 777                struct irqaction *action)
 778{
 779        irqreturn_t ret;
 780
 781        ret = action->thread_fn(action->irq, action->dev_id);
 782        irq_finalize_oneshot(desc, action);
 783        return ret;
 784}
 785
 786static void wake_threads_waitq(struct irq_desc *desc)
 787{
 788        if (atomic_dec_and_test(&desc->threads_active) &&
 789            waitqueue_active(&desc->wait_for_threads))
 790                wake_up(&desc->wait_for_threads);
 791}
 792
 793static void irq_thread_dtor(struct callback_head *unused)
 794{
 795        struct task_struct *tsk = current;
 796        struct irq_desc *desc;
 797        struct irqaction *action;
 798
 799        if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
 800                return;
 801
 802        action = kthread_data(tsk);
 803
 804        pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 805               tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
 806
 807
 808        desc = irq_to_desc(action->irq);
 809        /*
 810         * If IRQTF_RUNTHREAD is set, we need to decrement
 811         * desc->threads_active and wake possible waiters.
 812         */
 813        if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 814                wake_threads_waitq(desc);
 815
 816        /* Prevent a stale desc->threads_oneshot */
 817        irq_finalize_oneshot(desc, action);
 818}
 819
 820/*
 821 * Interrupt handler thread
 822 */
 823static int irq_thread(void *data)
 824{
 825        struct callback_head on_exit_work;
 826        static const struct sched_param param = {
 827                .sched_priority = MAX_USER_RT_PRIO/2,
 828        };
 829        struct irqaction *action = data;
 830        struct irq_desc *desc = irq_to_desc(action->irq);
 831        irqreturn_t (*handler_fn)(struct irq_desc *desc,
 832                        struct irqaction *action);
 833
 834        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 835                                        &action->thread_flags))
 836                handler_fn = irq_forced_thread_fn;
 837        else
 838                handler_fn = irq_thread_fn;
 839
 840        sched_setscheduler(current, SCHED_FIFO, &param);
 841
 842        init_task_work(&on_exit_work, irq_thread_dtor);
 843        task_work_add(current, &on_exit_work, false);
 844
 845        while (!irq_wait_for_interrupt(action)) {
 846                irqreturn_t action_ret;
 847
 848                irq_thread_check_affinity(desc, action);
 849
 850                action_ret = handler_fn(desc, action);
 851                if (!noirqdebug)
 852                        note_interrupt(action->irq, desc, action_ret);
 853
 854                wake_threads_waitq(desc);
 855        }
 856
 857        /*
 858         * This is the regular exit path. __free_irq() is stopping the
 859         * thread via kthread_stop() after calling
 860         * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
 861         * oneshot mask bit can be set. We cannot verify that as we
 862         * cannot touch the oneshot mask at this point anymore as
 863 *      __setup_irq() might have given out current's thread_mask
 864         * again.
 865         */
 866        task_work_cancel(current, irq_thread_dtor);
 867        return 0;
 868}
 869
 870static void irq_setup_forced_threading(struct irqaction *new)
 871{
 872        if (!force_irqthreads)
 873                return;
 874        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 875                return;
 876
 877        new->flags |= IRQF_ONESHOT;
 878
 879        if (!new->thread_fn) {
 880                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
 881                new->thread_fn = new->handler;
 882                new->handler = irq_default_primary_handler;
 883        }
 884}
 885
 886/*
 887 * Internal function to register an irqaction - typically used to
 888 * allocate special interrupts that are part of the architecture.
 889 */
 890static int
 891__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 892{
 893        struct irqaction *old, **old_ptr;
 894        unsigned long flags, thread_mask = 0;
 895        int ret, nested, shared = 0;
 896        cpumask_var_t mask;
 897
 898        if (!desc)
 899                return -EINVAL;
 900
 901        if (desc->irq_data.chip == &no_irq_chip)
 902                return -ENOSYS;
 903        if (!try_module_get(desc->owner))
 904                return -ENODEV;
 905
 906        /*
 907         * Check whether the interrupt nests into another interrupt
 908         * thread.
 909         */
 910        nested = irq_settings_is_nested_thread(desc);
 911        if (nested) {
 912                if (!new->thread_fn) {
 913                        ret = -EINVAL;
 914                        goto out_mput;
 915                }
 916                /*
 917                 * Replace the primary handler which was provided from
 918                 * the driver for non nested interrupt handling by the
 919                 * dummy function which warns when called.
 920                 */
 921                new->handler = irq_nested_primary_handler;
 922        } else {
 923                if (irq_settings_can_thread(desc))
 924                        irq_setup_forced_threading(new);
 925        }
 926
 927        /*
 928         * Create a handler thread when a thread function is supplied
 929         * and the interrupt does not nest into another interrupt
 930         * thread.
 931         */
 932        if (new->thread_fn && !nested) {
 933                struct task_struct *t;
 934
 935                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
 936                                   new->name);
 937                if (IS_ERR(t)) {
 938                        ret = PTR_ERR(t);
 939                        goto out_mput;
 940                }
 941                /*
 942                 * We keep the reference to the task struct even if
 943                 * the thread dies to avoid that the interrupt code
 944                 * references an already freed task_struct.
 945                 */
 946                get_task_struct(t);
 947                new->thread = t;
 948                /*
 949                 * Tell the thread to set its affinity. This is
 950                 * important for shared interrupt handlers as we do
 951                 * not invoke setup_affinity() for the secondary
 952                 * handlers as everything is already set up. Even for
 953                 * interrupts marked with IRQF_NO_BALANCE this is
 954                 * correct as we want the thread to move to the cpu(s)
 955                 * on which the requesting code placed the interrupt.
 956                 */
 957                set_bit(IRQTF_AFFINITY, &new->thread_flags);
 958        }
 959
 960        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 961                ret = -ENOMEM;
 962                goto out_thread;
 963        }
 964
 965        /*
 966         * Drivers are often written to work w/o knowledge about the
 967         * underlying irq chip implementation, so a request for a
 968         * threaded irq without a primary hard irq context handler
 969         * requires the ONESHOT flag to be set. Some irq chips like
 970         * MSI based interrupts are per se one shot safe. Check the
 971         * chip flags, so we can avoid the unmask dance at the end of
 972         * the threaded handler for those.
 973         */
 974        if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
 975                new->flags &= ~IRQF_ONESHOT;
 976
 977        /*
 978         * The following block of code has to be executed atomically
 979         */
 980        raw_spin_lock_irqsave(&desc->lock, flags);
 981        old_ptr = &desc->action;
 982        old = *old_ptr;
 983        if (old) {
 984                /*
 985                 * Can't share interrupts unless both agree to and are
 986                 * the same type (level, edge, polarity). So both flag
 987                 * fields must have IRQF_SHARED set and the bits which
 988                 * set the trigger type must match. Also all must
 989                 * agree on ONESHOT.
 990                 */
 991                if (!((old->flags & new->flags) & IRQF_SHARED) ||
 992                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
 993                    ((old->flags ^ new->flags) & IRQF_ONESHOT))
 994                        goto mismatch;
 995
 996                /* All handlers must agree on per-cpuness */
 997                if ((old->flags & IRQF_PERCPU) !=
 998                    (new->flags & IRQF_PERCPU))
 999                        goto mismatch;
1000
1001                /* add new interrupt at end of irq queue */
1002                do {
1003                        /*
1004                         * Or all existing action->thread_mask bits,
1005                         * so we can find the next zero bit for this
1006                         * new action.
1007                         */
1008                        thread_mask |= old->thread_mask;
1009                        old_ptr = &old->next;
1010                        old = *old_ptr;
1011                } while (old);
1012                shared = 1;
1013        }
1014
1015        /*
1016         * Setup the thread mask for this irqaction for ONESHOT. For
1017         * !ONESHOT irqs the thread mask is 0 so we can avoid a
1018         * conditional in irq_wake_thread().
1019         */
1020        if (new->flags & IRQF_ONESHOT) {
1021                /*
1022                 * Unlikely to have 32 resp 64 irqs sharing one line,
1023                 * but who knows.
1024                 */
1025                if (thread_mask == ~0UL) {
1026                        ret = -EBUSY;
1027                        goto out_mask;
1028                }
1029                /*
1030                 * The thread_mask for the action is or'ed to
1031                 * desc->threads_oneshot to indicate that the
1032                 * IRQF_ONESHOT thread handler has been woken, but not
1033                 * yet finished. The bit is cleared when a thread
1034                 * completes. When all threads of a shared interrupt
1035                 * line have completed desc->threads_oneshot becomes
1036                 * zero and the interrupt line is unmasked. See
1037                 * handle.c:irq_wake_thread() for further information.
1038                 *
1039                 * If no thread is woken by primary (hard irq context)
1040                 * interrupt handlers, then desc->threads_oneshot is
1041                 * also checked for zero to unmask the irq line in the
1042                 * affected hard irq flow handlers
1043                 * (handle_[fasteoi|level]_irq).
1044                 *
1045                 * The new action gets the first zero bit of
1046                 * thread_mask assigned. See the loop above which or's
1047                 * all existing action->thread_mask bits.
1048                 */
1049                new->thread_mask = 1 << ffz(thread_mask);
1050
1051        } else if (new->handler == irq_default_primary_handler &&
1052                   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1053                /*
1054                 * The interrupt was requested with handler = NULL, so
1055                 * we use the default primary handler for it. But it
1056                 * does not have the oneshot flag set. In combination
1057                 * with level interrupts this is deadly, because the
1058                 * default primary handler just wakes the thread, then
1059                 * the irq line is reenabled, but the device still
1060                 * has the level irq asserted. Rinse and repeat....
1061                 *
1062                 * While this works for edge type interrupts, we play
1063                 * it safe and reject unconditionally because we can't
1064                 * say for sure which type this interrupt really
1065                 * has. The type flags are unreliable as the
1066                 * underlying chip implementation can override them.
1067                 */
1068                pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1069                       irq);
1070                ret = -EINVAL;
1071                goto out_mask;
1072        }
1073
1074        if (!shared) {
1075                init_waitqueue_head(&desc->wait_for_threads);
1076
1077                /* Setup the type (level, edge polarity) if configured: */
1078                if (new->flags & IRQF_TRIGGER_MASK) {
1079                        ret = __irq_set_trigger(desc, irq,
1080                                        new->flags & IRQF_TRIGGER_MASK);
1081
1082                        if (ret)
1083                                goto out_mask;
1084                }
1085
1086                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1087                                  IRQS_ONESHOT | IRQS_WAITING);
1088                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1089
1090                if (new->flags & IRQF_PERCPU) {
1091                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
1092                        irq_settings_set_per_cpu(desc);
1093                }
1094
1095                if (new->flags & IRQF_ONESHOT)
1096                        desc->istate |= IRQS_ONESHOT;
1097
1098                if (irq_settings_can_autoenable(desc))
1099                        irq_startup(desc, true);
1100                else
1101                        /* Undo nested disables: */
1102                        desc->depth = 1;
1103
1104                /* Exclude IRQ from balancing if requested */
1105                if (new->flags & IRQF_NOBALANCING) {
1106                        irq_settings_set_no_balancing(desc);
1107                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1108                }
1109
1110                /* Set default affinity mask once everything is setup */
1111                setup_affinity(irq, desc, mask);
1112
1113        } else if (new->flags & IRQF_TRIGGER_MASK) {
1114                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1115                unsigned int omsk = irq_settings_get_trigger_mask(desc);
1116
1117                if (nmsk != omsk)
1118                        /* hope the handler works with current trigger mode */
1119                        pr_warning("irq %d uses trigger mode %u; requested %u\n",
1120                                   irq, omsk, nmsk);
1121        }
1122
1123        new->irq = irq;
1124        *old_ptr = new;
1125
1126        /* Reset broken irq detection when installing new handler */
1127        desc->irq_count = 0;
1128        desc->irqs_unhandled = 0;
1129
1130        /*
1131         * Check whether we disabled the irq via the spurious handler
1132         * before. Reenable it and give it another chance.
1133         */
1134        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1135                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1136                __enable_irq(desc, irq, false);
1137        }
1138
1139        raw_spin_unlock_irqrestore(&desc->lock, flags);
1140
1141        /*
1142         * Strictly no need to wake it up, but hung_task complains
1143         * when no hard interrupt wakes the thread up.
1144         */
1145        if (new->thread)
1146                wake_up_process(new->thread);
1147
1148        register_irq_proc(irq, desc);
1149        new->dir = NULL;
1150        register_handler_proc(irq, new);
1151        free_cpumask_var(mask);
1152
1153        return 0;
1154
1155mismatch:
1156        if (!(new->flags & IRQF_PROBE_SHARED)) {
1157                pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1158                       irq, new->flags, new->name, old->flags, old->name);
1159#ifdef CONFIG_DEBUG_SHIRQ
1160                dump_stack();
1161#endif
1162        }
1163        ret = -EBUSY;
1164
1165out_mask:
1166        raw_spin_unlock_irqrestore(&desc->lock, flags);
1167        free_cpumask_var(mask);
1168
1169out_thread:
1170        if (new->thread) {
1171                struct task_struct *t = new->thread;
1172
1173                new->thread = NULL;
1174                kthread_stop(t);
1175                put_task_struct(t);
1176        }
1177out_mput:
1178        module_put(desc->owner);
1179        return ret;
1180}
1181
1182/**
1183 *      setup_irq - setup an interrupt
1184 *      @irq: Interrupt line to setup
1185 *      @act: irqaction for the interrupt
1186 *
1187 * Used to statically setup interrupts in the early boot process.
1188 */
1189int setup_irq(unsigned int irq, struct irqaction *act)
1190{
1191        int retval;
1192        struct irq_desc *desc = irq_to_desc(irq);
1193
1194        if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1195                return -EINVAL;
1196        chip_bus_lock(desc);
1197        retval = __setup_irq(irq, desc, act);
1198        chip_bus_sync_unlock(desc);
1199
1200        return retval;
1201}
1202EXPORT_SYMBOL_GPL(setup_irq);
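
/*
 * A minimal sketch of the static early-boot usage (kept out of the build),
 * assuming an architecture timer whose irqaction is declared at compile time
 * because the allocators are not available yet. FOO_TIMER_IRQ and the foo_*
 * helpers are placeholders.
 */
#if 0
#include <linux/init.h>
#include <linux/interrupt.h>

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
        foo_timer_ack();                /* acknowledge the tick */
        return IRQ_HANDLED;
}

static struct irqaction foo_timer_irqaction = {
        .name           = "foo-timer",
        .flags          = IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = foo_timer_interrupt,
};

void __init foo_time_init(void)
{
        setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
}
#endif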
1203
1204/*
1205 * Internal function to unregister an irqaction - used to free
1206 * regular and special interrupts that are part of the architecture.
1207 */
1208static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1209{
1210        struct irq_desc *desc = irq_to_desc(irq);
1211        struct irqaction *action, **action_ptr;
1212        unsigned long flags;
1213
1214        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1215
1216        if (!desc)
1217                return NULL;
1218
1219        raw_spin_lock_irqsave(&desc->lock, flags);
1220
1221        /*
1222         * There can be multiple actions per IRQ descriptor, find the right
1223         * one based on the dev_id:
1224         */
1225        action_ptr = &desc->action;
1226        for (;;) {
1227                action = *action_ptr;
1228
1229                if (!action) {
1230                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
1231                        raw_spin_unlock_irqrestore(&desc->lock, flags);
1232
1233                        return NULL;
1234                }
1235
1236                if (action->dev_id == dev_id)
1237                        break;
1238                action_ptr = &action->next;
1239        }
1240
1241        /* Found it - now remove it from the list of entries: */
1242        *action_ptr = action->next;
1243
1244        /* If this was the last handler, shut down the IRQ line: */
1245        if (!desc->action)
1246                irq_shutdown(desc);
1247
1248#ifdef CONFIG_SMP
1249        /* make sure affinity_hint is cleaned up */
1250        if (WARN_ON_ONCE(desc->affinity_hint))
1251                desc->affinity_hint = NULL;
1252#endif
1253
1254        raw_spin_unlock_irqrestore(&desc->lock, flags);
1255
1256        unregister_handler_proc(irq, action);
1257
1258        /* Make sure it's not being used on another CPU: */
1259        synchronize_irq(irq);
1260
1261#ifdef CONFIG_DEBUG_SHIRQ
1262        /*
1263         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1264         * event to happen even now that it's being freed, so let's make sure that
1265         * is so by doing an extra call to the handler ....
1266         *
1267         * ( We do this after actually deregistering it, to make sure that a
1268         *   'real' IRQ doesn't run in parallel with our fake. )
1269         */
1270        if (action->flags & IRQF_SHARED) {
1271                local_irq_save(flags);
1272                action->handler(irq, dev_id);
1273                local_irq_restore(flags);
1274        }
1275#endif
1276
1277        if (action->thread) {
1278                kthread_stop(action->thread);
1279                put_task_struct(action->thread);
1280        }
1281
1282        module_put(desc->owner);
1283        return action;
1284}
1285
1286/**
1287 *      remove_irq - free an interrupt
1288 *      @irq: Interrupt line to free
1289 *      @act: irqaction for the interrupt
1290 *
1291 * Used to remove interrupts statically setup by the early boot process.
1292 */
1293void remove_irq(unsigned int irq, struct irqaction *act)
1294{
1295        struct irq_desc *desc = irq_to_desc(irq);
1296
1297        if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1298            __free_irq(irq, act->dev_id);
1299}
1300EXPORT_SYMBOL_GPL(remove_irq);
1301
1302/**
1303 *      free_irq - free an interrupt allocated with request_irq
1304 *      @irq: Interrupt line to free
1305 *      @dev_id: Device identity to free
1306 *
1307 *      Remove an interrupt handler. The handler is removed and if the
1308 *      interrupt line is no longer in use by any driver it is disabled.
1309 *      On a shared IRQ the caller must ensure the interrupt is disabled
1310 *      on the card it drives before calling this function. The function
1311 *      does not return until any executing interrupts for this IRQ
1312 *      have completed.
1313 *
1314 *      This function must not be called from interrupt context.
1315 */
1316void free_irq(unsigned int irq, void *dev_id)
1317{
1318        struct irq_desc *desc = irq_to_desc(irq);
1319
1320        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1321                return;
1322
1323#ifdef CONFIG_SMP
1324        if (WARN_ON(desc->affinity_notify))
1325                desc->affinity_notify = NULL;
1326#endif
1327
1328        chip_bus_lock(desc);
1329        kfree(__free_irq(irq, dev_id));
1330        chip_bus_sync_unlock(desc);
1331}
1332EXPORT_SYMBOL(free_irq);
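
/*
 * A minimal teardown sketch (kept out of the build), assuming a driver on a
 * possibly shared line: the device is silenced first, then free_irq() removes
 * the handler and waits for in-flight executions, after which per-device
 * state can be released. Must run in process context. The foo_* names are
 * placeholders.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/slab.h>

static void foo_remove(struct foo_dev *foo)
{
        foo_hw_irq_disable(foo);        /* stop the card raising the line */
        free_irq(foo->irq, foo);        /* dev_id must match request_irq() */
        kfree(foo->ring);               /* handler can no longer touch it */
}
#endif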
1333
1334/**
1335 *      request_threaded_irq - allocate an interrupt line
1336 *      @irq: Interrupt line to allocate
1337 *      @handler: Function to be called when the IRQ occurs.
1338 *                Primary handler for threaded interrupts
1339 *                If NULL and thread_fn != NULL the default
1340 *                primary handler is installed
1341 *      @thread_fn: Function called from the irq handler thread
1342 *                  If NULL, no irq thread is created
1343 *      @irqflags: Interrupt type flags
1344 *      @devname: An ascii name for the claiming device
1345 *      @dev_id: A cookie passed back to the handler function
1346 *
1347 *      This call allocates interrupt resources and enables the
1348 *      interrupt line and IRQ handling. From the point this
1349 *      call is made your handler function may be invoked. Since
1350 *      your handler function must clear any interrupt the board
1351 *      raises, you must take care both to initialise your hardware
1352 *      and to set up the interrupt handler in the right order.
1353 *
1354 *      If you want to set up a threaded irq handler for your device
1355 *      then you need to supply @handler and @thread_fn. @handler is
1356 *      still called in hard interrupt context and has to check
1357 *      whether the interrupt originates from the device. If yes it
1358 *      needs to disable the interrupt on the device and return
1359 *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1360 *      @thread_fn. This split handler design is necessary to support
1361 *      shared interrupts.
1362 *
1363 *      Dev_id must be globally unique. Normally the address of the
1364 *      device data structure is used as the cookie. Since the handler
1365 *      receives this value it makes sense to use it.
1366 *
1367 *      If your interrupt is shared you must pass a non NULL dev_id
1368 *      as this is required when freeing the interrupt.
1369 *
1370 *      Flags:
1371 *
1372 *      IRQF_SHARED             Interrupt is shared
1373 *      IRQF_TRIGGER_*          Specify active edge(s) or level
1374 *
1375 */
1376int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1377                         irq_handler_t thread_fn, unsigned long irqflags,
1378                         const char *devname, void *dev_id)
1379{
1380        struct irqaction *action;
1381        struct irq_desc *desc;
1382        int retval;
1383
1384        /*
1385         * Sanity-check: shared interrupts must pass in a real dev-ID,
1386         * otherwise we'll have trouble later trying to figure out
1387         * which interrupt is which (messes up the interrupt freeing
1388         * logic etc).
1389         */
1390        if ((irqflags & IRQF_SHARED) && !dev_id)
1391                return -EINVAL;
1392
1393        desc = irq_to_desc(irq);
1394        if (!desc)
1395                return -EINVAL;
1396
1397        if (!irq_settings_can_request(desc) ||
1398            WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1399                return -EINVAL;
1400
1401        if (!handler) {
1402                if (!thread_fn)
1403                        return -EINVAL;
1404                handler = irq_default_primary_handler;
1405        }
1406
1407        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1408        if (!action)
1409                return -ENOMEM;
1410
1411        action->handler = handler;
1412        action->thread_fn = thread_fn;
1413        action->flags = irqflags;
1414        action->name = devname;
1415        action->dev_id = dev_id;
1416
1417        chip_bus_lock(desc);
1418        retval = __setup_irq(irq, desc, action);
1419        chip_bus_sync_unlock(desc);
1420
1421        if (retval)
1422                kfree(action);
1423
1424#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1425        if (!retval && (irqflags & IRQF_SHARED)) {
1426                /*
1427                 * It's a shared IRQ -- the driver ought to be prepared for it
1428                 * to happen immediately, so let's make sure....
1429                 * We disable the irq to make sure that a 'real' IRQ doesn't
1430                 * run in parallel with our fake.
1431                 */
1432                unsigned long flags;
1433
1434                disable_irq(irq);
1435                local_irq_save(flags);
1436
1437                handler(irq, dev_id);
1438
1439                local_irq_restore(flags);
1440                enable_irq(irq);
1441        }
1442#endif
1443        return retval;
1444}
1445EXPORT_SYMBOL(request_threaded_irq);
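
/*
 * A minimal sketch of the split handler design described above (kept out of
 * the build), assuming a device behind a slow bus: the primary handler only
 * checks and quiesces the interrupt source, the threaded handler does the
 * sleeping bus work. With a level triggered, ONESHOT request the line stays
 * masked until the thread returns. All foo_* names are placeholders.
 */
#if 0
#include <linux/interrupt.h>

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        if (!foo_hw_irq_pending(foo))
                return IRQ_NONE;        /* spurious, not our device */

        foo_hw_irq_mask(foo);           /* silence the device */
        return IRQ_WAKE_THREAD;         /* run foo_thread_fn() */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        foo_handle_events(foo);         /* may sleep, e.g. bus transfers */
        foo_hw_irq_unmask(foo);
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
        return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
                                    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
                                    "foo", foo);
}
#endif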
1446
1447/**
1448 *      request_any_context_irq - allocate an interrupt line
1449 *      @irq: Interrupt line to allocate
1450 *      @handler: Function to be called when the IRQ occurs.
1451 *                Threaded handler for threaded interrupts.
1452 *      @flags: Interrupt type flags
1453 *      @name: An ascii name for the claiming device
1454 *      @dev_id: A cookie passed back to the handler function
1455 *
1456 *      This call allocates interrupt resources and enables the
1457 *      interrupt line and IRQ handling. It selects either a
1458 *      hardirq or threaded handling method depending on the
1459 *      context.
1460 *
1461 *      On failure, it returns a negative value. On success,
1462 *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1463 */
1464int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1465                            unsigned long flags, const char *name, void *dev_id)
1466{
1467        struct irq_desc *desc = irq_to_desc(irq);
1468        int ret;
1469
1470        if (!desc)
1471                return -EINVAL;
1472
1473        if (irq_settings_is_nested_thread(desc)) {
1474                ret = request_threaded_irq(irq, NULL, handler,
1475                                           flags, name, dev_id);
1476                return !ret ? IRQC_IS_NESTED : ret;
1477        }
1478
1479        ret = request_irq(irq, handler, flags, name, dev_id);
1480        return !ret ? IRQC_IS_HARDIRQ : ret;
1481}
1482EXPORT_SYMBOL_GPL(request_any_context_irq);
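
/*
 * A minimal sketch (kept out of the build), assuming a driver, e.g. for a
 * line on a GPIO expander, that cannot know whether its parent irq is a real
 * hardirq or a nested thread: the same handler is used either way, and the
 * positive return value only tells the caller which context was chosen. The
 * foo_* names, including foo_button_handler(), are placeholders.
 */
#if 0
#include <linux/interrupt.h>

static int foo_request(struct foo_dev *foo)
{
        int ret;

        ret = request_any_context_irq(foo->irq, foo_button_handler,
                                      IRQF_TRIGGER_FALLING, "foo-button",
                                      foo);
        if (ret < 0)
                return ret;

        /* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success. */
        return 0;
}
#endif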
1483
1484void enable_percpu_irq(unsigned int irq, unsigned int type)
1485{
1486        unsigned int cpu = smp_processor_id();
1487        unsigned long flags;
1488        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1489
1490        if (!desc)
1491                return;
1492
1493        type &= IRQ_TYPE_SENSE_MASK;
1494        if (type != IRQ_TYPE_NONE) {
1495                int ret;
1496
1497                ret = __irq_set_trigger(desc, irq, type);
1498
1499                if (ret) {
1500                        WARN(1, "failed to set type for IRQ%d\n", irq);
1501                        goto out;
1502                }
1503        }
1504
1505        irq_percpu_enable(desc, cpu);
1506out:
1507        irq_put_desc_unlock(desc, flags);
1508}
1509
1510void disable_percpu_irq(unsigned int irq)
1511{
1512        unsigned int cpu = smp_processor_id();
1513        unsigned long flags;
1514        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1515
1516        if (!desc)
1517                return;
1518
1519        irq_percpu_disable(desc, cpu);
1520        irq_put_desc_unlock(desc, flags);
1521}
1522
1523/*
1524 * Internal function to unregister a percpu irqaction.
1525 */
1526static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1527{
1528        struct irq_desc *desc = irq_to_desc(irq);
1529        struct irqaction *action;
1530        unsigned long flags;
1531
1532        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1533
1534        if (!desc)
1535                return NULL;
1536
1537        raw_spin_lock_irqsave(&desc->lock, flags);
1538
1539        action = desc->action;
1540        if (!action || action->percpu_dev_id != dev_id) {
1541                WARN(1, "Trying to free already-free IRQ %d\n", irq);
1542                goto bad;
1543        }
1544
1545        if (!cpumask_empty(desc->percpu_enabled)) {
1546                WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1547                     irq, cpumask_first(desc->percpu_enabled));
1548                goto bad;
1549        }
1550
1551        /* Found it - now remove it from the list of entries: */
1552        desc->action = NULL;
1553
1554        raw_spin_unlock_irqrestore(&desc->lock, flags);
1555
1556        unregister_handler_proc(irq, action);
1557
1558        module_put(desc->owner);
1559        return action;
1560
1561bad:
1562        raw_spin_unlock_irqrestore(&desc->lock, flags);
1563        return NULL;
1564}
1565
1566/**
1567 *      remove_percpu_irq - free a per-cpu interrupt
1568 *      @irq: Interrupt line to free
1569 *      @act: irqaction for the interrupt
1570 *
1571 * Used to remove interrupts statically setup by the early boot process.
1572 */
1573void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1574{
1575        struct irq_desc *desc = irq_to_desc(irq);
1576
1577        if (desc && irq_settings_is_per_cpu_devid(desc))
1578            __free_percpu_irq(irq, act->percpu_dev_id);
1579}
1580
1581/**
1582 *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
1583 *      @irq: Interrupt line to free
1584 *      @dev_id: Device identity to free
1585 *
1586 *      Remove a percpu interrupt handler. The handler is removed, but
1587 *      the interrupt line is not disabled. This must be done on each
1588 *      CPU before calling this function. The function does not return
1589 *      until any executing interrupts for this IRQ have completed.
1590 *
1591 *      This function must not be called from interrupt context.
1592 */
1593void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1594{
1595        struct irq_desc *desc = irq_to_desc(irq);
1596
1597        if (!desc || !irq_settings_is_per_cpu_devid(desc))
1598                return;
1599
1600        chip_bus_lock(desc);
1601        kfree(__free_percpu_irq(irq, dev_id));
1602        chip_bus_sync_unlock(desc);
1603}
1604
1605/**
1606 *      setup_percpu_irq - setup a per-cpu interrupt
1607 *      @irq: Interrupt line to setup
1608 *      @act: irqaction for the interrupt
1609 *
1610 * Used to statically setup per-cpu interrupts in the early boot process.
1611 */
1612int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1613{
1614        struct irq_desc *desc = irq_to_desc(irq);
1615        int retval;
1616
1617        if (!desc || !irq_settings_is_per_cpu_devid(desc))
1618                return -EINVAL;
1619        chip_bus_lock(desc);
1620        retval = __setup_irq(irq, desc, act);
1621        chip_bus_sync_unlock(desc);
1622
1623        return retval;
1624}
1625
1626/**
1627 *      request_percpu_irq - allocate a percpu interrupt line
1628 *      @irq: Interrupt line to allocate
1629 *      @handler: Function to be called when the IRQ occurs.
1630 *      @devname: An ascii name for the claiming device
1631 *      @dev_id: A percpu cookie passed back to the handler function
1632 *
1633 *      This call allocates interrupt resources, but doesn't
1634 *      automatically enable the interrupt. It has to be done on each
1635 *      CPU using enable_percpu_irq().
1636 *
1637 *      Dev_id must be globally unique. It is a per-cpu variable, and
1638 *      the handler gets called with the interrupted CPU's instance of
1639 *      that variable.
1640 */
1641int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1642                       const char *devname, void __percpu *dev_id)
1643{
1644        struct irqaction *action;
1645        struct irq_desc *desc;
1646        int retval;
1647
1648        if (!dev_id)
1649                return -EINVAL;
1650
1651        desc = irq_to_desc(irq);
1652        if (!desc || !irq_settings_can_request(desc) ||
1653            !irq_settings_is_per_cpu_devid(desc))
1654                return -EINVAL;
1655
1656        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1657        if (!action)
1658                return -ENOMEM;
1659
1660        action->handler = handler;
1661        action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1662        action->name = devname;
1663        action->percpu_dev_id = dev_id;
1664
1665        chip_bus_lock(desc);
1666        retval = __setup_irq(irq, desc, action);
1667        chip_bus_sync_unlock(desc);
1668
1669        if (retval)
1670                kfree(action);
1671
1672        return retval;
1673}
1674
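/*
 * A minimal sketch (kept out of the build), assuming a per-cpu timer style
 * interrupt: the cookie is a percpu allocation, the line is requested once,
 * and each CPU enables it for itself, typically from hotplug callbacks. The
 * foo_* names are placeholders.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static struct foo_pcpu __percpu *foo_pcpu;

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
        struct foo_pcpu *p = dev_id;    /* this CPU's instance */

        foo_handle_tick(p);
        return IRQ_HANDLED;
}

static int foo_percpu_init(unsigned int irq)
{
        int ret;

        foo_pcpu = alloc_percpu(struct foo_pcpu);
        if (!foo_pcpu)
                return -ENOMEM;

        ret = request_percpu_irq(irq, foo_percpu_handler, "foo-tick",
                                 foo_pcpu);
        if (ret) {
                free_percpu(foo_pcpu);
                return ret;
        }

        /* Each CPU must do this for itself, e.g. from a hotplug callback: */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
        return 0;
}
#endif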