linux/kernel/irq/spurious.c
/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

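/*
 * Machinery for polling presumably stuck interrupt lines: the timer
 * fires every POLL_SPURIOUS_IRQ_INTERVAL (HZ/10 jiffies, i.e. 100ms).
 * irq_poll_active ensures only one poller runs at a time, and
 * irq_poll_cpu records which CPU that is so irq_wait_for_poll() can
 * detect being entered from the polling CPU itself.
 */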
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
                      smp_processor_id(), desc->irq_data.irq))
                return false;

#ifdef CONFIG_SMP
        do {
                raw_spin_unlock(&desc->lock);
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
                raw_spin_lock(&desc->lock);
        } while (irqd_irq_inprogress(&desc->irq_data));
        /* Might have been disabled in meantime */
        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
        return false;
#endif
}

/*
 * Recovery handler for misrouted interrupts: poll one interrupt line
 * and run its handlers to see whether any of them claims the stray
 * interrupt.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
        irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;

        raw_spin_lock(&desc->lock);

        /* PER_CPU and nested thread interrupts are never polled */
        if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
                goto out;

        /*
         * Do not poll disabled interrupts unless the spurious
         * disabled poller asks explicitly.
         */
        if (irqd_irq_disabled(&desc->irq_data) && !force)
                goto out;

        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
         * first.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
            (action->flags & __IRQF_TIMER))
                goto out;

        /* Already running on another processor */
        if (irqd_irq_inprogress(&desc->irq_data)) {
                /*
                 * Already running: If it is shared get the other
                 * CPU to go looking for our mystery interrupt too
                 */
                desc->istate |= IRQS_PENDING;
                goto out;
        }

        /* Mark it poll in progress */
        desc->istate |= IRQS_POLL_INPROGRESS;
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
                /* Make sure that there is still a valid action */
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
        raw_spin_unlock(&desc->lock);
        return ret == IRQ_HANDLED;
}

static int misrouted_irq(int irq)
{
        struct irq_desc *desc;
        int i, ok = 0;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;

        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                if (!i)
                        continue;

                if (i == irq)   /* Already tried */
                        continue;

                if (try_one_irq(i, desc, false))
                        ok = 1;
        }
out:
        atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
}

static void poll_spurious_irqs(unsigned long dummy)
{
        struct irq_desc *desc;
        int i;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                unsigned int state;

                if (!i)
                        continue;

                /* Racy but it doesn't matter */
                state = desc->istate;
                barrier();
                if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;

                local_irq_disable();
                try_one_irq(i, desc, true);
                local_irq_enable();
        }
out:
        atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

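/*
 * Valid handler return values are built from IRQ_NONE (0),
 * IRQ_HANDLED (1 << 0) and IRQ_WAKE_THREAD (1 << 1), so anything
 * above (IRQ_HANDLED | IRQ_WAKE_THREAD) == 3 cannot be a legitimate
 * irqreturn_t combination and is flagged as bogus.
 */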
static inline int bad_action_ret(irqreturn_t action_ret)
{
        if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The remaining 100 of 100,000 interrupts may have come from a
 *  correctly functioning device sharing an IRQ with the failing one.)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
                 irqreturn_t action_ret)
{
        struct irqaction *action;
        unsigned long flags;

        if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared (try booting with "
                                "the \"irqpoll\" option)\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");

        /*
         * We need to take desc->lock here. note_interrupt() is called
         * w/o desc->lock held, but with the interrupt marked as in
         * progress. We might race with something else removing an
         * action. It's ok to take desc->lock here. See synchronize_irq().
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        while (action) {
                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
                if (action->thread_fn)
                        printk(KERN_CONT " threaded [<%p>] %pf",
                                        action->thread_fn, action->thread_fn);
                printk(KERN_CONT "\n");
                action = action->next;
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
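
/*
 * Example of the diagnostic this produces (irq number, addresses and
 * handler names below are purely illustrative):
 *
 *      irq 7: nobody cared (try booting with the "irqpoll" option)
 *      <stack trace>
 *      handlers:
 *      [<c01a2b3c>] foo_interrupt threaded [<c01a2c40>] foo_thread_fn
 */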

/* Report bad irqs at most 100 times per boot to avoid flooding the log. */
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count > 0) {
                count--;
                __report_bad_irq(irq, desc, action_ret);
        }
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
                  irqreturn_t action_ret)
{
        struct irqaction *action;

        if (!irqfixup)
                return 0;

        /* We didn't actually handle the IRQ - see if it was misrouted? */
        if (action_ret == IRQ_NONE)
                return 1;

        /*
         * But for 'irqfixup == 2' we also do it for handled interrupts
         * if they are marked as IRQF_IRQPOLL (or for irq zero, which is
         * the traditional PC timer interrupt - a legacy special case).
         */
        if (irqfixup < 2)
                return 0;

        if (!irq)
                return 1;

        /*
         * Since we don't get the descriptor lock, "action" can
         * change under us.  We don't really care, but we don't
         * want to follow a NULL pointer. So tell the compiler to
         * just load it once by using a barrier.
         */
        action = desc->action;
        barrier();
        return action && (action->flags & IRQF_IRQPOLL);
}

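/*
 * Called on return from the primary handler(s). Screens out
 * poll-induced and IRQ_WAKE_THREAD invocations, reports bogus return
 * values, and tracks unhandled interrupts: once 99,900 out of the
 * last 100,000 interrupts went unhandled, the line is reported as
 * stuck, disabled, and handed over to the spurious-poll timer.
 */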
void note_interrupt(unsigned int irq, struct irq_desc *desc,
                    irqreturn_t action_ret)
{
        if (desc->istate & IRQS_POLL_INPROGRESS)
                return;

        /* we get here again via the threaded handler */
        if (action_ret == IRQ_WAKE_THREAD)
                return;

        if (bad_action_ret(action_ret)) {
                report_bad_irq(irq, desc, action_ret);
                return;
        }

        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are only seeing the odd spurious IRQ caused by
                 * bus asynchronicity, don't let the count accumulate
                 * indefinitely; otherwise the counter becomes a
                 * doomsday timer for otherwise working systems.
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
                        desc->irqs_unhandled = 1;
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
        }

        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
                int ok = misrouted_irq(irq);
                if (action_ret == IRQ_NONE)
                        desc->irqs_unhandled -= ok;
        }

        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;

        desc->irq_count = 0;
        if (unlikely(desc->irqs_unhandled > 99900)) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(irq, desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
                irq_disable(desc);

                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
        }
        desc->irqs_unhandled = 0;
}

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk(KERN_INFO "IRQ lockup detection disabled\n");

        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
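
/*
 * Usage: boot with "noirqdebug" on the kernel command line to disable
 * the lockup detection above. The module parameter is writable (mode
 * 0644), so it can likely also be flipped at runtime via sysfs; the
 * exact /sys/module/... path depends on how this file is built.
 */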

static int __init irqfixup_setup(char *str)
{
        irqfixup = 1;
        printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
        printk(KERN_WARNING "This may impact system performance.\n");

        return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
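
/*
 * Usage: boot with "irqfixup" to enable the misrouted-irq recovery in
 * try_misrouted_irq(). irqfixup is an int rather than a bool because
 * "irqpoll" below raises it to 2 for the more aggressive mode.
 */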

static int __init irqpoll_setup(char *str)
{
        irqfixup = 2;
        printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
                                "enabled\n");
        printk(KERN_WARNING "This may significantly impact system "
                                "performance\n");
        return 1;
}

__setup("irqpoll", irqpoll_setup);

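/*
 * With "irqpoll" (irqfixup == 2), every unhandled interrupt triggers a
 * poll of all other interrupt lines via misrouted_irq(), and handled
 * interrupts on IRQF_IRQPOLL lines (or irq 0, the legacy PC timer) are
 * polled as well - hence the warning about a significant performance
 * impact.
 */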