1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/irq.h>
14#include <linux/msi.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
18
19#include "internals.h"
20
21
22
23
24
25
26int irq_set_chip(unsigned int irq, struct irq_chip *chip)
27{
28 unsigned long flags;
29 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
30
31 if (!desc)
32 return -EINVAL;
33
34 if (!chip)
35 chip = &no_irq_chip;
36
37 desc->irq_data.chip = chip;
38 irq_put_desc_unlock(desc, flags);
39
40
41
42
43
44 irq_reserve_irq(irq);
45 return 0;
46}
47EXPORT_SYMBOL(irq_set_chip);
48
49
50
51
52
53
54int irq_set_irq_type(unsigned int irq, unsigned int type)
55{
56 unsigned long flags;
57 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
58 int ret = 0;
59
60 if (!desc)
61 return -EINVAL;
62
63 type &= IRQ_TYPE_SENSE_MASK;
64 ret = __irq_set_trigger(desc, irq, type);
65 irq_put_desc_busunlock(desc, flags);
66 return ret;
67}
68EXPORT_SYMBOL(irq_set_irq_type);
69
70
71
72
73
74
75
76
77int irq_set_handler_data(unsigned int irq, void *data)
78{
79 unsigned long flags;
80 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
81
82 if (!desc)
83 return -EINVAL;
84 desc->irq_data.handler_data = data;
85 irq_put_desc_unlock(desc, flags);
86 return 0;
87}
88EXPORT_SYMBOL(irq_set_handler_data);
89
90
91
92
93
94
95
96
97int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
98{
99 unsigned long flags;
100 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
101
102 if (!desc)
103 return -EINVAL;
104 desc->irq_data.msi_desc = entry;
105 if (entry)
106 entry->irq = irq;
107 irq_put_desc_unlock(desc, flags);
108 return 0;
109}
110
111
112
113
114
115
116
117
118int irq_set_chip_data(unsigned int irq, void *data)
119{
120 unsigned long flags;
121 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
122
123 if (!desc)
124 return -EINVAL;
125 desc->irq_data.chip_data = data;
126 irq_put_desc_unlock(desc, flags);
127 return 0;
128}
129EXPORT_SYMBOL(irq_set_chip_data);
130
131struct irq_data *irq_get_irq_data(unsigned int irq)
132{
133 struct irq_desc *desc = irq_to_desc(irq);
134
135 return desc ? &desc->irq_data : NULL;
136}
137EXPORT_SYMBOL_GPL(irq_get_irq_data);
138
/* Clear the DISABLED bit in the descriptor's irqd state. */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}
143
/* Set the DISABLED bit in the descriptor's irqd state. */
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}
148
/* Clear the MASKED bit in the descriptor's irqd state. */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}
153
/* Set the MASKED bit in the descriptor's irqd state. */
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
158
/*
 * Start up an interrupt: clear the disabled state, reset the disable
 * depth and unmask the line - preferring the chip's irq_startup()
 * callback, falling back to a plain enable.  When @resend is true, a
 * pending interrupt that arrived while the line was down is replayed.
 */
int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		/* irq_startup() is expected to leave the line unmasked. */
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}
176
/*
 * Shut down an interrupt: mark it disabled, set the disable depth to 1
 * and quiesce the line at the chip, using the strongest callback the
 * chip provides (irq_shutdown > irq_disable > irq_mask).
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}
189
/*
 * Enable an interrupt: clear the disabled state and unmask the line at
 * the chip via irq_enable() if available, otherwise via irq_unmask().
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
199
/*
 * Disable an interrupt.  If the chip provides irq_disable() the line is
 * masked immediately; otherwise only the DISABLED state bit is set and
 * the hardware stays unmasked (lazy disable - the flow handlers mask
 * the line when an interrupt arrives while it is marked disabled).
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
208
209void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
210{
211 if (desc->irq_data.chip->irq_enable)
212 desc->irq_data.chip->irq_enable(&desc->irq_data);
213 else
214 desc->irq_data.chip->irq_unmask(&desc->irq_data);
215 cpumask_set_cpu(cpu, desc->percpu_enabled);
216}
217
218void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
219{
220 if (desc->irq_data.chip->irq_disable)
221 desc->irq_data.chip->irq_disable(&desc->irq_data);
222 else
223 desc->irq_data.chip->irq_mask(&desc->irq_data);
224 cpumask_clear_cpu(cpu, desc->percpu_enabled);
225}
226
227static inline void mask_ack_irq(struct irq_desc *desc)
228{
229 if (desc->irq_data.chip->irq_mask_ack)
230 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
231 else {
232 desc->irq_data.chip->irq_mask(&desc->irq_data);
233 if (desc->irq_data.chip->irq_ack)
234 desc->irq_data.chip->irq_ack(&desc->irq_data);
235 }
236 irq_state_set_masked(desc);
237}
238
239void mask_irq(struct irq_desc *desc)
240{
241 if (desc->irq_data.chip->irq_mask) {
242 desc->irq_data.chip->irq_mask(&desc->irq_data);
243 irq_state_set_masked(desc);
244 }
245}
246
247void unmask_irq(struct irq_desc *desc)
248{
249 if (desc->irq_data.chip->irq_unmask) {
250 desc->irq_data.chip->irq_unmask(&desc->irq_data);
251 irq_state_clr_masked(desc);
252 }
253}
254
255
256
257
258
259
260
261
262
/**
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context, hence the might_sleep() - the action's
 *	thread_fn runs directly here, not in hard irq context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	/* No action installed or line disabled: nothing to do. */
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	/* Mark in-progress, then drop the lock for the (sleepable) handler. */
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
293
294static bool irq_check_poll(struct irq_desc *desc)
295{
296 if (!(desc->istate & IRQS_POLL_INPROGRESS))
297 return false;
298 return irq_wait_for_poll(desc);
299}
300
301
302
303
304
305
306
307
308
309
310
311
312
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Flow handler for interrupts which need no hardware control at
 *	this level: no mask/ack/eoi calls are made, only bookkeeping and
 *	dispatch of the installed action.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Another context is handling/polling this irq - bail out unless
	 * the poller tells us to take it. */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
334
335
336
337
338
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq().
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * Unmask only when all of these hold:
	 * - the line is not disabled
	 * - it is currently masked
	 * - no oneshot thread is still pending (threads_oneshot == 0),
	 *   i.e. the primary handler dealt with it completely or the
	 *   interrupt was spurious.
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
352
353
354
355
356
357
358
359
360
361
362
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level triggered interrupts stay asserted until the device is
 *	serviced, so the line is masked and acked up front and only
 *	conditionally unmasked after the action has run.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	/* Someone else is handling/polling this irq. */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * No action or line disabled: keep it masked (it was masked
	 * above) and leave.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
391
/*
 * Optional hook invoked by handle_fasteoi_irq() before the main irq
 * event handling; a no-op unless CONFIG_IRQ_PREFLOW_FASTEOI is set and
 * a preflow_handler was installed on the descriptor.
 */
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
401
402
403
404
405
406
407
408
409
410
411
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Flow handler for chips which only need a single EOI at the end of
 *	handling; the line is masked only for the oneshot case or when no
 *	handler can run.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Someone else is handling/polling this irq. */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * No handler or disabled: mark it pending, mask it and leave
	 * via the conditional-EOI exit path.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	/*
	 * Unhandled exit: still issue the EOI unless the chip asked for
	 * EOI only when the interrupt was actually handled.
	 */
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Edge interrupts are acked right after detection so further edges
 *	are not lost; edges that arrive while the handler runs are
 *	recorded in IRQS_PENDING and replayed by the do/while loop.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If we're currently running this IRQ, or it's disabled, or
	 * there is no action: mark it pending, mask+ack it and bail -
	 * unless the spurious-irq poller tells us to take it.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq: ack before dispatching. */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action vanished while looping: mask and stop. */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * A new edge arrived while we were handling the previous
		 * one and may have left the line masked - unmask it,
		 * unless it was disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
519
520#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
521
522
523
524
525
526
527
528
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Like handle_edge_irq(), but for chips which use EOI instead of
 *	ack/mask: the EOI is issued on every exit path.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * In-progress, disabled or no action: mark pending and leave
	 * via the EOI exit, unless the poller takes it.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Replay pending edges until handled or the line is disabled. */
	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
563#endif
564
565
566
567
568
569
570
571
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Runs without taking the descriptor lock; acks before and EOIs
 *	after dispatch when the chip provides those callbacks.
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
587
588
589
590
591
592
593
594
595
596
597
598
599
/**
 *	handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Like handle_percpu_irq(), but calls the single installed action's
 *	handler directly with this CPU's slot of the percpu dev_id.
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	/* Per-CPU device id for the current CPU. */
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
619
/*
 * Install @handle as the flow handler for @irq.  A NULL @handle means
 * "uninstall" (handle_bad_irq is set and the line is masked/disabled).
 * @is_chained marks the irq as a demultiplexing parent: it is excluded
 * from probing/requesting/threading and started immediately.
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		/* A real handler needs a real chip installed first. */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall: quiesce the line before swapping the handler. */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
657
/* Convenience wrapper: set chip, flow handler and name in one call. */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
665
/*
 * Clear the @clr bits and set the @set bits in the irq's settings,
 * then rebuild the derived irqd state flags (balancing, per-cpu,
 * pcntxt move, level and trigger mask) from the updated settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Wipe the derived flags, then re-derive them below. */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
691
692
693
694
695
696
697
698void irq_cpu_online(void)
699{
700 struct irq_desc *desc;
701 struct irq_chip *chip;
702 unsigned long flags;
703 unsigned int irq;
704
705 for_each_active_irq(irq) {
706 desc = irq_to_desc(irq);
707 if (!desc)
708 continue;
709
710 raw_spin_lock_irqsave(&desc->lock, flags);
711
712 chip = irq_data_get_irq_chip(&desc->irq_data);
713 if (chip && chip->irq_cpu_online &&
714 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
715 !irqd_irq_disabled(&desc->irq_data)))
716 chip->irq_cpu_online(&desc->irq_data);
717
718 raw_spin_unlock_irqrestore(&desc->lock, flags);
719 }
720}
721
722
723
724
725
726
727
728void irq_cpu_offline(void)
729{
730 struct irq_desc *desc;
731 struct irq_chip *chip;
732 unsigned long flags;
733 unsigned int irq;
734
735 for_each_active_irq(irq) {
736 desc = irq_to_desc(irq);
737 if (!desc)
738 continue;
739
740 raw_spin_lock_irqsave(&desc->lock, flags);
741
742 chip = irq_data_get_irq_chip(&desc->irq_data);
743 if (chip && chip->irq_cpu_offline &&
744 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
745 !irqd_irq_disabled(&desc->irq_data)))
746 chip->irq_cpu_offline(&desc->irq_data);
747
748 raw_spin_unlock_irqrestore(&desc->lock, flags);
749 }
750}
751