/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/seq_file.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);
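
/*
 * Run on SIGIO: poll the registered descriptors via
 * os_waiting_for_events and hand every fd that reported activity to
 * do_IRQ.  Any IRQs released by the handlers are reaped by free_irqs
 * before returning.
 */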
void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);
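
/*
 * Register fd as an interrupt source: put it into asynchronous mode,
 * link a new irq_fd onto active_fds (rejecting duplicate fd/type
 * pairs), and grow the OS-level pollfd array as needed.  The lock is
 * dropped around kmalloc, so the pollfd sizing is retried under the
 * lock until it sticks.
 */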
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the current pollfd array was too small
		 * for the new entry and tmp_pfd is missing or too
		 * small; at least n bytes are needed.  Drop the lock
		 * so that kmalloc (which might sleep) can run.  If
		 * something else changed the pollfds array meanwhile,
		 * the buffer is freed and the allocation is retried
		 * with the new size.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}
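
/*
 * Remove every irq_fd matched by the given predicate; the list walk
 * and unlinking are delegated to os_free_irq_by_cb.
 */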
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}
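
/*
 * Re-arm a descriptor after its IRQ has been handled: restore its fd
 * in the pollfd array and make sure SIGIO is delivered for it again.
 */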
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}
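
/*
 * The complement of reactivate_fd: clear the descriptor's pollfd slot
 * and stop SIGIO delivery for it.
 */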
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking is done
 * here - by this point nothing else should be modifying the list.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}

	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
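
/*
 * Request an IRQ and, if fd is valid, also register the descriptor as
 * its interrupt source via activate_fd.
 */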
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char * devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
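
/*
 * Typical use (illustrative sketch only - MY_IRQ, my_intr and the
 * driver's fd are hypothetical names): a driver that has opened a
 * host file descriptor and wants read events delivered as MY_IRQ
 * would do
 *
 *	err = um_request_irq(MY_IRQ, fd, IRQ_READ, my_intr,
 *			     IRQF_SHARED, "my-dev", dev);
 *
 * and call reactivate_fd(fd, MY_IRQ) from my_intr once the event has
 * been consumed, since the pollfd slot is disarmed while the IRQ is
 * being handled.
 */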

/*
 * hw_interrupt_type must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}

/* This is used for everything else than the timer. */
static struct hw_interrupt_type normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

static struct hw_interrupt_type SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at its bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the signal
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the signal, so it sets bits in pending_mask and exits
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the signal
 *
 * The first job of to_irq_stack is to figure out which of these cases
 * is happening - a non-zero value already in pending_mask means that
 * someone else is in the middle of setting up the stack, so this
 * signal's bits are merged into pending_mask and the handler backs
 * out immediately.
 */
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}
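
/*
 * Leave the IRQ stack: copy the possibly-updated thread_info back to
 * the task's own stack and hand any IRQs that arrived meanwhile back
 * to the caller as a mask.  Bit 0 of pending_mask is used internally
 * as a busy marker and is stripped before returning.
 */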
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}