// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

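/*
 * SIGA-s (synchronize): sync the queue's buffer states between host and
 * adapter. Returns the instruction's condition code (0 on success).
 */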
static inline int do_siga_sync(unsigned long schid,
			       unsigned long out_mask, unsigned long in_mask,
			       unsigned int fc)
{
	int cc;

	asm volatile(
		"	lgr	0,%[fc]\n"
		"	lgr	1,%[schid]\n"
		"	lgr	2,%[out]\n"
		"	lgr	3,%[in]\n"
		"	siga	0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid),
		  [out] "d" (out_mask), [in] "d" (in_mask)
		: "cc", "0", "1", "2", "3");
	return cc;
}

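/*
 * SIGA-r (read): tell the adapter to process the input queues selected by
 * @mask. Returns the instruction's condition code (0 on success).
 */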
static inline int do_siga_input(unsigned long schid, unsigned long mask,
				unsigned long fc)
{
	int cc;

	asm volatile(
		"	lgr	0,%[fc]\n"
		"	lgr	1,%[schid]\n"
		"	lgr	2,%[mask]\n"
		"	siga	0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
		: "cc", "0", "1", "2");
	return cc;
}

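/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 */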
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned long fc,
				 unsigned long aob)
{
	int cc;

	asm volatile(
		"	lgr	0,%[fc]\n"
		"	lgr	1,%[schid]\n"
		"	lgr	2,%[mask]\n"
		"	lgr	3,%[aob]\n"
		"	siga	0\n"
		"	lgr	%[fc],0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc), [fc] "+&d" (fc)
		: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
		: "cc", "0", "1", "2", "3");
	*bb = fc >> 31;
	return cc;
}

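/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */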
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

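/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retry if the first buffer cannot be set.
 */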
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

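/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */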
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	/* Ensure that all preceding changes to the SBALs are visible: */
	mb();

	for (i = 0; i < count; i++) {
		WRITE_ONCE(q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}

	/* Make our SLSB changes visible: */
	mb();

	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	q->q_stats.nr_sbal_total += count;
	q->q_stats.nr_sbals[ilog2(count)]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}

static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	/* ACK the newest SBAL: */
	if (!auto_ack)
		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);

	if (!q->u.in.batch_count)
		q->u.in.batch_start = start;
	q->u.in.batch_count += count;
}

static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
				       unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	return 1;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->u.out.tasklet);
		return 0;
	}
	return -EPERM;
}

static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
					unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		*error = QDIO_ERROR_SLSB_PENDING;
		fallthrough;
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			      "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
			      q->nr, count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
				unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, count, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

void qdio_outbound_tasklet(struct tasklet_struct *t)
{
	struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
	struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
	unsigned int start = q->first_to_check;
	unsigned int error = 0;
	int count;

	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	count = get_outbound_buffer_frontier(q, start, &error);
	if (count) {
		q->first_to_check = add_buf(start, count);

		if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
			qperf_inc(q, outbound_handler);
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
				      start, count);

			q->handler(q->irq_ptr->cdev, error, q->nr, start,
				   count, q->irq_ptr->int_parm);
		}
	}

	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
	    !qdio_outbound_q_done(q))
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq) || !irq->scan_threshold)
		return;

	for_each_output_queue(irq, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	qdio_deliver_irq(irq_ptr);
	irq_ptr->last_data_irq_time = S390_lowcore.int_clock;

	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}

static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * An activate check can be the result of a Linux guest relocation
	 * (LGR), so record the relocation information:
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
				      int dstat)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(irq_ptr, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

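/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */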
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->u.out.tasklet);
	}
}

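/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */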
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	qdio_shutdown_queues(irq_ptr);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
					 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
					 irq_ptr->state == QDIO_IRQ_STATE_ERR,
					 10 * HZ);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);
	qdio_shutdown_irq(irq_ptr);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

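/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */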
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_free_queues(irq_ptr);
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

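/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input Queues
 * @no_output_qs: allocate this number of Output Queues
 */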
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}

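/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 */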
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	if (!init_data->irq_poll)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		qdio_shutdown_thinint(irq_ptr);
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

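/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */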
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

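/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */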
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust our tracking: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			q->u.in.batch_count);
	if (overlap > 0) {
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

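/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 * @aob: asynchronous operation block
 */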
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   unsigned int bufnr, unsigned int count,
			   struct qaob *aob)
{
	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;

		WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	/* Let drivers implement their own completion scanning: */
	if (!scan_threshold)
		return rc;

	/* in case of SIGA errors we must process the error immediately */
	if (used >= scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

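/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 * @aob: asynchronous operation block (outbound only)
 */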
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count, aob);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

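/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */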
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);

static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
				unsigned int *error)
{
	unsigned int start = q->first_to_check;
	int count;

	*error = 0;
	count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) :
		get_outbound_buffer_frontier(q, start, error);
	if (count == 0)
		return 0;

	*bufnr = start;

	/* only move forward if new data was found */
	q->first_to_check = add_buf(start, count);

	return count;
}

int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
		       unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	if (!irq_ptr)
		return -ENODEV;
	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);

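/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */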
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

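/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */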
int qdio_stop_irq(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;
	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);