/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 */
#define KMSG_COMPONENT          "hvc_iucv"
#define pr_fmt(fmt)             KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"

/* General device driver settings */
#define HVC_IUCV_MAGIC          0xc9e4c3e5      /* "IUCV" in EBCDIC */
#define MAX_HVC_IUCV_LINES      HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR          (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION             0x02    /* Message version */
#define MSG_TYPE_ERROR          0x01    /* Error message */
#define MSG_TYPE_TERMENV        0x02    /* Terminal environment variable */
#define MSG_TYPE_TERMIOS        0x04    /* Terminal IO struct update */
#define MSG_TYPE_WINSIZE        0x08    /* Terminal window size update */
#define MSG_TYPE_DATA           0x10    /* Terminal data */

struct iucv_tty_msg {
        u8      version;                /* Message version */
        u8      type;                   /* Message type */
#define MSG_MAX_DATALEN         ((u16)(~0))
        u16     datalen;                /* Payload length */
        u8      data[];                 /* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)             ((s) + offsetof(struct iucv_tty_msg, data))

enum iucv_state_t {
        IUCV_DISCONN    = 0,
        IUCV_CONNECTED  = 1,
        IUCV_SEVERED    = 2,
};

enum tty_state_t {
        TTY_CLOSED      = 0,
        TTY_OPENED      = 1,
};

struct hvc_iucv_private {
        struct hvc_struct       *hvc;           /* HVC struct reference */
        u8                      srv_name[8];    /* IUCV service name (ebcdic) */
        unsigned char           is_console;     /* Linux console usage flag */
        enum iucv_state_t       iucv_state;     /* IUCV connection status */
        enum tty_state_t        tty_state;      /* TTY status */
        struct iucv_path        *path;          /* IUCV path pointer */
        spinlock_t              lock;           /* hvc_iucv_private lock */
#define SNDBUF_SIZE             (PAGE_SIZE)     /* must be < MSG_MAX_DATALEN */
        void                    *sndbuf;        /* send buffer */
        size_t                  sndbuf_len;     /* length of send buffer */
#define QUEUE_SNDBUF_DELAY      (HZ / 25)
        struct delayed_work     sndbuf_work;    /* work: send iucv msg(s) */
        wait_queue_head_t       sndbuf_waitq;   /* wait for send completion */
        struct list_head        tty_outqueue;   /* outgoing IUCV messages */
        struct list_head        tty_inqueue;    /* incoming IUCV messages */
};

struct iucv_tty_buffer {
        struct list_head        list;   /* list pointer */
        struct iucv_message     msg;    /* store an IUCV message */
        size_t                  offset; /* data buffer offset */
        struct iucv_tty_msg     *mbuf;  /* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX        (0)             /* console index */

/* z/VM user ID filter: list of blank-padded, upper-case user IDs */
#define MAX_VMID_FILTER         (500)
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
        .path_pending           = hvc_iucv_path_pending,
        .path_severed           = hvc_iucv_path_severed,
        .message_complete       = hvc_iucv_msg_complete,
        .message_pending        = hvc_iucv_msg_pending,
};
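
/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function maps an HVC virtual terminal number to the corresponding
 * struct hvc_iucv_private instance; for example, vtermno HVC_IUCV_MAGIC
 * maps to index 0 of hvc_iucv_table.
 * Returns NULL if the terminal number is out of range.
 */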
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
        if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC >= hvc_iucv_devices))
                return NULL;
        return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}
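
/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal data buffer used to store data.
 * @flags:	Memory allocation flags.
 *
 * This function allocates a new struct iucv_tty_buffer element and,
 * optionally, allocates an internal data buffer with the specified size
 * @size. Note that the total message size arises from the internal buffer
 * size and the members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */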
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
        struct iucv_tty_buffer *bufp;

        bufp = mempool_alloc(hvc_iucv_mempool, flags);
        if (!bufp)
                return NULL;
        memset(bufp, 0, sizeof(*bufp));

        if (size > 0) {
                bufp->msg.length = MSG_SIZE(size);
                bufp->mbuf = kmalloc(bufp->msg.length, flags);
                if (!bufp->mbuf) {
                        mempool_free(bufp, hvc_iucv_mempool);
                        return NULL;
                }
                bufp->mbuf->version = MSG_VERSION;
                bufp->mbuf->type = MSG_TYPE_DATA;
                bufp->mbuf->datalen = (u16) size;
        }
        return bufp;
}
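
/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */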
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
        kfree(bufp->mbuf);
        mempool_free(bufp, hvc_iucv_mempool);
}
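
/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List head pointer to a list containing struct iucv_tty_buffer
 *		elements.
 */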
static void destroy_tty_buffer_list(struct list_head *list)
{
        struct iucv_tty_buffer *ent, *next;

        list_for_each_entry_safe(ent, next, list, list) {
                list_del(&ent->list);
                destroy_tty_buffer(ent);
        }
}
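
/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * remaining data is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal; zero if
 * there is no established IUCV path or no pending data message is available.
 * If the IUCV path has been severed, -EPIPE is returned to cause a hangup
 * (that is issued by the HVC layer).
 */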
static int hvc_iucv_write(struct hvc_iucv_private *priv,
                          char *buf, int count, int *has_more_data)
{
        struct iucv_tty_buffer *rb;
        int written;
        int rc;

        /* immediately return 0 if there is no IUCV connection */
        if (priv->iucv_state == IUCV_DISCONN)
                return 0;

        /* if the IUCV path has been severed, return -EPIPE to inform the
         * HVC layer to hang up the tty device */
        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        /* check if there are pending messages */
        if (list_empty(&priv->tty_inqueue))
                return 0;

        /* receive an IUCV message and flush data to the tty buffer */
        rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

        written = 0;
        if (!rb->mbuf) { /* message not yet received ... */
                /* allocate mem to store msg data; if no memory is available,
                 * leave the buffer on the list and re-try later */
                rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
                if (!rb->mbuf)
                        return -ENOMEM;

                rc = __iucv_message_receive(priv->path, &rb->msg, 0,
                                            rb->mbuf, rb->msg.length, NULL);
                switch (rc) {
                case 0: /* Successful       */
                        break;
                case 2: /* No message found */
                case 9: /* Message purged   */
                        break;
                default:
                        written = -EIO;
                }
                /* remove buffer if an error has occurred or the received data
                 * is not correct */
                if (rc || (rb->mbuf->version != MSG_VERSION) ||
                    (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
                        goto out_remove_buffer;
        }

        switch (rb->mbuf->type) {
        case MSG_TYPE_DATA:
                written = min_t(int, rb->mbuf->datalen - rb->offset, count);
                memcpy(buf, rb->mbuf->data + rb->offset, written);
                if (written < (rb->mbuf->datalen - rb->offset)) {
                        rb->offset += written;
                        *has_more_data = 1;
                        goto out_written;
                }
                break;

        case MSG_TYPE_WINSIZE:
                if (rb->mbuf->datalen != sizeof(struct winsize))
                        break;
                hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
                break;

        case MSG_TYPE_ERROR:    /* ignored ... */
        case MSG_TYPE_TERMENV:  /* ignored ... */
        case MSG_TYPE_TERMIOS:  /* ignored ... */
                break;
        }

out_remove_buffer:
        list_del(&rb->list);
        destroy_tty_buffer(rb);
        *has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
        return written;
}
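
/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */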
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
        int written;
        int has_more_data;

        if (count <= 0)
                return 0;

        if (!priv)
                return -ENODEV;

        spin_lock(&priv->lock);
        has_more_data = 0;
        written = hvc_iucv_write(priv, buf, count, &has_more_data);
        spin_unlock(&priv->lock);

        /* if there are still messages on the queue, schedule another run */
        if (has_more_data)
                hvc_kick();

        return written;
}
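
/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that has been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to the HVC layer to cause a tty hangup).
 */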
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
                          int count)
{
        size_t len;

        if (priv->iucv_state == IUCV_DISCONN)
                return count;                   /* ignore data */

        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
        if (!len)
                return 0;

        memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
        priv->sndbuf_len += len;

        if (priv->iucv_state == IUCV_CONNECTED)
                schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

        return len;
}
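
/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output
 * data is sent via an IUCV message and the number of bytes sent is returned.
 * Returns -EPIPE if an existing IUCV communication path has been severed,
 * and -EIO if no IUCV connection is established (yet).
 */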
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
        struct iucv_tty_buffer *sb;
        int rc, len;

        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        if (priv->iucv_state == IUCV_DISCONN)
                return -EIO;

        if (!priv->sndbuf_len)
                return 0;

        /* allocate internal buffer to store msg data and also compute the
         * total message length */
        sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
        if (!sb)
                return -ENOMEM;

        memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
        sb->mbuf->datalen = (u16) priv->sndbuf_len;
        sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

        list_add_tail(&sb->list, &priv->tty_outqueue);

        rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
                                 (void *) sb->mbuf, sb->msg.length);
        if (rc) {
                /* remove buffer if sending failed */
                list_del(&sb->list);
                destroy_tty_buffer(sb);
        }
        len = priv->sndbuf_len;
        priv->sndbuf_len = 0;

        return len;
}
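
/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV.
 */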
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
        struct hvc_iucv_private *priv;

        priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
        if (!priv)
                return;

        spin_lock_bh(&priv->lock);
        hvc_iucv_send(priv);
        spin_unlock_bh(&priv->lock);
}
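
/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */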
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
        int queued;

        if (count <= 0)
                return 0;

        if (!priv)
                return -ENODEV;

        spin_lock(&priv->lock);
        queued = hvc_iucv_queue(priv, buf, count);
        spin_unlock(&priv->lock);

        return queued;
}
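
/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a
 *	struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct
 * hvc_iucv_private instance that corresponds to the tty @hp.
 * Always returns 0 (success).
 */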
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return 0;

        spin_lock_bh(&priv->lock);
        priv->tty_state = TTY_OPENED;
        spin_unlock_bh(&priv->lock);

        return 0;
}
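
/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV terminal device.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */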
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
        destroy_tty_buffer_list(&priv->tty_outqueue);
        destroy_tty_buffer_list(&priv->tty_inqueue);

        priv->tty_state = TTY_CLOSED;
        priv->iucv_state = IUCV_DISCONN;

        priv->sndbuf_len = 0;
}
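
/**
 * tty_outqueue_empty() - Test if the tty outqueue is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */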
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
        int rc;

        spin_lock_bh(&priv->lock);
        rc = list_empty(&priv->tty_outqueue);
        spin_unlock_bh(&priv->lock);

        return rc;
}
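
/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */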
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
        int sync_wait;

        cancel_delayed_work_sync(&priv->sndbuf_work);

        spin_lock_bh(&priv->lock);
        hvc_iucv_send(priv);            /* force sending buffered data */
        sync_wait = !list_empty(&priv->tty_outqueue);   /* anything queued? */
        spin_unlock_bh(&priv->lock);

        if (sync_wait)
                wait_event_timeout(priv->sndbuf_waitq,
                                   tty_outqueue_empty(priv), HZ);
}
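
/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * in order to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *		disable writing to the tty by other applications.)
 */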
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return;

        flush_sndbuf_sync(priv);

        spin_lock_bh(&priv->lock);
        /* NOTE: If the hangup was scheduled by ourself (from the iucv
         *       path_severed callback [IUCV_SEVERED]), we have to clean up
         *       our structure and to set the state to TTY_CLOSED.
         *       If the tty was hung up otherwise (e.g. vhangup()), then we
         *       ignore this hangup and keep an established IUCV path open
         *       (the reason is that we are not able to connect back to the
         *       client if we disconnect on hang up). */
        priv->tty_state = TTY_CLOSED;

        if (priv->iucv_state == IUCV_SEVERED)
                hvc_iucv_cleanup(priv);
        spin_unlock_bh(&priv->lock);
}
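
/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has
 * been closed. The function cleans up the struct hvc_iucv_private instance
 * and severs a pending IUCV communication path.
 */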
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;
        struct iucv_path *path;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return;

        flush_sndbuf_sync(priv);

        spin_lock_bh(&priv->lock);
        path = priv->path;              /* save reference to the IUCV path */
        priv->path = NULL;
        hvc_iucv_cleanup(priv);
        spin_unlock_bh(&priv->lock);

        /* sever the IUCV path outside of priv->lock due to its lock ordering
         * with respect to internal IUCV locks */
        if (path) {
                iucv_path_sever(path, NULL);
                iucv_path_free(path);
        }
}
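
/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (is not NULL terminated)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */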
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
        size_t i;

        /* Note: default policy is ACCEPT if no filter is set */
        if (!hvc_iucv_filter_size)
                return 0;

        for (i = 0; i < hvc_iucv_filter_size; i++)
                if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
                        return 0;
        return 1;
}
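
/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path
 * belongs to a terminal managed by this device driver. If so, it ensures
 * that the terminal is not accessed multiple times (only one connection
 * per terminal is allowed). If the terminal is not yet connected, the
 * pending path is accepted and associated to the appropriate struct
 * hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */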
static int hvc_iucv_path_pending(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        struct hvc_iucv_private *priv;
        u8 nuser_data[16];
        u8 vm_user_id[9];
        int i, rc;

        priv = NULL;
        for (i = 0; i < hvc_iucv_devices; i++)
                if (hvc_iucv_table[i] &&
                    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
                        priv = hvc_iucv_table[i];
                        break;
                }
        if (!priv)
                return -ENODEV;

        /* enforce that ipvmid is allowed to connect to us */
        read_lock(&hvc_iucv_filter_lock);
        rc = hvc_iucv_filter_connreq(ipvmid);
        read_unlock(&hvc_iucv_filter_lock);
        if (rc) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                memcpy(vm_user_id, ipvmid, 8);
                vm_user_id[8] = 0;
                pr_info("A connection request from z/VM user ID %s "
                        "was refused\n", vm_user_id);
                return 0;
        }

        spin_lock(&priv->lock);

        /* If the terminal is already connected or being severed, then sever
         * this path to enforce that there is only ONE established
         * communication path per terminal. */
        if (priv->iucv_state != IUCV_DISCONN) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                goto out_path_handled;
        }

        /* accept path */
        memcpy(nuser_data, ipuser + 8, 8);  /* remote service name */
        memcpy(nuser_data + 8, ipuser, 8);  /* local service name */
        path->msglim = 0xffff;              /* IUCV MSGLIMIT */
        path->flags &= ~IUCV_IPRMDATA;      /* no parameter-list data */
        rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
        if (rc) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                goto out_path_handled;
        }
        priv->path = path;
        priv->iucv_state = IUCV_CONNECTED;

        /* flush buffered output data to the newly established connection */
        schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
        spin_unlock(&priv->lock);
        return 0;
}
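
/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Severed path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function sets the iucv state to IUCV_SEVERED for the associated
 * struct hvc_iucv_private instance and severs and frees the path.
 * Later, the IUCV_SEVERED state triggers a tty hangup
 * (hvc_iucv_get_chars() / hvc_iucv_write()).
 *
 * Locking:	struct hvc_iucv_private->lock
 */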
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
        struct hvc_iucv_private *priv = path->private;

        spin_lock(&priv->lock);
        priv->iucv_state = IUCV_SEVERED;

        /* NOTE: If the tty has not yet been opened (e.g. by getty) or has
         *       been closed, clean up the hvc_iucv_private structure to
         *       allow re-connects.
         *       If the tty has been opened, let get_chars() return -EPIPE to
         *       signal the hvc console layer to hang up the tty device.
         *
         *       If this IUCV terminal is also used as Linux console, then
         *       invoke the cleanup but do not leave tty_state at TTY_CLOSED
         *       (the console is always open). */
        priv->path = NULL;
        if (priv->tty_state == TTY_CLOSED)
                hvc_iucv_cleanup(priv);
        else
                if (priv->is_console) {
                        hvc_iucv_cleanup(priv);
                        priv->tty_state = TTY_OPENED;
                } else
                        hvc_kick();
        spin_unlock(&priv->lock);

        /* finally sever the path (outside of priv->lock due to lock ordering) */
        iucv_path_sever(path, ipuser);
        iucv_path_free(path);
}
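
/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */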
static void hvc_iucv_msg_pending(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct hvc_iucv_private *priv = path->private;
        struct iucv_tty_buffer *rb;

        /* reject messages that exceed the max size of iucv_tty_msg->datalen */
        if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
                iucv_message_reject(path, msg);
                return;
        }

        spin_lock(&priv->lock);

        /* reject messages if the tty has not yet been opened */
        if (priv->tty_state == TTY_CLOSED) {
                iucv_message_reject(path, msg);
                goto unlock_return;
        }

        /* allocate a tty buffer to save the iucv msg only */
        rb = alloc_tty_buffer(0, GFP_ATOMIC);
        if (!rb) {
                iucv_message_reject(path, msg);
                goto unlock_return;     /* -ENOMEM */
        }
        rb->msg = *msg;

        list_add_tail(&rb->list, &priv->tty_inqueue);

        hvc_kick();     /* wake up hvc thread */

unlock_return:
        spin_unlock(&priv->lock);
}
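
/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery; it removes the
 * message from the outqueue and wakes up threads waiting in
 * flush_sndbuf_sync().
 */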
static void hvc_iucv_msg_complete(struct iucv_path *path,
                                  struct iucv_message *msg)
{
        struct hvc_iucv_private *priv = path->private;
        struct iucv_tty_buffer *ent, *next;
        LIST_HEAD(list_remove);

        spin_lock(&priv->lock);
        list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
                if (ent->msg.id == msg->id) {
                        list_move(&ent->list, &list_remove);
                        break;
                }
        wake_up(&priv->sndbuf_waitq);
        spin_unlock(&priv->lock);
        destroy_tty_buffer_list(&list_remove);
}
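
/* HVC operations */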
static struct hv_ops hvc_iucv_ops = {
        .get_chars = hvc_iucv_get_chars,
        .put_chars = hvc_iucv_put_chars,
        .notifier_add = hvc_iucv_notifier_add,
        .notifier_del = hvc_iucv_notifier_del,
        .notifier_hangup = hvc_iucv_notifier_hangup,
};
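
/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */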
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
        struct hvc_iucv_private *priv;
        char name[9];
        int rc;

        priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        spin_lock_init(&priv->lock);
        INIT_LIST_HEAD(&priv->tty_outqueue);
        INIT_LIST_HEAD(&priv->tty_inqueue);
        INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
        init_waitqueue_head(&priv->sndbuf_waitq);

        priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
        if (!priv->sndbuf) {
                kfree(priv);
                return -ENOMEM;
        }

        /* set console flag */
        priv->is_console = is_console;

        /* allocate hvc device */
        priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
                              HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
        if (IS_ERR(priv->hvc)) {
                rc = PTR_ERR(priv->hvc);
                free_page((unsigned long) priv->sndbuf);
                kfree(priv);
                return rc;
        }

        /* notify HVC thread instead of using polling */
        priv->hvc->irq_requested = 1;

        /* setup iucv related information */
        snprintf(name, 9, "lnxhvc%-2d", id);
        memcpy(priv->srv_name, name, 8);
        ASCEBC(priv->srv_name, 8);

        hvc_iucv_table[id] = priv;
        return 0;
}
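
/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location where to store the parsed z/VM user ID
 *
 * The parsed user ID is blank-padded to eight characters and converted to
 * upper case; for example, the filter string "lxtermA,lxtermB" yields the
 * entries "LXTERMA " and "LXTERMB ".
 * Returns a pointer to the remaining list of user IDs, or an ERR_PTR()
 * if the entry is empty or exceeds eight characters.
 */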
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
        const char *nextdelim, *residual;
        size_t len;

        nextdelim = strchr(filter, ',');
        if (nextdelim) {
                len = nextdelim - filter;
                residual = nextdelim + 1;
        } else {
                len = strlen(filter);
                residual = filter + len;
        }

        if (len == 0)
                return ERR_PTR(-EINVAL);

        /* check for a trailing '\n' (filter set through sysfs) */
        if (filter[len - 1] == '\n')
                len--;

        if (len > 8)
                return ERR_PTR(-EINVAL);

        /* pad with blanks and store the upper case version of the user ID */
        memset(dest, ' ', 8);
        while (len--)
                dest[len] = toupper(filter[len]);
        return residual;
}
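
/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses @val into an array of 8-byte user ID entries and
 * replaces the active filter under hvc_iucv_filter_lock. An empty string
 * (or a single newline) clears the filter.
 * Returns 0 on success; -ENOSPC if the list exceeds MAX_VMID_FILTER
 * entries; -ENOMEM or -EINVAL on other failures.
 */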
static int hvc_iucv_setup_filter(const char *val)
{
        const char *residual;
        int err;
        size_t size, count;
        void *array, *old_filter;

        count = strlen(val);
        if (count == 0 || (count == 1 && val[0] == '\n')) {
                size = 0;
                array = NULL;
                goto out_replace_filter;        /* clear filter */
        }

        /* count user IDs in order to allocate sufficient memory */
        size = 1;
        residual = val;
        while ((residual = strchr(residual, ',')) != NULL) {
                residual++;
                size++;
        }

        /* check if the specified list exceeds the filter limit */
        if (size > MAX_VMID_FILTER)
                return -ENOSPC;

        array = kzalloc(size * 8, GFP_KERNEL);
        if (!array)
                return -ENOMEM;

        count = size;
        residual = val;
        while (*residual && count) {
                residual = hvc_iucv_parse_filter(residual,
                                                 array + ((size - count) * 8));
                if (IS_ERR(residual)) {
                        err = PTR_ERR(residual);
                        kfree(array);
                        goto out_err;
                }
                count--;
        }

out_replace_filter:
        write_lock_bh(&hvc_iucv_filter_lock);
        old_filter = hvc_iucv_filter;
        hvc_iucv_filter_size = size;
        hvc_iucv_filter = array;
        write_unlock_bh(&hvc_iucv_filter_lock);
        kfree(old_filter);

        err = 0;
out_err:
        return err;
}
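
/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as a
 * comma-separated list of user IDs in @val.
 * If the slab allocator is not yet available (early boot), the string is
 * saved in hvc_iucv_filter_string and processed later in hvc_iucv_init().
 */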
static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
{
        int rc;

        if (!MACHINE_IS_VM || !hvc_iucv_devices)
                return -ENODEV;

        if (!val)
                return -EINVAL;

        rc = 0;
        if (slab_is_available())
                rc = hvc_iucv_setup_filter(val);
        else
                hvc_iucv_filter_string = val;   /* defer filter setup */
        return rc;
}
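
/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store the filter as a comma-separated list
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */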
static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
{
        int rc;
        size_t index, len;
        void *start, *end;

        if (!MACHINE_IS_VM || !hvc_iucv_devices)
                return -ENODEV;

        rc = 0;
        read_lock_bh(&hvc_iucv_filter_lock);
        for (index = 0; index < hvc_iucv_filter_size; index++) {
                start = hvc_iucv_filter + (8 * index);
                end = memchr(start, ' ', 8);
                len = (end) ? end - start : 8;
                memcpy(buffer + rc, start, len);
                rc += len;
                buffer[rc++] = ',';
        }
        read_unlock_bh(&hvc_iucv_filter_lock);
        if (rc)
                buffer[--rc] = '\0';    /* replace the last comma */
        return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)
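
/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 *
 * The function sets up the z/VM user ID filter (if specified at boot time),
 * creates the memory pool for struct iucv_tty_buffer elements, registers
 * the first terminal device as Linux console, allocates the HVC terminal
 * devices and registers the IUCV callback handler.
 */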
static int __init hvc_iucv_init(void)
{
        int rc;
        unsigned int i;

        if (!hvc_iucv_devices)
                return -ENODEV;

        if (!MACHINE_IS_VM) {
                pr_notice("The z/VM IUCV HVC device driver cannot "
                          "be used without z/VM\n");
                rc = -ENODEV;
                goto out_error;
        }

        if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
                pr_err("%lu is not a valid value for the hvc_iucv= "
                       "kernel parameter\n", hvc_iucv_devices);
                rc = -EINVAL;
                goto out_error;
        }

        /* parse the hvc_iucv_allow string and create the z/VM user ID
         * filter list */
        if (hvc_iucv_filter_string) {
                rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
                switch (rc) {
                case 0:
                        break;
                case -ENOMEM:
                        pr_err("Allocating memory failed with "
                               "reason code=%d\n", 3);
                        goto out_error;
                case -EINVAL:
                        pr_err("hvc_iucv_allow= does not specify a valid "
                               "z/VM user ID list\n");
                        goto out_error;
                case -ENOSPC:
                        pr_err("hvc_iucv_allow= specifies too many "
                               "z/VM user IDs\n");
                        goto out_error;
                default:
                        goto out_error;
                }
        }

        hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
                                        sizeof(struct iucv_tty_buffer),
                                        0, 0, NULL);
        if (!hvc_iucv_buffer_cache) {
                pr_err("Allocating memory failed with reason code=%d\n", 1);
                rc = -ENOMEM;
                goto out_error;
        }

        hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
                                                    hvc_iucv_buffer_cache);
        if (!hvc_iucv_mempool) {
                pr_err("Allocating memory failed with reason code=%d\n", 2);
                kmem_cache_destroy(hvc_iucv_buffer_cache);
                rc = -ENOMEM;
                goto out_error;
        }

        /* register the first terminal device as console
         * (must be done before allocating hvc terminal devices) */
        rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
        if (rc) {
                pr_err("Registering HVC terminal device as "
                       "Linux console failed\n");
                goto out_error_memory;
        }

        /* allocate hvc_iucv_private structs */
        for (i = 0; i < hvc_iucv_devices; i++) {
                rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
                if (rc) {
                        pr_err("Creating a new HVC terminal device "
                               "failed with error code=%d\n", rc);
                        goto out_error_hvc;
                }
        }

        /* register the IUCV callback handler last; if this fails, nothing
         * has been registered with IUCV, so only the HVC devices must be
         * released */
        rc = iucv_register(&hvc_iucv_handler, 0);
        if (rc) {
                pr_err("Registering IUCV handlers failed with error code=%d\n",
                       rc);
                goto out_error_hvc;
        }

        return 0;

out_error_hvc:
        for (i = 0; i < hvc_iucv_devices; i++)
                if (hvc_iucv_table[i]) {
                        if (hvc_iucv_table[i]->hvc)
                                hvc_remove(hvc_iucv_table[i]->hvc);
                        kfree(hvc_iucv_table[i]);
                }
out_error_memory:
        mempool_destroy(hvc_iucv_mempool);
        kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
        hvc_iucv_devices = 0;   /* ensure that we do not provide any device */
        return rc;
}
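
/**
 * hvc_iucv_config() - Parsing of the hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric: number of terminal devices)
 */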
static int __init hvc_iucv_config(char *val)
{
        return strict_strtoul(val, 10, &hvc_iucv_devices);
}


device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);