linux/net/iucv/af_iucv.c
/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

/* Timers */
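/*
 * Connection timed out: flag the socket with ETIMEDOUT, wake up any
 * waiters and drop the reference taken when the timer was armed.
 */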
static void iucv_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        sk->sk_err = ETIMEDOUT;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);

        iucv_sock_kill(sk);
        sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}

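/* Look up a bound socket by its 8-byte name; caller holds iucv_sk_list.lock. */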
static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

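/* Free any skbs still queued when the last reference to the socket is gone. */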
static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
        sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;
        unsigned long timeo;

        iucv_sock_clear_timer(sk);
        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;

                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
                }

                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        err = iucv_path_sever(iucv->path, user_data);
                        iucv_path_free(iucv->path);
                        iucv->path = NULL;
                }

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

                sock_set_flag(sk, SOCK_ZAPPED);
                break;

        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
        }

        release_sock(sk);
        iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

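/* Allocate and initialize a new IUCV sock and link it into the socket list. */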
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
        spin_lock_init(&iucv_sk(sk)->accept_q_lock);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
        spin_lock_init(&iucv_sk(sk)->message_q.lock);
        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;

        if (sock->type != SOCK_STREAM)
                return -ESOCKTNOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &iucv_sock_ops;

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

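/* Queue a new child socket on the listening parent's accept queue. */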
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

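/*
 * Dequeue the next usable child socket from the parent's accept queue,
 * dropping any that are already closed.  If newsock is given, the child
 * is grafted onto it.
 */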
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_SEVERED ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        if (sk->sk_state == IUCV_SEVERED)
                                sk->sk_state = IUCV_DISCONN;

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

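/* Sleep until the socket reaches one of the two given states or timeo expires. */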
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
                         unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        add_wait_queue(sk->sk_sleep, &wait);
        while (sk->sk_state != state && sk->sk_state != state2) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                err = sock_error(sk);
                if (err)
                        break;
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);
        return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err;

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path) {
                err = 0;
                goto done_unlock;
        }

        /* Bind the socket */
        memcpy(iucv->src_name, sa->siucv_name, 8);

        /* Copy the user id */
        memcpy(iucv->src_user_id, iucv_userid, 8);
        sk->sk_state = IUCV_BOUND;
        err = 0;

done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char query_buffer[80];
        char name[12];
        int err = 0;

        /* Set the userid and name */
        cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
        if (unlikely(err))
                return -EPROTO;

        memcpy(iucv->src_user_id, query_buffer, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        unsigned char user_data[16];
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_type != SOCK_STREAM)
                return -EINVAL;

        iucv = iucv_sk(sk);

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv_sk(sk)->src_name);
        ASCEBC(user_data, sizeof(user_data));

        iucv = iucv_sk(sk);
        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
                goto done;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
                                sock_sndtimeo(sk, flags & O_NONBLOCK));
        }

        if (sk->sk_state == IUCV_DISCONN) {
                release_sock(sk);
                return -ECONNREFUSED;
        }

        if (err) {
                iucv_path_sever(iucv->path, NULL);
                iucv_path_free(iucv->path);
                iucv->path = NULL;
        }

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

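/* Report the peer (peer != 0) or local IUCV address of the socket. */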
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
                memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

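/*
 * Send one message on a connected socket: copy the user data into a new
 * skb, queue it on send_skb_q and hand it to the IUCV base code.  The skb
 * stays queued until the message_complete callback confirms delivery.
 */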
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        char user_id[9];
        char appl_id[9];
        int err;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state == IUCV_CONNECTED) {
                if (!(skb = sock_alloc_send_skb(sk, len,
                                                msg->msg_flags & MSG_DONTWAIT,
                                                &err)))
                        goto out;

                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }

                txmsg.class = 0;
                memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
                txmsg.tag = iucv->send_tag++;
                memcpy(skb->cb, &txmsg.tag, 4);
                skb_queue_tail(&iucv->send_skb_q, skb);
                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                        (void *) skb->data, skb->len);
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err("Application %s on z/VM guest %s"
                                       " exceeds message limit\n",
                                       appl_id, user_id);
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }

        } else {
                err = -ENOTCONN;
                goto out;
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

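/*
 * Split a received message into skbs of at most sk_rcvbuf/4 bytes and
 * queue the fragments on the socket's backlog queue.
 */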
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
        int dataleft, size, copied = 0;
        struct sk_buff *nskb;

        dataleft = len;
        while (dataleft) {
                if (dataleft >= sk->sk_rcvbuf / 4)
                        size = sk->sk_rcvbuf / 4;
                else
                        size = dataleft;

                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                if (!nskb)
                        return -ENOMEM;

                memcpy(nskb->data, skb->data + copied, size);
                copied += size;
                dataleft -= size;

                skb_reset_transport_header(nskb);
                skb_reset_network_header(nskb);
                nskb->len = size;

                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
        }

        return 0;
}

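/*
 * Receive a pending IUCV message into the given skb and queue it on the
 * socket's receive queue, fragmenting it first if it would not fit.
 */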
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;

        if (msg->flags & IPRMDATA) {
                skb->data = NULL;
                skb->len = 0;
        } else {
                rc = iucv_message_receive(path, msg, 0, skb->data,
                                          msg->length, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                if (skb->truesize >= sk->sk_rcvbuf / 4) {
                        rc = iucv_fragment_skb(sk, skb, msg->length);
                        kfree_skb(skb);
                        skb = NULL;
                        if (rc) {
                                iucv_path_sever(path, NULL);
                                return;
                        }
                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
                } else {
                        skb_reset_transport_header(skb);
                        skb_reset_network_header(skb);
                        skb->len = msg->length;
                }
        }

        if (sock_queue_rcv_skb(sk, skb))
                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

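/* Work off messages parked on message_q while the receive queue was busy. */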
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}

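/*
 * Copy received data to user space; once the receive queue drains, refill
 * it from the backlog queue and from any messages saved on message_q.
 */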
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int target, copied = 0;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;

        if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        copied = min_t(unsigned int, skb->len, len);

        cskb = skb;
        if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                if (copied == 0)
                        return -EFAULT;
                goto done;
        }

        len -= copied;

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {
                skb_pull(skb, copied);

                if (skb->len) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        goto done;
                }

                kfree_skb(skb);

                /* Queue backlog skbs */
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        } else {
                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                        }
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        spin_lock_bh(&iucv->message_q.lock);
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        spin_unlock_bh(&iucv->message_q.lock);
                }

        } else
                skb_queue_head(&sk->sk_receive_queue, skb);

done:
        return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk->sk_sleep, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
                mask |= POLLIN;

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;
        u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;

        default:
                sk->sk_shutdown |= how;
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                txmsg.class = 0;
                txmsg.tag = 0;
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) prmmsg, 8);
                if (err) {
                        switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
                        case 2:
                                err = -ECONNRESET;
                                break;
                        default:
                                err = -ENOTCONN;
                                break;
                        }
                }
        }

        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
                if (err)
                        err = -ENOTCONN;

                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}

static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        /* Unregister with IUCV base support */
        if (iucv_sk(sk)->path) {
                iucv_path_sever(iucv_sk(sk)->path, NULL);
                iucv_path_free(iucv_sk(sk)->path);
                iucv_sk(sk)->path = NULL;
        }

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
        if (!nsk) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                iucv_sock_kill(nsk);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk, 1);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}

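/*
 * A message is pending on the path.  Receive it immediately if the socket
 * has room, otherwise park it on message_q for iucv_process_message_q().
 */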
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *save_msg;
        int len;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return;

        if (!list_empty(&iucv->message_q.list) ||
            !skb_queue_empty(&iucv->backlog_skb_q))
                goto save_message;

        len = atomic_read(&sk->sk_rmem_alloc);
        len += msg->length + sizeof(struct sk_buff);
        if (len > sk->sk_rcvbuf)
                goto save_message;

        skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
        if (!skb)
                goto save_message;

        spin_lock(&iucv->message_q.lock);
        iucv_process_message(sk, skb, path, msg);
        spin_unlock(&iucv->message_q.lock);

        return;

save_message:
        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
        if (!save_msg)
                return;
        save_msg->path = path;
        save_msg->msg = *msg;

        spin_lock(&iucv->message_q.lock);
        list_add_tail(&save_msg->list, &iucv->message_q.list);
        spin_unlock(&iucv->message_q.lock);
}

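/*
 * Message send has completed: find the matching skb on send_skb_q by its
 * tag, free it, and finish a pending close once the queue is empty.
 */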
static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb = list->next;
        unsigned long flags;

        if (!skb_queue_empty(list)) {
                spin_lock_irqsave(&list->lock, flags);

                while (list_skb != (struct sk_buff *)list) {
                        if (!memcmp(&msg->tag, list_skb->cb, 4)) {
                                this = list_skb;
                                break;
                        }
                        list_skb = list_skb->next;
                }
                if (this)
                        __skb_unlink(this, list);

                spin_unlock_irqrestore(&list->lock, flags);

                if (this)
                        kfree_skb(this);
        }
        BUG_ON(!this);

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }

}

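/* The peer severed the path: mark the socket disconnected and wake up waiters. */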
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (!list_empty(&iucv_sk(sk)->accept_q))
                sk->sk_state = IUCV_SEVERED;
        else
                sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,
        .owner          = THIS_MODULE,
        .release        = iucv_sock_release,
        .bind           = iucv_sock_bind,
        .connect        = iucv_sock_connect,
        .listen         = iucv_sock_listen,
        .accept         = iucv_sock_accept,
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
        .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
        .shutdown       = iucv_sock_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
        .family = AF_IUCV,
        .owner  = THIS_MODULE,
        .create = iucv_sock_create,
};

static int __init afiucv_init(void)
{
        int err;

        if (!MACHINE_IS_VM) {
                pr_err("The af_iucv module cannot be loaded"
                       " without z/VM\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
                WARN_ON(err);
                err = -EPROTONOSUPPORT;
                goto out;
        }

        err = iucv_register(&af_iucv_handler, 0);
        if (err)
                goto out;
        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out_iucv;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
        return 0;

out_proto:
        proto_unregister(&iucv_proto);
out_iucv:
        iucv_unregister(&af_iucv_handler, 0);
out:
        return err;
}

static void __exit afiucv_exit(void)
{
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);