/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 */
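
/*
 * Userspace view (a minimal, illustrative sketch only -- not part of this
 * file): an AF_IUCV stream socket is addressed by an 8-byte z/VM user id
 * plus an 8-byte application name, both blank-padded, mirroring struct
 * sockaddr_iucv from <net/iucv/af_iucv.h>.  The peer id "PEERVM" and the
 * name "APPSRV" below are hypothetical.
 *
 *      int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *      struct sockaddr_iucv addr;
 *
 *      memset(&addr, 0, sizeof(addr));
 *      addr.siucv_family = AF_IUCV;
 *      memcpy(addr.siucv_user_id, "PEERVM  ", 8);
 *      memcpy(addr.siucv_name,    "APPSRV  ", 8);
 *      connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 */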

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone
};

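/*
 * Helpers to compose the 16-byte IUCV connection user data: the remote
 * application name goes into bytes 0-7 (high_nmcpy), the local name into
 * bytes 8-15 (low_nmcpy); callers convert the result to EBCDIC via ASCEBC.
 */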
static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

/* Timers */
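/*
 * Expiry handler for sk->sk_timer: flag the socket with ETIMEDOUT, wake
 * any state waiters, kill the socket if it is already zapped and orphaned,
 * and drop a socket reference.
 */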
static void iucv_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        sk->sk_err = ETIMEDOUT;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);

        iucv_sock_kill(sk);
        sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}

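/*
 * Look up a bound socket by its 8-byte application name.  Callers must
 * hold iucv_sk_list.lock.
 */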
static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
        sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;
        unsigned long timeo;

        iucv_sock_clear_timer(sk);
        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;

                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
                }
                /* fall through */

        case IUCV_CLOSING:
                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        err = iucv_path_sever(iucv->path, user_data);
                        iucv_path_free(iucv->path);
                        iucv->path = NULL;
                }

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

                sock_set_flag(sk, SOCK_ZAPPED);
                break;

        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
        }

        release_sock(sk);
        iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
        spin_lock_init(&iucv_sk(sk)->accept_q_lock);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
        spin_lock_init(&iucv_sk(sk)->message_q.lock);
        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;
        iucv_sk(sk)->path = NULL;
        memset(&iucv_sk(sk)->src_user_id, 0, 32);

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;

        if (sock->type != SOCK_STREAM)
                return -ESOCKTNOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &iucv_sock_ops;

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_SEVERED ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        if (sk->sk_state == IUCV_SEVERED)
                                sk->sk_state = IUCV_DISCONN;

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

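/*
 * Sleep until the socket enters @state or @state2, or until the timeout
 * expires or a signal arrives.  Called with the socket locked; the lock is
 * dropped while sleeping and re-acquired before returning.
 */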
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
                         unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        add_wait_queue(sk->sk_sleep, &wait);
        while (sk->sk_state != state && sk->sk_state != state2) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                err = sock_error(sk);
                if (err)
                        break;
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);
        return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err;

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path) {
                err = 0;
                goto done_unlock;
        }

        /* Bind the socket */
        memcpy(iucv->src_name, sa->siucv_name, 8);

        /* Copy the user id */
        memcpy(iucv->src_user_id, iucv_userid, 8);
        sk->sk_state = IUCV_BOUND;
        err = 0;

done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char query_buffer[80];
        char name[12];
        int err = 0;

        /* Set the userid and name */
        cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
        if (unlikely(err))
                return -EPROTO;

        memcpy(iucv->src_user_id, query_buffer, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        unsigned char user_data[16];
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_type != SOCK_STREAM)
                return -EINVAL;

        iucv = iucv_sk(sk);

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
                goto done;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
                                sock_sndtimeo(sk, flags & O_NONBLOCK));
        }

        if (sk->sk_state == IUCV_DISCONN) {
                release_sock(sk);
                return -ECONNREFUSED;
        }

        if (err) {
                iucv_path_sever(iucv->path, NULL);
                iucv_path_free(iucv->path);
                iucv->path = NULL;
        }

done:
        release_sock(sk);
        return err;
}

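/*
 * Server-side userspace view (again a minimal, illustrative sketch only --
 * not part of this file; the application name "APPSRV" is hypothetical).
 * bind() only takes the 8-byte name; the user id is filled in from the
 * local z/VM guest:
 *
 *      int srv = socket(AF_IUCV, SOCK_STREAM, 0);
 *      struct sockaddr_iucv addr;
 *
 *      memset(&addr, 0, sizeof(addr));
 *      addr.siucv_family = AF_IUCV;
 *      memcpy(addr.siucv_name, "APPSRV  ", 8);
 *      bind(srv, (struct sockaddr *) &addr, sizeof(addr));
 *      listen(srv, 5);
 *      int conn = accept(srv, NULL, NULL);
 */
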
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
                memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        char user_id[9];
        char appl_id[9];
        int err;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state == IUCV_CONNECTED) {
                if (!(skb = sock_alloc_send_skb(sk, len,
                                                msg->msg_flags & MSG_DONTWAIT,
                                                &err)))
                        goto out;

                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }

                txmsg.class = 0;
                memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
                txmsg.tag = iucv->send_tag++;
                memcpy(skb->cb, &txmsg.tag, 4);
                skb_queue_tail(&iucv->send_skb_q, skb);
                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                        (void *) skb->data, skb->len);
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err("Application %s on z/VM guest %s"
                                       " exceeds message limit\n",
                                       appl_id, user_id);
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }

        } else {
                err = -ENOTCONN;
                goto out;
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

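/*
 * Split a message that exceeds the receive budget into chunks of at most
 * sk->sk_rcvbuf / 4 bytes, each copied into its own skb and queued on the
 * backlog queue.
 */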
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
        int dataleft, size, copied = 0;
        struct sk_buff *nskb;

        dataleft = len;
        while (dataleft) {
                if (dataleft >= sk->sk_rcvbuf / 4)
                        size = sk->sk_rcvbuf / 4;
                else
                        size = dataleft;

                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                if (!nskb)
                        return -ENOMEM;

                memcpy(nskb->data, skb->data + copied, size);
                copied += size;
                dataleft -= size;

                skb_reset_transport_header(nskb);
                skb_reset_network_header(nskb);
                nskb->len = size;

                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
        }

        return 0;
}

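/*
 * Receive one IUCV message into @skb.  Messages flagged IPRMDATA are
 * delivered as zero-length skbs; oversized messages are fragmented onto
 * the backlog queue.  The resulting skb goes to the receive queue, or is
 * re-queued on the backlog if the receive queue will not take it.
 */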
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;

        if (msg->flags & IPRMDATA) {
                skb->data = NULL;
                skb->len = 0;
        } else {
                rc = iucv_message_receive(path, msg, 0, skb->data,
                                          msg->length, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                if (skb->truesize >= sk->sk_rcvbuf / 4) {
                        rc = iucv_fragment_skb(sk, skb, msg->length);
                        kfree_skb(skb);
                        skb = NULL;
                        if (rc) {
                                iucv_path_sever(path, NULL);
                                return;
                        }
                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
                } else {
                        skb_reset_transport_header(skb);
                        skb_reset_network_header(skb);
                        skb->len = msg->length;
                }
        }

        if (sock_queue_rcv_skb(sk, skb))
                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

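/*
 * Drain messages that iucv_callback_rx() had to defer; stop as soon as an
 * skb allocation fails or the backlog queue starts filling up again.
 */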
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int target, copied = 0;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;

        if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        /* receive/dequeue next skb:
         * the function understands MSG_PEEK and, thus, does not dequeue skb */
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        copied = min_t(unsigned int, skb->len, len);

        cskb = skb;
        if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                if (copied == 0)
                        return -EFAULT;
                goto done;
        }

        len -= copied;

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {
                skb_pull(skb, copied);

                if (skb->len) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        goto done;
                }

                kfree_skb(skb);

                /* Queue backlog skbs */
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        } else {
                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                        }
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        spin_lock_bh(&iucv->message_q.lock);
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        spin_unlock_bh(&iucv->message_q.lock);
                }
        }

done:
        return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk->sk_sleep, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
                mask |= POLLIN;

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;
        u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_DISCONN:
        case IUCV_CLOSING:
        case IUCV_SEVERED:
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;

        default:
                sk->sk_shutdown |= how;
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                txmsg.class = 0;
                txmsg.tag = 0;
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) prmmsg, 8);
                if (err) {
                        switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
                        case 2:
                                err = -ECONNRESET;
                                break;
                        default:
                                err = -ENOTCONN;
                                break;
                        }
                }
        }

        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
                if (err)
                        err = -ENOTCONN;

                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}

static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        /* Unregister with IUCV base support */
        if (iucv_sk(sk)->path) {
                iucv_path_sever(iucv_sk(sk)->path, NULL);
                iucv_path_free(iucv_sk(sk)->path);
                iucv_sk(sk)->path = NULL;
        }

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}

/* Callback wrappers - called from iucv base support */
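/*
 * iucv_callback_connreq() handles an incoming path: find a listening
 * socket whose src_name matches bytes 0-7 of the connection user data,
 * check the accept backlog, allocate a child socket, accept the path on
 * its behalf, and queue the child on the parent's accept queue before
 * waking the listener.  Paths that match no af_iucv socket are declined
 * with -EINVAL so the IUCV base layer can offer them to other handlers.
 */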
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
        if (!nsk) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                iucv_sock_kill(nsk);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk, 1);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *save_msg;
        int len;

        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                iucv_message_reject(path, msg);
                return;
        }

        spin_lock(&iucv->message_q.lock);

        if (!list_empty(&iucv->message_q.list) ||
            !skb_queue_empty(&iucv->backlog_skb_q))
                goto save_message;

        len = atomic_read(&sk->sk_rmem_alloc);
        len += msg->length + sizeof(struct sk_buff);
        if (len > sk->sk_rcvbuf)
                goto save_message;

        skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
        if (!skb)
                goto save_message;

        iucv_process_message(sk, skb, path, msg);
        goto out_unlock;

save_message:
        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
        if (!save_msg)
                goto out_unlock;
        save_msg->path = path;
        save_msg->msg = *msg;

        list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
        spin_unlock(&iucv->message_q.lock);
}

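/*
 * A send completion carries the 4-byte tag that iucv_sock_sendmsg() stashed
 * in skb->cb; find the matching skb on the send queue, unlink and free it,
 * and finish a pending close once the send queue drains.
 */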
static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb = list->next;
        unsigned long flags;

        if (!skb_queue_empty(list)) {
                spin_lock_irqsave(&list->lock, flags);

                while (list_skb != (struct sk_buff *)list) {
                        if (!memcmp(&msg->tag, list_skb->cb, 4)) {
                                this = list_skb;
                                break;
                        }
                        list_skb = list_skb->next;
                }
                if (this)
                        __skb_unlink(this, list);

                spin_unlock_irqrestore(&list->lock, flags);

                kfree_skb(this);
        }
        BUG_ON(!this);

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (!list_empty(&iucv_sk(sk)->accept_q))
                sk->sk_state = IUCV_SEVERED;
        else
                sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,
        .owner          = THIS_MODULE,
        .release        = iucv_sock_release,
        .bind           = iucv_sock_bind,
        .connect        = iucv_sock_connect,
        .listen         = iucv_sock_listen,
        .accept         = iucv_sock_accept,
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
        .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
        .shutdown       = iucv_sock_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
        .family = AF_IUCV,
        .owner  = THIS_MODULE,
        .create = iucv_sock_create,
};

static int __init afiucv_init(void)
{
        int err;

        if (!MACHINE_IS_VM) {
                pr_err("The af_iucv module cannot be loaded"
                       " without z/VM\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
                WARN_ON(err);
                err = -EPROTONOSUPPORT;
                goto out;
        }

        err = iucv_register(&af_iucv_handler, 0);
        if (err)
                goto out;
        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out_iucv;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
        return 0;

out_proto:
        proto_unregister(&iucv_proto);
out_iucv:
        iucv_unregister(&af_iucv_handler, 0);
out:
        return err;
}

static void __exit afiucv_exit(void)
{
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);