linux/net/caif/caif_socket.c
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:      Sjur Brendeland sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
#include <asm/atomic.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);

#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)

/*
 * CAIF reuses the TCP socket states.
 * caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
        CAIF_CONNECTED          = TCP_ESTABLISHED,
        CAIF_CONNECTING = TCP_SYN_SENT,
        CAIF_DISCONNECTED       = TCP_CLOSE
};

#define TX_FLOW_ON_BIT  1
#define RX_FLOW_ON_BIT  2

static struct dentry *debugfsdir;

#ifdef CONFIG_DEBUG_FS
struct debug_fs_counter {
        atomic_t caif_nr_socks;
        atomic_t num_connect_req;
        atomic_t num_connect_resp;
        atomic_t num_connect_fail_resp;
        atomic_t num_disconnect;
        atomic_t num_remote_shutdown_ind;
        atomic_t num_tx_flow_off_ind;
        atomic_t num_tx_flow_on_ind;
        atomic_t num_rx_flow_off;
        atomic_t num_rx_flow_on;
};
static struct debug_fs_counter cnt;
#define dbfs_atomic_inc(v) atomic_inc(v)
#define dbfs_atomic_dec(v) atomic_dec(v)
#else
#define dbfs_atomic_inc(v)
#define dbfs_atomic_dec(v)
#endif

struct caifsock {
        struct sock sk; /* must be first member */
        struct cflayer layer;
        char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
        u32 flow_state;
        struct caif_connect_request conn_req;
        struct mutex readlock;
        struct dentry *debugfs_socket_dir;
};

static int rx_flow_is_on(struct caifsock *cf_sk)
{
        return test_bit(RX_FLOW_ON_BIT,
                        (void *) &cf_sk->flow_state);
}

static int tx_flow_is_on(struct caifsock *cf_sk)
{
        return test_bit(TX_FLOW_ON_BIT,
                        (void *) &cf_sk->flow_state);
}

static void set_rx_flow_off(struct caifsock *cf_sk)
{
        clear_bit(RX_FLOW_ON_BIT,
                  (void *) &cf_sk->flow_state);
}

static void set_rx_flow_on(struct caifsock *cf_sk)
{
        set_bit(RX_FLOW_ON_BIT,
                (void *) &cf_sk->flow_state);
}

static void set_tx_flow_off(struct caifsock *cf_sk)
{
        clear_bit(TX_FLOW_ON_BIT,
                  (void *) &cf_sk->flow_state);
}

static void set_tx_flow_on(struct caifsock *cf_sk)
{
        set_bit(TX_FLOW_ON_BIT,
                (void *) &cf_sk->flow_state);
}

static void caif_read_lock(struct sock *sk)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_lock(&cf_sk->readlock);
}

static void caif_read_unlock(struct sock *sk)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_unlock(&cf_sk->readlock);
}

static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
        /* A quarter of the full buffer is used as the low-water mark */
        return cf_sk->sk.sk_rcvbuf / 4;
}

static void caif_flow_ctrl(struct sock *sk, int mode)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
                cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}

/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so that packets
 * are not dropped; instead, CAIF sends flow-off upstream.
 */
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err;
        int skb_len;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
                (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
                trace_printk("CAIF: %s():"
                        " sending flow OFF (queue len = %d %d)\n",
                        __func__,
                        atomic_read(&cf_sk->sk.sk_rmem_alloc),
                        sk_rcvbuf_lowwater(cf_sk));
                set_rx_flow_off(cf_sk);
                dbfs_atomic_inc(&cnt.num_rx_flow_off);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }

        err = sk_filter(sk, skb);
        if (err)
                return err;
        if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
                set_rx_flow_off(cf_sk);
                trace_printk("CAIF: %s():"
                        " sending flow OFF due to rmem_schedule\n",
                        __func__);
                dbfs_atomic_inc(&cnt.num_rx_flow_off);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
        /* Cache the SKB length before we tack it onto the receive
         * queue. Once it is added it no longer belongs to us and
         * may be freed by other threads of control pulling packets
         * from the queue.
         */
        skb_len = skb->len;
        spin_lock_irqsave(&list->lock, flags);
        if (!sock_flag(sk, SOCK_DEAD))
                __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, skb_len);
        else
                kfree_skb(skb);
        return 0;
}

/* Packet receive callback, called from the CAIF stack */
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
        struct caifsock *cf_sk;
        struct sk_buff *skb;

        cf_sk = container_of(layr, struct caifsock, layer);
        skb = cfpkt_tonative(pkt);

        if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
                cfpkt_destroy(pkt);
                return 0;
        }
        caif_queue_rcv_skb(&cf_sk->sk, skb);
        return 0;
}

/* Packet control callback, called from the CAIF stack */
static void caif_ctrl_cb(struct cflayer *layr,
                                enum caif_ctrlcmd flow,
                                int phyid)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
        switch (flow) {
        case CAIF_CTRLCMD_FLOW_ON_IND:
                /* OK from modem to start sending again */
                dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_FLOW_OFF_IND:
                /* Modem asks us to shut up */
                dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
                set_tx_flow_off(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_INIT_RSP:
                /* We're now connected */
                dbfs_atomic_inc(&cnt.num_connect_resp);
                cf_sk->sk.sk_state = CAIF_CONNECTED;
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_DEINIT_RSP:
                /* We're now disconnected */
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                cfcnfg_release_adap_layer(&cf_sk->layer);
                break;

        case CAIF_CTRLCMD_INIT_FAIL_RSP:
                /* Connect request failed */
                dbfs_atomic_inc(&cnt.num_connect_fail_resp);
                cf_sk->sk.sk_err = ECONNREFUSED;
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                /*
                 * Socket "standards" seem to require POLLOUT to
                 * be set on connect failure.
                 */
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
                /* Modem has closed this connection, or device is down. */
                dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                cf_sk->sk.sk_err = ECONNRESET;
                set_rx_flow_on(cf_sk);
                cf_sk->sk.sk_error_report(&cf_sk->sk);
                break;

        default:
                pr_debug("CAIF: %s(): Unexpected flow command %d\n",
                                __func__, flow);
        }
}

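/*
 * Receive flow control in brief: caif_queue_rcv_skb() above requests
 * flow-off from the modem once sk_rmem_alloc would exceed sk_rcvbuf
 * (or when sk_rmem_schedule() fails), and caif_check_flow_release()
 * below requests flow-on again once the receive queue has drained
 * below the low-water mark, a quarter of sk_rcvbuf.
 */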
static void caif_check_flow_release(struct sock *sk)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        if (rx_flow_is_on(cf_sk))
                return;

        if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
                dbfs_atomic_inc(&cnt.num_rx_flow_on);
                set_rx_flow_on(cf_sk);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
        }
}

/*
 * Copied from unix_dgram_recvmsg, but removed credit checks,
 * changed locking and address handling, and added MSG_TRUNC.
 */
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
                                struct msghdr *m, size_t len, int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int ret;
        int copylen;

        ret = -EOPNOTSUPP;
        if (m->msg_flags&MSG_OOB)
                goto read_error;

        skb = skb_recv_datagram(sk, flags, 0, &ret);
        if (!skb)
                goto read_error;
        copylen = skb->len;
        if (len < copylen) {
                m->msg_flags |= MSG_TRUNC;
                copylen = len;
        }

        ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
        if (ret)
                goto out_free;

        ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
        skb_free_datagram(sk, skb);
        caif_check_flow_release(sk);
        return ret;

read_error:
        return ret;
}

/* Copied from unix_stream_wait_data, identical except for lock call. */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);
        lock_sock(sk);

        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

                if (!skb_queue_empty(&sk->sk_receive_queue) ||
                        sk->sk_err ||
                        sk->sk_state != CAIF_CONNECTED ||
                        sock_flag(sk, SOCK_DEAD) ||
                        (sk->sk_shutdown & RCV_SHUTDOWN) ||
                        signal_pending(current) ||
                        !timeo)
                        break;

                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }

        finish_wait(sk_sleep(sk), &wait);
        release_sock(sk);
        return timeo;
}

/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 */
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                                struct msghdr *msg, size_t size,
                                int flags)
{
        struct sock *sk = sock->sk;
        int copied = 0;
        int target;
        int err = 0;
        long timeo;

        err = -EOPNOTSUPP;
        if (flags&MSG_OOB)
                goto out;

        msg->msg_namelen = 0;

        /*
         * Lock the socket to prevent queue disordering
         * while we sleep in memcpy_toiovec
         */
        err = -EAGAIN;
        if (sk->sk_state == CAIF_CONNECTING)
                goto out;

        caif_read_lock(sk);
        target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

        do {
                int chunk;
                struct sk_buff *skb;

                lock_sock(sk);
                skb = skb_dequeue(&sk->sk_receive_queue);
                caif_check_flow_release(sk);

                if (skb == NULL) {
                        if (copied >= target)
                                goto unlock;
                        /*
                         * POSIX 1003.1g mandates this order.
                         */
                        err = sock_error(sk);
                        if (err)
                                goto unlock;
                        err = -ECONNRESET;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                goto unlock;

                        err = -EPIPE;
                        if (sk->sk_state != CAIF_CONNECTED)
                                goto unlock;
                        if (sock_flag(sk, SOCK_DEAD))
                                goto unlock;

                        release_sock(sk);

                        err = -EAGAIN;
                        if (!timeo)
                                break;

                        caif_read_unlock(sk);

                        timeo = caif_stream_data_wait(sk, timeo);

                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
                        caif_read_lock(sk);
                        continue;
unlock:
                        release_sock(sk);
                        break;
                }
                release_sock(sk);
                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
                }
                copied += chunk;
                size -= chunk;

                /* Mark read part of skb as used */
                if (!(flags & MSG_PEEK)) {
                        skb_pull(skb, chunk);

                        /* put the skb back if we didn't use it up. */
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                break;
                        }
                        kfree_skb(skb);

                } else {
                        /*
                         * It is questionable, see note in unix_dgram_recvmsg.
                         */
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        break;
                }
        } while (size);
        caif_read_unlock(sk);

out:
        return copied ? : err;
}

/*
 * Copied from sock.c:sock_wait_for_wmem, but changed to wait for
 * CAIF flow-on and sock_writeable.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
                                int wait_writeable, long timeo, int *err)
{
        struct sock *sk = &cf_sk->sk;
        DEFINE_WAIT(wait);
        for (;;) {
                *err = 0;
                if (tx_flow_is_on(cf_sk) &&
                        (!wait_writeable || sock_writeable(&cf_sk->sk)))
                        break;
                *err = -ETIMEDOUT;
                if (!timeo)
                        break;
                *err = -ERESTARTSYS;
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                *err = -ECONNRESET;
                if (sk->sk_shutdown & SHUTDOWN_MASK)
                        break;
                *err = -sk->sk_err;
                if (sk->sk_err)
                        break;
                *err = -EPIPE;
                if (cf_sk->sk.sk_state != CAIF_CONNECTED)
                        break;
                timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(sk), &wait);
        return timeo;
}

/*
 * Transmit an SKB. The device may temporarily request re-transmission
 * by returning -EAGAIN.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
                        int noblock, long timeo)
{
        struct cfpkt *pkt;
        int ret, loopcnt = 0;

        pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
        memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
        do {

                ret = -ETIMEDOUT;

                /* Slight paranoia, probably not needed. */
                if (unlikely(loopcnt++ > 1000)) {
                        pr_warning("CAIF: %s(): transmit retries failed,"
                                " error = %d\n", __func__, ret);
                        break;
                }

                if (cf_sk->layer.dn != NULL)
                        ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
                if (likely(ret >= 0))
                        break;
                /* on a non-blocking socket, give up if transmit returned -EAGAIN */
                if (noblock && ret == -EAGAIN)
                        break;
                timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
                if (signal_pending(current)) {
                        ret = sock_intr_errno(timeo);
                        break;
                }
                if (ret)
                        break;
                if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
                        sock_flag(&cf_sk->sk, SOCK_DEAD) ||
                        (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
                        ret = -EPIPE;
                        cf_sk->sk.sk_err = EPIPE;
                        break;
                }
        } while (ret == -EAGAIN);
        return ret;
}

/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
                        struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int buffer_size;
        int ret = 0;
        struct sk_buff *skb = NULL;
        int noblock;
        long timeo;
        caif_assert(cf_sk);
        ret = sock_error(sk);
        if (ret)
                goto err;

        ret = -EOPNOTSUPP;
        if (msg->msg_flags&MSG_OOB)
                goto err;

        ret = -EOPNOTSUPP;
        if (msg->msg_namelen)
                goto err;

        ret = -EINVAL;
        if (unlikely(msg->msg_iov->iov_base == NULL))
                goto err;
        noblock = msg->msg_flags & MSG_DONTWAIT;

        buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;

        ret = -EMSGSIZE;
        if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
                goto err;

        timeo = sock_sndtimeo(sk, noblock);
        timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
                                1, timeo, &ret);

        ret = -EPIPE;
        if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
                sock_flag(sk, SOCK_DEAD) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                goto err;

        ret = -ENOMEM;
        skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
        if (!skb)
                goto err;
        skb_reserve(skb, CAIF_NEEDED_HEADROOM);

        ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

        if (ret)
                goto err;
        ret = transmit_skb(skb, cf_sk, noblock, timeo);
        if (ret < 0)
                goto err;
        return len;
err:
        kfree_skb(skb);
        return ret;
}

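/*
 * For illustration only (not part of the kernel build): a user-space
 * sketch of the seqpacket semantics implemented above. Each send()
 * must fit in a single CAIF frame or it fails with EMSGSIZE, and each
 * recv() returns exactly one packet, setting MSG_TRUNC if the buffer
 * was too small.
 *
 *	char rsp[256];
 *	ssize_t n;
 *
 *	send(s, "AT\r", 3, 0);
 *	n = recv(s, rsp, sizeof(rsp), 0);
 */
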
/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * removed permission handling, added waiting for flow-on,
 * and made other minor adaptations.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                                struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
        long timeo;

        err = -EOPNOTSUPP;

        if (unlikely(msg->msg_flags&MSG_OOB))
                goto out_err;

        if (unlikely(msg->msg_namelen))
                goto out_err;

        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

        if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
                goto pipe_err;

        while (sent < len) {

                size = len-sent;

                if (size > CAIF_MAX_PAYLOAD_SIZE)
                        size = CAIF_MAX_PAYLOAD_SIZE;

                /* If size is more than half of sndbuf, chop up message */
                if (size > ((sk->sk_sndbuf >> 1) - 64))
                        size = (sk->sk_sndbuf >> 1) - 64;

                if (size > SKB_MAX_ALLOC)
                        size = SKB_MAX_ALLOC;

                skb = sock_alloc_send_skb(sk,
                                        size + CAIF_NEEDED_HEADROOM
                                        + CAIF_NEEDED_TAILROOM,
                                        msg->msg_flags&MSG_DONTWAIT,
                                        &err);
                if (skb == NULL)
                        goto out_err;

                skb_reserve(skb, CAIF_NEEDED_HEADROOM);
                /*
                 *      If you pass two values to the sock_alloc_send_skb
                 *      it tries to grab the large buffer with GFP_NOFS
                 *      (which can fail easily), and if it fails grab the
                 *      fallback size buffer which is under a page and will
                 *      succeed. [Alan]
                 */
                size = min_t(int, size, skb_tailroom(skb));

                err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
                }
                err = transmit_skb(skb, cf_sk,
                                msg->msg_flags&MSG_DONTWAIT, timeo);
                if (err < 0) {
                        kfree_skb(skb);
                        goto pipe_err;
                }
                sent += size;
        }

        return sent;

pipe_err:
        if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
        err = -EPIPE;
out_err:
        return sent ? : err;
}

static int setsockopt(struct socket *sock,
                        int lvl, int opt, char __user *ov, unsigned int ol)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int prio, linksel;
        struct ifreq ifreq;

        if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
                return -ENOPROTOOPT;

        switch (opt) {
        case CAIFSO_LINK_SELECT:
                if (ol < sizeof(int))
                        return -EINVAL;
                if (lvl != SOL_CAIF)
                        goto bad_sol;
                if (copy_from_user(&linksel, ov, sizeof(int)))
                        return -EINVAL;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.link_selector = linksel;
                release_sock(&cf_sk->sk);
                return 0;

        case SO_PRIORITY:
                if (lvl != SOL_SOCKET)
                        goto bad_sol;
                if (ol < sizeof(int))
                        return -EINVAL;
                if (copy_from_user(&prio, ov, sizeof(int)))
                        return -EINVAL;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.priority = prio;
                release_sock(&cf_sk->sk);
                return 0;

        case SO_BINDTODEVICE:
                if (lvl != SOL_SOCKET)
                        goto bad_sol;
                if (ol < sizeof(struct ifreq))
                        return -EINVAL;
                if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
                        return -EFAULT;
                lock_sock(&(cf_sk->sk));
                strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
                        sizeof(cf_sk->conn_req.link_name));
                cf_sk->conn_req.link_name
                        [sizeof(cf_sk->conn_req.link_name)-1] = 0;
                release_sock(&cf_sk->sk);
                return 0;

        case CAIFSO_REQ_PARAM:
                if (lvl != SOL_CAIF)
                        goto bad_sol;
                if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
                        return -ENOPROTOOPT;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.param.size = ol;
                if (ol > sizeof(cf_sk->conn_req.param.data) ||
                        copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
                        release_sock(&cf_sk->sk);
                        return -EINVAL;
                }
                release_sock(&cf_sk->sk);
                return 0;

        default:
                return -ENOPROTOOPT;
        }

        return 0;
bad_sol:
        return -ENOPROTOOPT;
}

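/*
 * For illustration only (not part of the kernel build): a user-space
 * sketch of the options handled above, assuming the uapi definitions
 * from <linux/caif/caif_socket.h> and SOL_CAIF from <linux/socket.h>.
 * Options must be set while the socket is still unconnected:
 *
 *	int s = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_UTIL);
 *	int linksel = CAIF_LINK_HIGH_BANDW;
 *
 *	setsockopt(s, SOL_CAIF, CAIFSO_LINK_SELECT,
 *		   &linksel, sizeof(linksel));
 */
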
/*
 * caif_connect() - Connect a CAIF Socket
 * Copied and modified af_irda.c:irda_connect().
 *
 * Note: by consulting "errno", the user space caller may learn the cause
 * of the failure. Most of them are visible in the function, others may come
 * from subroutines called and are listed here:
 *  o -EAFNOSUPPORT: bad socket family or type.
 *  o -ESOCKTNOSUPPORT: bad socket type or protocol
 *  o -EINVAL: bad socket address, or CAIF link type
 *  o -ECONNREFUSED: remote end refused the connection.
 *  o -EINPROGRESS: connect request sent but timed out (or non-blocking)
 *  o -EISCONN: already connected.
 *  o -ETIMEDOUT: Connection timed out (send timeout)
 *  o -ENODEV: No link layer to send request
 *  o -ECONNRESET: Received Shutdown indication or lost link layer
 *  o -ENOMEM: Out of memory
 *
 *  State Strategy:
 *  o sk_state: holds the CAIF_* protocol state, it's updated by
 *      caif_ctrl_cb.
 *  o sock->state: holds the SS_* socket state and is updated by connect and
 *      disconnect.
 */
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
                        int addr_len, int flags)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        long timeo;
        int err;
        lock_sock(sk);

        err = -EAFNOSUPPORT;
        if (uaddr->sa_family != AF_CAIF)
                goto out;

        err = -ESOCKTNOSUPPORT;
        if (unlikely(!(sk->sk_type == SOCK_STREAM &&
                       cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
                       sk->sk_type != SOCK_SEQPACKET))
                goto out;
        switch (sock->state) {
        case SS_UNCONNECTED:
                /* Normal case, a fresh connect */
                caif_assert(sk->sk_state == CAIF_DISCONNECTED);
                break;
        case SS_CONNECTING:
                switch (sk->sk_state) {
                case CAIF_CONNECTED:
                        sock->state = SS_CONNECTED;
                        err = -EISCONN;
                        goto out;
                case CAIF_DISCONNECTED:
                        /* Reconnect allowed */
                        break;
                case CAIF_CONNECTING:
                        err = -EALREADY;
                        if (flags & O_NONBLOCK)
                                goto out;
                        goto wait_connect;
                }
                break;
        case SS_CONNECTED:
                caif_assert(sk->sk_state == CAIF_CONNECTED ||
                                sk->sk_state == CAIF_DISCONNECTED);
                if (sk->sk_shutdown & SHUTDOWN_MASK) {
                        /* Allow re-connect after SHUTDOWN_IND */
                        caif_disconnect_client(&cf_sk->layer);
                        break;
                }
                /* No reconnect on a seqpacket socket */
                err = -EISCONN;
                goto out;
        case SS_DISCONNECTING:
        case SS_FREE:
                caif_assert(0); /* Should never happen */
                break;
        }
        sk->sk_state = CAIF_DISCONNECTED;
        sock->state = SS_UNCONNECTED;
        sk_stream_kill_queues(&cf_sk->sk);

        err = -EINVAL;
        if (addr_len != sizeof(struct sockaddr_caif) ||
                !uaddr)
                goto out;

        memcpy(&cf_sk->conn_req.sockaddr, uaddr,
                sizeof(struct sockaddr_caif));

        /* Move to connecting socket, start sending Connect Requests */
        sock->state = SS_CONNECTING;
        sk->sk_state = CAIF_CONNECTING;

        dbfs_atomic_inc(&cnt.num_connect_req);
        cf_sk->layer.receive = caif_sktrecv_cb;
        err = caif_connect_client(&cf_sk->conn_req,
                                &cf_sk->layer);
        if (err < 0) {
                cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                goto out;
        }

        err = -EINPROGRESS;
wait_connect:

        if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
                goto out;

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        release_sock(sk);
        err = -ERESTARTSYS;
        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                        sk->sk_state != CAIF_CONNECTING,
                        timeo);
        lock_sock(sk);
        if (timeo < 0)
                goto out; /* -ERESTARTSYS */

        err = -ETIMEDOUT;
        if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
                goto out;
        if (sk->sk_state != CAIF_CONNECTED) {
                sock->state = SS_UNCONNECTED;
                err = sock_error(sk);
                if (!err)
                        err = -ECONNREFUSED;
                goto out;
        }
        sock->state = SS_CONNECTED;
        err = 0;
out:
        release_sock(sk);
        return err;
}

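/*
 * For illustration only (not part of the kernel build): a minimal
 * user-space connect sketch against the implementation above,
 * assuming the uapi definitions from <linux/caif/caif_socket.h>:
 *
 *	struct sockaddr_caif addr = {
 *		.family = AF_CAIF,
 *		.u.at.type = CAIF_ATTYPE_PLAIN,
 *	};
 *	int s = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);
 *
 *	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("connect");	// errno values as documented above
 */
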
/*
 * caif_release() - Disconnect a CAIF Socket
 * Copied and modified af_irda.c:irda_release().
 */
static int caif_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int res = 0;

        if (!sk)
                return 0;

        set_tx_flow_off(cf_sk);

        /*
         * Ensure that packets are not queued after this point in time.
         * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
         * which ensures no packets are queued once the sock is dead.
         */
        spin_lock(&sk->sk_receive_queue.lock);
        sock_set_flag(sk, SOCK_DEAD);
        spin_unlock(&sk->sk_receive_queue.lock);
        sock->sk = NULL;

        dbfs_atomic_inc(&cnt.num_disconnect);

        if (cf_sk->debugfs_socket_dir != NULL)
                debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

        lock_sock(&(cf_sk->sk));
        sk->sk_state = CAIF_DISCONNECTED;
        sk->sk_shutdown = SHUTDOWN_MASK;

        if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
                cf_sk->sk.sk_socket->state == SS_CONNECTING)
                res = caif_disconnect_client(&cf_sk->layer);

        cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
        wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

        sock_orphan(sk);
        cf_sk->layer.dn = NULL;
        sk_stream_kill_queues(&cf_sk->sk);
        release_sock(sk);
        sock_put(sk);
        return res;
}

/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
static unsigned int caif_poll(struct file *file,
                                struct socket *sock, poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err)
                mask |= POLLERR;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /*
         * We also set writable when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
        if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

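/*
 * For illustration only (not part of the kernel build): user space can
 * poll() for POLLOUT to wait for tx flow-on before writing, per the
 * mask computed above:
 *
 *	struct pollfd pfd = { .fd = s, .events = POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLOUT))
 *		send(s, buf, len, 0);	// tx flow is on and sndbuf has room
 */
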
static const struct proto_ops caif_seqpacket_ops = {
        .family = PF_CAIF,
        .owner = THIS_MODULE,
        .release = caif_release,
        .bind = sock_no_bind,
        .connect = caif_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = caif_seqpkt_sendmsg,
        .recvmsg = caif_seqpkt_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static const struct proto_ops caif_stream_ops = {
        .family = PF_CAIF,
        .owner = THIS_MODULE,
        .release = caif_release,
        .bind = sock_no_bind,
        .connect = caif_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = caif_stream_sendmsg,
        .recvmsg = caif_stream_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

/* This function is called when a socket is finally destroyed. */
static void caif_sock_destructor(struct sock *sk)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        caif_assert(!atomic_read(&sk->sk_wmem_alloc));
        caif_assert(sk_unhashed(sk));
        caif_assert(!sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_info("Attempt to release alive CAIF socket: %p\n", sk);
                return;
        }
        sk_stream_kill_queues(&cf_sk->sk);
        dbfs_atomic_dec(&cnt.caif_nr_socks);
}

static int caif_create(struct net *net, struct socket *sock, int protocol,
                        int kern)
{
        struct sock *sk = NULL;
        struct caifsock *cf_sk = NULL;
        static struct proto prot = {.name = "PF_CAIF",
                .owner = THIS_MODULE,
                .obj_size = sizeof(struct caifsock),
        };

        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
                return -EPERM;
        /*
         * sock->type specifies the socket type to use.
         * The CAIF socket is a packet stream in the sense
         * that it is packet based. CAIF trusts the reliability
         * of the link; no retransmission is implemented.
         */
        if (sock->type == SOCK_SEQPACKET)
                sock->ops = &caif_seqpacket_ops;
        else if (sock->type == SOCK_STREAM)
                sock->ops = &caif_stream_ops;
        else
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= CAIFPROTO_MAX)
                return -EPROTONOSUPPORT;
        /*
         * Set the socket state to unconnected. The socket state
         * is not really used anywhere in net/core or socket.c, but the
         * initialization makes sure that sock->state is not uninitialized.
         */
        sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
        if (!sk)
                return -ENOMEM;

        cf_sk = container_of(sk, struct caifsock, sk);

        /* Store the protocol */
        sk->sk_protocol = (unsigned char) protocol;

        /* Sendbuf dictates the number of outbound packets not yet sent */
        sk->sk_sndbuf = CAIF_DEF_SNDBUF;
        sk->sk_rcvbuf = CAIF_DEF_RCVBUF;

        /*
         * Lock in order to try to stop someone from opening the socket
         * too early.
         */
        lock_sock(&(cf_sk->sk));

        /* Initialize the nonzero default sock structure data. */
        sock_init_data(sock, sk);
        sk->sk_destruct = caif_sock_destructor;

        mutex_init(&cf_sk->readlock); /* single task reading lock */
        cf_sk->layer.ctrlcmd = caif_ctrl_cb;
        cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
        cf_sk->sk.sk_state = CAIF_DISCONNECTED;

        set_tx_flow_off(cf_sk);
        set_rx_flow_on(cf_sk);

        /* Set default options on configuration */
        cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
        cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
        cf_sk->conn_req.protocol = protocol;
        /* Increase the number of sockets created. */
        dbfs_atomic_inc(&cnt.caif_nr_socks);
#ifdef CONFIG_DEBUG_FS
        if (!IS_ERR(debugfsdir)) {
                /* Fill in some information concerning this socket. */
                snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
                                atomic_read(&cnt.caif_nr_socks));

                cf_sk->debugfs_socket_dir =
                        debugfs_create_dir(cf_sk->name, debugfsdir);
                debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->sk.sk_state);
                debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
                debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->sk.sk_rmem_alloc);
                debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->sk.sk_wmem_alloc);
                debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->layer.id);
        }
#endif
        release_sock(&cf_sk->sk);
        return 0;
}

static struct net_proto_family caif_family_ops = {
        .family = PF_CAIF,
        .create = caif_create,
        .owner = THIS_MODULE,
};

static int af_caif_init(void)
{
        int err = sock_register(&caif_family_ops);
        if (err)
                return err;
        return 0;
}

static int __init caif_sktinit_module(void)
{
#ifdef CONFIG_DEBUG_FS
        debugfsdir = debugfs_create_dir("caif_sk", NULL);
        if (!IS_ERR(debugfsdir)) {
                debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.caif_nr_socks);
                debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_connect_req);
                debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_connect_resp);
                debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_connect_fail_resp);
                debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_disconnect);
                debugfs_create_u32("num_remote_shutdown_ind",
                                S_IRUSR | S_IWUSR, debugfsdir,
                                (u32 *) &cnt.num_remote_shutdown_ind);
                debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_tx_flow_off_ind);
                debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_tx_flow_on_ind);
                debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_rx_flow_off);
                debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_rx_flow_on);
        }
#endif
        return af_caif_init();
}

static void __exit caif_sktexit_module(void)
{
        sock_unregister(PF_CAIF);
        if (debugfsdir != NULL)
                debugfs_remove_recursive(debugfsdir);
}
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);