linux/net/rds/recv.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>

#include "rds.h"
#include "rdma.h"

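/*
 * Initialize an incoming message.  The transport embeds a struct
 * rds_incoming in its own receive state; this sets the initial refcount
 * to one and records the connection and source address the message
 * arrived on.
 */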
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
                  __be32 saddr)
{
        atomic_set(&inc->i_refcount, 1);
        INIT_LIST_HEAD(&inc->i_item);
        inc->i_conn = conn;
        inc->i_saddr = saddr;
        inc->i_rdma_cookie = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);

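/*
 * Take an additional reference on an incoming message.
 */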
void rds_inc_addref(struct rds_incoming *inc)
{
        rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
        atomic_inc(&inc->i_refcount);
}
EXPORT_SYMBOL_GPL(rds_inc_addref);

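/*
 * Drop a reference on an incoming message.  When the last reference is
 * put the message must already be off all queues, and it is handed back
 * to the transport that allocated it via its inc_free method.
 */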
void rds_inc_put(struct rds_incoming *inc)
{
        rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
        if (atomic_dec_and_test(&inc->i_refcount)) {
                BUG_ON(!list_empty(&inc->i_item));

                inc->i_conn->c_trans->inc_free(inc);
        }
}
EXPORT_SYMBOL_GPL(rds_inc_put);

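/*
 * Account for delta bytes of queued incoming data on the socket and
 * update the congestion map accordingly.  A socket is marked congested
 * once its queued bytes exceed the receive buffer, and is only marked
 * uncongested again once they drop below half of it; the hysteresis
 * keeps the congestion state from flapping.
 */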
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
                                  struct rds_cong_map *map,
                                  int delta, __be16 port)
{
        int now_congested;

        if (delta == 0)
                return;

        rs->rs_rcv_bytes += delta;
        now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

        rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
          "now_cong %d delta %d\n",
          rs, &rs->rs_bound_addr,
          ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
          rds_sk_rcvbuf(rs), now_congested, delta);

        /* wasn't -> am congested */
        if (!rs->rs_congested && now_congested) {
                rs->rs_congested = 1;
                rds_cong_set_bit(map, port);
                rds_cong_queue_updates(map);
        }
        /* was -> aren't congested */
        /* Require more free space before reporting uncongested to prevent
           bouncing cong/uncong state too often */
        else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
                rs->rs_congested = 0;
                rds_cong_clear_bit(map, port);
                rds_cong_queue_updates(map);
        }

        /* do nothing if no change in cong state */
}

/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
        struct rds_header *hdr = &inc->i_hdr;
        unsigned int pos = 0, type, len;
        union {
                struct rds_ext_header_version version;
                struct rds_ext_header_rdma rdma;
                struct rds_ext_header_rdma_dest rdma_dest;
        } buffer;

        while (1) {
                len = sizeof(buffer);
                type = rds_message_next_extension(hdr, &pos, &buffer, &len);
                if (type == RDS_EXTHDR_NONE)
                        break;
                /* Process extension header here */
                switch (type) {
                case RDS_EXTHDR_RDMA:
                        rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
                        break;

                case RDS_EXTHDR_RDMA_DEST:
                        /* We ignore the size for now. We could stash it
                         * somewhere and use it for error checking. */
                        inc->i_rdma_cookie = rds_rdma_make_cookie(
                                        be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
                                        be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

                        break;
                }
        }
}

/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow, which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure, and
 * we save a flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only done to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting them from the
 * conn.  This lets loopback, which only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
                       struct rds_incoming *inc, gfp_t gfp, enum km_type km)
{
        struct rds_sock *rs = NULL;
        struct sock *sk;
        unsigned long flags;

        inc->i_conn = conn;
        inc->i_rx_jiffies = jiffies;

        rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
                 "flags 0x%x rx_jiffies %lu\n", conn,
                 (unsigned long long)conn->c_next_rx_seq,
                 inc,
                 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
                 be32_to_cpu(inc->i_hdr.h_len),
                 be16_to_cpu(inc->i_hdr.h_sport),
                 be16_to_cpu(inc->i_hdr.h_dport),
                 inc->i_hdr.h_flags,
                 inc->i_rx_jiffies);
        /*
         * Sequence numbers should only increase.  Messages get their
         * sequence number as they're queued in a sending conn.  They
         * can be dropped, though, if the sending socket is closed before
         * they hit the wire.  So sequence numbers can skip forward
         * under normal operation.  They can also drop back in the conn
         * failover case as previously sent messages are resent down the
         * new instance of a conn.  We drop those; otherwise we would have
         * to assume that the next valid seq does not come after a
         * hole in the fragment stream.
         *
         * The headers don't give us a way to tell whether fragments of
         * a message have been dropped.  We assume that frags arriving
         * on a flow are part of the current message on the flow that is
         * being reassembled.  This means that senders can't drop messages
         * from the sending conn until all their frags are sent.
         *
         * XXX we could spend more on the wire to get more robust failure
         * detection, arguably worth it to avoid data corruption.
         */
        if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
            (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
                rds_stats_inc(s_recv_drop_old_seq);
                goto out;
        }
        conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

        if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
                rds_stats_inc(s_recv_ping);
                rds_send_pong(conn, inc->i_hdr.h_sport);
                goto out;
        }

        rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
        if (rs == NULL) {
                rds_stats_inc(s_recv_drop_no_sock);
                goto out;
        }

        /* Process extension headers */
        rds_recv_incoming_exthdrs(inc, rs);

        /* We can be racing with rds_release() which marks the socket dead. */
        sk = rds_rs_to_sk(rs);

        /* serialize with rds_release -> sock_orphan */
        write_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!sock_flag(sk, SOCK_DEAD)) {
                rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
                rds_stats_inc(s_recv_queued);
                rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                      be32_to_cpu(inc->i_hdr.h_len),
                                      inc->i_hdr.h_dport);
                rds_inc_addref(inc);
                list_add_tail(&inc->i_item, &rs->rs_recv_queue);
                __rds_wake_sk_sleep(sk);
        } else {
                rds_stats_inc(s_recv_drop_dead_sock);
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
        if (rs)
                rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);

/*
 * Be very careful here.  This is called as the condition in
 * wait_event_*() and needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
        unsigned long flags;

        if (*inc == NULL) {
                read_lock_irqsave(&rs->rs_recv_lock, flags);
                if (!list_empty(&rs->rs_recv_queue)) {
                        *inc = list_entry(rs->rs_recv_queue.next,
                                          struct rds_incoming,
                                          i_item);
                        rds_inc_addref(*inc);
                }
                read_unlock_irqrestore(&rs->rs_recv_lock, flags);
        }

        return *inc != NULL;
}

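/*
 * Check whether inc is still on the socket's receive queue.  Returns
 * nonzero if it is; if drop is set the message is also dequeued, its
 * length is subtracted from the receive buffer accounting, and the
 * queue's reference on it is dropped.  recvmsg uses this to detect a
 * race with another thread delivering the same message.
 */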
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
                            int drop)
{
        struct sock *sk = rds_rs_to_sk(rs);
        int ret = 0;
        unsigned long flags;

        write_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!list_empty(&inc->i_item)) {
                ret = 1;
                if (drop) {
                        /* XXX make sure this i_conn is reliable */
                        rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                              -be32_to_cpu(inc->i_hdr.h_len),
                                              inc->i_hdr.h_dport);
                        list_del_init(&inc->i_item);
                        rds_inc_put(inc);
                }
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);

        rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
        return ret;
}

/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
        struct rds_notifier *notifier;
        struct rds_rdma_notify cmsg;
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
        int err = 0;

        /* put_cmsg copies to user space and thus may sleep. We can't do this
         * with rs_lock held, so first grab as many notifications as we can stuff
         * into the user-provided cmsg buffer. We don't try to copy more, to avoid
         * losing notifications - except when the buffer is so small that it wouldn't
         * even hold a single notification. In that case we copy as much of the one
         * message as we can squeeze in, and set MSG_CTRUNC.
         */
        if (msghdr) {
                max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
                if (!max_messages)
                        max_messages = 1;
        }

        spin_lock_irqsave(&rs->rs_lock, flags);
        while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
                notifier = list_entry(rs->rs_notify_queue.next,
                                struct rds_notifier, n_list);
                list_move(&notifier->n_list, &copy);
                count++;
        }
        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (!count)
                return 0;

        while (!list_empty(&copy)) {
                notifier = list_entry(copy.next, struct rds_notifier, n_list);

                if (msghdr) {
                        cmsg.user_token = notifier->n_user_token;
                        cmsg.status  = notifier->n_status;

                        err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
                                        sizeof(cmsg), &cmsg);
                        if (err)
                                break;
                }

                list_del_init(&notifier->n_list);
                kfree(notifier);
        }

        /* If we bailed out because of an error in put_cmsg,
         * we may be left with one or more notifications that we
         * didn't process. Return them to the head of the list. */
        if (!list_empty(&copy)) {
                spin_lock_irqsave(&rs->rs_lock, flags);
                list_splice(&copy, &rs->rs_notify_queue);
                spin_unlock_irqrestore(&rs->rs_lock, flags);
        }

        return err;
}

/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
        uint64_t notify = rs->rs_cong_notify;
        unsigned long flags;
        int err;

        err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
                        sizeof(notify), &notify);
        if (err)
                return err;

        spin_lock_irqsave(&rs->rs_lock, flags);
        rs->rs_cong_notify &= ~notify;
        spin_unlock_irqrestore(&rs->rs_lock, flags);

        return 0;
}

/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
{
        int ret = 0;

        if (inc->i_rdma_cookie) {
                ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
                                sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
                if (ret)
                        return ret;
        }

        return 0;
}

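/*
 * recvmsg entry point for RDS sockets.  Pending RDMA notifications and
 * congestion updates are delivered as control messages before any data,
 * and we block (subject to the socket receive timeout) until a message,
 * a notification, or a signal arrives.
 */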
int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t size, int msg_flags)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        long timeo;
        int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
        struct sockaddr_in *sin;
        struct rds_incoming *inc = NULL;

        /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
        timeo = sock_rcvtimeo(sk, nonblock);

        rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

        if (msg_flags & MSG_OOB)
                goto out;

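        /*
         * Deliver in priority order: queued RDMA notifications first,
         * then congestion updates, and only then message data.
         */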
        while (1) {
                /* If there are pending notifications, do those - and nothing else */
                if (!list_empty(&rs->rs_notify_queue)) {
                        ret = rds_notify_queue_get(rs, msg);
                        break;
                }

                if (rs->rs_cong_notify) {
                        ret = rds_notify_cong(rs, msg);
                        break;
                }

                if (!rds_next_incoming(rs, &inc)) {
                        if (nonblock) {
                                ret = -EAGAIN;
                                break;
                        }

                        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                        (!list_empty(&rs->rs_notify_queue) ||
                                         rs->rs_cong_notify ||
                                         rds_next_incoming(rs, &inc)), timeo);
                        rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
                                 timeo);
                        if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                                continue;

                        ret = timeo;
                        if (ret == 0)
                                ret = -ETIMEDOUT;
                        break;
                }

                rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
                         &inc->i_conn->c_faddr,
                         ntohs(inc->i_hdr.h_sport));
                ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
                                                             size);
                if (ret < 0)
                        break;

                /*
                 * If the message we just copied isn't at the head of the
                 * recv queue, then someone else raced us to return it; try
                 * to get the next message.
                 */
                if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
                        rds_inc_put(inc);
                        inc = NULL;
                        rds_stats_inc(s_recv_deliver_raced);
                        continue;
                }

                if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
                        if (msg_flags & MSG_TRUNC)
                                ret = be32_to_cpu(inc->i_hdr.h_len);
                        msg->msg_flags |= MSG_TRUNC;
                }

                if (rds_cmsg_recv(inc, msg)) {
                        ret = -EFAULT;
                        goto out;
                }

                rds_stats_inc(s_recv_delivered);

                sin = (struct sockaddr_in *)msg->msg_name;
                if (sin) {
                        sin->sin_family = AF_INET;
                        sin->sin_port = inc->i_hdr.h_sport;
                        sin->sin_addr.s_addr = inc->i_saddr;
                        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                }
                break;
        }

        if (inc)
                rds_inc_put(inc);

out:
        return ret;
}

/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
        struct sock *sk = rds_rs_to_sk(rs);
        struct rds_incoming *inc, *tmp;
        unsigned long flags;

        write_lock_irqsave(&rs->rs_recv_lock, flags);
        list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
                rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                      -be32_to_cpu(inc->i_hdr.h_len),
                                      inc->i_hdr.h_dport);
                list_del_init(&inc->i_item);
                rds_inc_put(inc);
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}

/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
                       struct rds_info_iterator *iter,
                       __be32 saddr, __be32 daddr, int flip)
{
        struct rds_info_message minfo;

        minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
        minfo.len = be32_to_cpu(inc->i_hdr.h_len);

        if (flip) {
                minfo.laddr = daddr;
                minfo.faddr = saddr;
                minfo.lport = inc->i_hdr.h_dport;
                minfo.fport = inc->i_hdr.h_sport;
        } else {
                minfo.laddr = saddr;
                minfo.faddr = daddr;
                minfo.lport = inc->i_hdr.h_sport;
                minfo.fport = inc->i_hdr.h_dport;
        }

        rds_info_copy(iter, &minfo, sizeof(minfo));
}