linux/net/rds/cong.c
/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/rbtree.h>

#include <asm-generic/bitops/le.h>

#include "rds.h"

/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or return -EWOULDBLOCK.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  Hitting this "back-pressure" is considered an
 * application bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As the bitmap changes it is
 * sent through all the connections which terminate in the local address of the
 * bitmap which changed.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to check whether the port it's about to send to is
 * congested or not.
 */

/*
 * Interaction with poll is a tad tricky. We want all processes stuck in
 * poll to wake up and check whether a congested destination became uncongested.
 * The really sad thing is we have no idea which destinations the application
 * wants to send to - we don't even know which rds_connections are involved.
 * So until we implement a more flexible rds poll interface, we have to make
 * do with this:
 * We maintain a global counter that is incremented each time a congestion map
 * update is received. Each rds socket tracks this value, and if rds_poll
 * finds that the saved generation number is smaller than the global generation
 * number, it wakes up the process.
 */
static atomic_t         rds_cong_generation = ATOMIC_INIT(0);

/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following
 * circumstances:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 *  It's sadly ordered under the socket callback lock and the connection lock.
 *  Receive paths can mark ports congested from interrupt context so the
 *  lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
                                               struct rds_cong_map *insert)
{
        struct rb_node **p = &rds_cong_tree.rb_node;
        struct rb_node *parent = NULL;
        struct rds_cong_map *map;

        while (*p) {
                parent = *p;
                map = rb_entry(parent, struct rds_cong_map, m_rb_node);

                if (addr < map->m_addr)
                        p = &(*p)->rb_left;
                else if (addr > map->m_addr)
                        p = &(*p)->rb_right;
                else
                        return map;
        }

        if (insert) {
                rb_link_node(&insert->m_rb_node, parent, p);
                rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
        }
        return NULL;
}

/*
 * There is only ever one bitmap for any address.  Connections allocate
 * these bitmaps and get pointers to them in the process; if the bitmap for
 * an address already exists, the existing one is returned and the freshly
 * allocated one is freed.  The bitmaps are only ever freed as the module
 * is removed after all connections have been freed.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
        struct rds_cong_map *map;
        struct rds_cong_map *ret = NULL;
        unsigned long zp;
        unsigned long i;
        unsigned long flags;

        map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
        if (map == NULL)
                return NULL;

        map->m_addr = addr;
        init_waitqueue_head(&map->m_waitq);
        INIT_LIST_HEAD(&map->m_conn_list);

        for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
                zp = get_zeroed_page(GFP_KERNEL);
                if (zp == 0)
                        goto out;
                map->m_page_addrs[i] = zp;
        }

        spin_lock_irqsave(&rds_cong_lock, flags);
        ret = rds_cong_tree_walk(addr, map);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (ret == NULL) {
                ret = map;
                map = NULL;
        }

out:
        if (map) {
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }

        rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));

        return ret;
}

/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_del_init(&conn->c_map_item);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
        conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
        conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

        if (conn->c_lcong == NULL || conn->c_fcong == NULL)
                return -ENOMEM;

        return 0;
}

void rds_cong_queue_updates(struct rds_cong_map *map)
{
        struct rds_connection *conn;
        unsigned long flags;

        spin_lock_irqsave(&rds_cong_lock, flags);

        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
                if (!test_and_set_bit(0, &conn->c_map_queued)) {
                        rds_stats_inc(s_cong_update_queued);
                        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
                }
        }

        spin_unlock_irqrestore(&rds_cong_lock, flags);
}
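
/*
 * A sketch of the consuming side (assumed, based on the send path in
 * send.c rather than anything in this file): the send worker is expected
 * to test-and-clear bit 0 of c_map_queued and, when it was set, allocate
 * a congestion update with rds_cong_update_alloc() and transmit it ahead
 * of queued data, roughly:
 *
 *      if (!rm && test_and_clear_bit(0, &conn->c_map_queued))
 *              rm = rds_cong_update_alloc(conn);
 */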

void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
        rdsdebug("waking map %p for %pI4\n",
          map, &map->m_addr);
        rds_stats_inc(s_cong_update_received);
        atomic_inc(&rds_cong_generation);
        if (waitqueue_active(&map->m_waitq))
                wake_up(&map->m_waitq);
        if (waitqueue_active(&rds_poll_waitq))
                wake_up_all(&rds_poll_waitq);

        if (portmask && !list_empty(&rds_cong_monitor)) {
                unsigned long flags;
                struct rds_sock *rs;

                read_lock_irqsave(&rds_cong_monitor_lock, flags);
                list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
                        spin_lock(&rs->rs_lock);
                        rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
                        rs->rs_cong_mask &= ~portmask;
                        spin_unlock(&rs->rs_lock);
                        if (rs->rs_cong_notify)
                                rds_wake_sk_sleep(rs);
                }
                read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
        }
}
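
/*
 * Illustration, assuming the RDS_CONG_MONITOR_* definitions in rds.h look
 * roughly like:
 *
 *      #define RDS_CONG_MONITOR_SIZE   64
 *      #define RDS_CONG_MONITOR_BIT(port)  (((unsigned int) port) % RDS_CONG_MONITOR_SIZE)
 *      #define RDS_CONG_MONITOR_MASK(port) (1ULL << RDS_CONG_MONITOR_BIT(port))
 *
 * The 64-bit portmask hashes ports modulo 64 rather than tracking them
 * exactly, so a monitoring socket may see a spurious notification for a
 * port that merely collides with one it registered interest in.
 */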

int rds_cong_updated_since(unsigned long *recent)
{
        unsigned long gen = atomic_read(&rds_cong_generation);

        if (likely(*recent == gen))
                return 0;
        *recent = gen;
        return 1;
}
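
/*
 * A sketch of the intended caller (hypothetical, assuming rds_poll() keeps
 * the last generation it saw in a per-socket rs_cong_track field):
 *
 *      poll_wait(file, &rds_poll_waitq, wait);
 *      if (rds_cong_updated_since(&rs->rs_cong_track))
 *              mask |= (POLLIN | POLLRDNORM | POLLWRBAND);
 *
 * Any update to any map bumps the counter, so every poller wakes up and
 * rechecks; this trades precision for the simplicity described above.
 */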

/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption.  This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("setting congestion for %pI4:%u in map %p\n",
          &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        generic___set_le_bit(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("clearing congestion for %pI4:%u in map %p\n",
          &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        generic___clear_le_bit(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        return generic_test_le_bit(off, (void *)map->m_page_addrs[i]);
}
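
/*
 * Worked example of the page/bit arithmetic above: with 4KB pages,
 * RDS_CONG_MAP_PAGE_BITS would be 32768, so port 40000 maps to
 * i = 40000 / 32768 = 1 and off = 40000 % 32768 = 7232.  The little-endian
 * bit operations keep the bitmap's byte layout, which travels on the wire,
 * independent of host endianness.
 */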

void rds_cong_add_socket(struct rds_sock *rs)
{
        unsigned long flags;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        if (list_empty(&rs->rs_cong_list))
                list_add(&rs->rs_cong_list, &rds_cong_monitor);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

void rds_cong_remove_socket(struct rds_sock *rs)
{
        unsigned long flags;
        struct rds_cong_map *map;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        list_del_init(&rs->rs_cong_list);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

        /* update congestion map for now-closed port */
        spin_lock_irqsave(&rds_cong_lock, flags);
        map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
                rds_cong_clear_bit(map, rs->rs_bound_port);
                rds_cong_queue_updates(map);
        }
}

int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
                  struct rds_sock *rs)
{
        if (!rds_cong_test_bit(map, port))
                return 0;
        if (nonblock) {
                if (rs && rs->rs_cong_monitor) {
                        unsigned long flags;

                        /* It would have been nice to have an atomic set_bit on
                         * a uint64_t. */
                        spin_lock_irqsave(&rs->rs_lock, flags);
                        rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
                        spin_unlock_irqrestore(&rs->rs_lock, flags);

                        /* Test again - a congestion update may have arrived in
                         * the meantime. */
                        if (!rds_cong_test_bit(map, port))
                                return 0;
                }
                rds_stats_inc(s_cong_send_error);
                return -ENOBUFS;
        }

        rds_stats_inc(s_cong_send_blocked);
        rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

        return wait_event_interruptible(map->m_waitq,
                                        !rds_cong_test_bit(map, port));
}
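
/*
 * A sketch of the expected sender-side use (hypothetical, modelled on the
 * sendmsg path): before queueing a message, the sender consults the
 * foreign map for the destination port and either blocks or bails out:
 *
 *      ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
 *      if (ret)
 *              goto out;
 *
 * With nonblock set and congestion monitoring enabled, the -ENOBUFS return
 * tells the application to wait for a notification instead of spinning.
 */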

void rds_cong_exit(void)
{
        struct rb_node *node;
        struct rds_cong_map *map;
        unsigned long i;

        while ((node = rb_first(&rds_cong_tree))) {
                map = rb_entry(node, struct rds_cong_map, m_rb_node);
                rdsdebug("freeing map %p\n", map);
                rb_erase(&map->m_rb_node, &rds_cong_tree);
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }
}

/*
 * Allocate an RDS message containing a congestion update.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
        struct rds_cong_map *map = conn->c_lcong;
        struct rds_message *rm;

        rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
        if (!IS_ERR(rm))
                rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

        return rm;
}