linux/net/sctp/ulpqueue.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
                                              struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode  = 0;
        ulpq->malloced = 0;

        return ulpq;
}
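
/* Lifetime sketch (illustrative; the actual call sites live elsewhere
 * in the SCTP code): the ulpq is embedded in the association, so a
 * caller pairs
 *
 *        sctp_ulpq_init(&asoc->ulpq, asoc);
 *
 * with sctp_ulpq_free(&asoc->ulpq), which flushes both queues first.
 */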

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
        if (ulpq->malloced)
                kfree(ulpq);
}

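/* The receive path for a DATA chunk, in short: turn the chunk into an
 * ulpevent, run it through reassembly (fragments wait on the reasm
 * queue), then through ordering (out-of-order SSNs wait on the lobby),
 * and deliver whatever became ready to the socket.
 */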
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if ((event) && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event)
                sctp_ulpq_tail_event(ulpq, event);

        return 0;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        struct list_head *list;
                        sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
                        list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                        INIT_LIST_HEAD(list);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

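/* Where an event lands, in short: with no association in partial
 * delivery, everything goes to sk_receive_queue.  While _this_
 * association is in PD, notifications and unfragmented messages wait
 * in the pd_lobby while fragments flow through (an MSG_EOR fragment
 * ends PD).  While a different association is in PD, events go to the
 * lobby unless fragment interleave is enabled.
 */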
/* Add a new event for propagation to the ULP.  If the SKB of 'event'
 * is on a list, it is the first such member of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;

        skb_list = (struct sk_buff_head *) skb->prev;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_ is
         * the association that is the cause of the partial delivery.
         */
        if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
                                queue = &sctp_sk(sk)->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /*
                         * If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sctp_sk(sk)->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sctp_sk(sk)->pd_lobby;
                }
        }

        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
        if (skb_list)
                sctp_skb_list_tail(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sk->sk_data_ready(sk, 0);
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                         struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list. We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * sctp payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
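 *
 * For example (illustrative): a message carried in TSNs 10..12 becomes
 * a single event whose skb chain looks like
 *
 *        f_frag (TSN 10) -> frag_list: skb (TSN 11) -> skb (TSN 12)
 *
 * with f_frag's len and data_len grown to cover the whole message.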
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next);

        /* Add the list of remaining fragments to the first fragment's
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);

                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, queue);

        /* If we did unshare, then free the old skb and re-assign. */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

        return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if the user sets
         * the SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some
         * things here to see if we can do PD.
         */
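        /* For example (illustrative): with the queue holding TSNs
         * 10(FIRST) 11(MIDDLE) 12(MIDDLE) 13(LAST), the walk below sets
         * first_frag at 10, advances next_tsn through 11 and 12, and
         * completes at 13.  A gap (say, 12 missing) leaves first_frag
         * reset and the fragments queued until the gap fills or partial
         * delivery kicks in.
         */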
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible PD.
                         */
                        if (pos == ulpq->reasm.next) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if ((first_frag) && (ctsn == next_tsn)) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(&ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn)
                                next_tsn++;
                        else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
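                /* I.e. (illustrative): in PD with the cumulative TSN
                 * ack point at 20, a fragment with TSN 25 just waits;
                 * one at or below 20 may extend the message being
                 * partially delivered.
                 */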
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
        return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
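/* E.g. (illustrative): a FORWARD TSN moving the cumulative point to 14
 * frees every queued fragment with TSN <= 14; a fragment at TSN 15
 * survives, since the walk stops at the first TSN past the new point.
 */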
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                } else
                        break;
        }
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;
        struct sk_buff_head temp;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed.  */
                if ((event) && (event->msg_flags & MSG_EOR)) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_ulpq_order(ulpq, event);
                }

                /* Send event to the ULP.  'event' is the sctp_ulpevent
                 * for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, event);
        }
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid, cssn;

        sid = event->stream;
        in  = &ulpq->asoc->ssnmap->in;

        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(event_list, pos);
        }
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                           struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

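/* Deliver an event in stream order, or park it on the lobby if its SSN
 * is not yet due.  E.g. (illustrative): if stream 2 currently expects
 * SSN 5, an arriving SSN 7 is stashed; once SSN 5 arrives it is
 * delivered and SSNs 6 and 7 are pulled off the lobby behind it.
 */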
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        in  = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                /* see if this ssn has been marked by skipping */
                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
                        sctp_ssn_next(in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                /* see if we have more ordered data that we can deliver */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *in;

        /* Note: The stream ID must be verified before this routine.  */
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}

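/* Free events from the tail of 'list' until at least 'needed' bytes
 * have been reneged, un-marking each TSN in the peer's tsn_map so that
 * the peer will retransmit the data.  Returns the number of bytes
 * actually freed.
 */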
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                struct sk_buff_head *list, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn;
        struct sk_buff *skb;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = __skb_dequeue_tail(list)) != NULL) {
                freed += skb_headlen(skb);
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                sctp_ulpevent_free(event);
                sctp_tsnmap_renege(tsnmap, tsn);
                if (freed >= needed)
                        return freed;
        }

        return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                struct sctp_chunk *chunk,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* If the user enabled the fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.   */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;

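        /* How much room we must free: the chunk's length on the wire
         * minus the DATA chunk header.  E.g. (illustrative) a chunk of
         * 1060 bytes with the 16-byte sctp_data_chunk_t header needs
         * 1044 bytes of payload space freed.
         */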
        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else
                needed = SCTP_DEFAULT_MAXWINDOW;

        freed = 0;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed) {
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
                }
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
                sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);

                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
        }

        sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sk->sk_data_ready(sk, 0);
}