linux/net/sctp/ulpqueue.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
                                              struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode  = 0;
        ulpq->malloced = 0;

        return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
        if (ulpq->malloced)
                kfree(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        sctp_data_chunk_t *hdr;
        struct sctp_ulpevent *event;

        hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if (event && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }
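        /* Note: 'temp' lives on this function's stack.  When the event
         * is passed down, sctp_ulpq_tail_event() recovers this list
         * head through the skb's backpointer, so the list stays valid
         * for the duration of the call even though it is never passed
         * down explicitly.
         */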

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event)
                sctp_ulpq_tail_event(ulpq, event);

        return 0;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

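        /* sp->pd_mode counts how many associations on this socket are
         * currently in partial delivery; it reaches zero only when the
         * last of them leaves PD mode.
         */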
        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot.
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        struct list_head *list;
                        sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
                        list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                        INIT_LIST_HEAD(list);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* Add a new event for propagation to the ULP.  If the SKB of 'event'
 * is on a list, it is the first such member of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;

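        /* Recover the temporary list head, if any.  This relies on the
         * sk_buff list layout: the first skb on a list has its prev
         * pointer aimed back at the sk_buff_head itself, while an skb
         * that is on no list has prev cleared to NULL by __skb_unlink().
         */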
        skb_list = (struct sk_buff_head *) skb->prev;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_
         * association is the cause of the partial delivery.
         */
        if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
                                queue = &sctp_sk(sk)->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /*
                         * If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sctp_sk(sk)->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sctp_sk(sk)->pd_lobby;
                }
        }

        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
        if (skb_list)
                sctp_skb_list_tail(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sk->sk_data_ready(sk, 0);
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                         struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

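        /* Example: with TSNs 103 and 105 already queued, storing 104
         * walks past 103, stops at 105 and is inserted before it.
         */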
        /* Find the right place in this list. We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented in transit and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue,
                                                         struct sk_buff *f_frag,
                                                         struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next)
                ;

        /* Add the list of remaining fragments to the first fragment's
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);

                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, queue);

        /* If we did unshare, then free the old skb and re-assign. */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

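        /* Walk the remaining fragments, folding their lengths into the
         * head skb.  __skb_unlink() clears pos->next, so the pointer is
         * restored to keep the frag_list chained; for the last fragment
         * it is deliberately left NULL to terminate the list.
         */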
        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

        return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if the user sets the
         * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
         * to see if we can do PD.
         */
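        /* Example: if the queue holds a FIRST fragment followed by
         * consecutive MIDDLEs totalling 6000 bytes but no LAST yet, and
         * the user set SCTP_PARTIAL_DELIVERY_POINT to 4096, the walk
         * below leaves pd_first/pd_last spanning those fragments and we
         * start partial delivery rather than wait for the LAST.
         */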
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible PD.
                         */
                        if (pos == ulpq->reasm.next) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if ((first_frag) && (ctsn == next_tsn)) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(&ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

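        /* We are only called in partial delivery mode, after the FIRST
         * fragment has already been handed to the user, so a deliverable
         * run begins directly with MIDDLE (or LAST) fragments at the
         * head of the queue.
         */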
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn)
                                next_tsn++;
                        else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */
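        /* Unlike sctp_ulpq_retrieve_reassembled(), no LAST fragment is
         * required here: a FIRST fragment plus any consecutive MIDDLEs
         * is enough, since the caller is about to place the association
         * in partial delivery.
         */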

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
        return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point.  The queue is sorted by TSN,
                 * so we can stop at the first fragment past it.
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                } else
                        break;
        }
}

/*
 * Drain the reassembly queue.  If we have just cleared partial delivery,
 * it is possible that the reassembly queue will contain already
 * reassembled messages.  Retrieve any such messages and give them to
 * the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;
        struct sk_buff_head temp;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed.  */
                if (event && (event->msg_flags & MSG_EOR)) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_ulpq_order(ulpq, event);
                }

                /* Send event to the ULP.  'event' is the sctp_ulpevent
                 * for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, event);
        }
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid;
        __u16 ssn, cssn;

        sid = event->stream;
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;

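        /* As in sctp_ulpq_tail_event(), the first skb on a temporary
         * list carries a backpointer to its sk_buff_head in skb->prev;
         * that is the list this event was queued on by our caller.
         */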
        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(event_list, pos);
        }
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                           struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;

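        /* Example: if this stream expects SSN 5 while SSNs 6 and 7 sit
         * in the lobby, delivering SSN 5 below also releases 6 and 7
         * in order via sctp_ulpq_retrieve_ordered().
         */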
        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by a Forward TSN skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        in  = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                /* See if this ssn has been marked by skipping. */
                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Remember the first skb; it heads the
                         * temporary list we deliver from.
                         */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
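        /* If the walk above ran off the end of the queue, 'pos' points
         * at the sk_buff_head sentinel itself; the cast comparison below
         * makes sure we only peek at a real skb left by an early break.
         */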
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
                        sctp_ssn_next(in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                /* See if we have more ordered data that we can deliver. */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *in;

        /* Note: The stream ID must be verified before this routine.  */
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                struct sk_buff_head *list, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn;
        struct sk_buff *skb;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

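        /* Free from the tail, i.e. the highest TSNs first, and tell the
         * tsnmap about each reneged TSN so the peer sees it as missing
         * and eventually retransmits it.
         */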
        while ((skb = __skb_dequeue_tail(list)) != NULL) {
                freed += skb_headlen(skb);
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                sctp_ulpevent_free(event);
                sctp_tsnmap_renege(tsnmap, tsn);
                if (freed >= needed)
                        return freed;
        }

        return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on the rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                struct sctp_chunk *chunk,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* If the user enabled the fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not already in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.  */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;

        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else {
                needed = SCTP_DEFAULT_MAXWINDOW;
        }

        freed = 0;

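        /* Renege only if nothing is already deliverable to the user;
         * raid the ordering lobby first and fall back on the reassembly
         * queue only if that did not free enough room.
         */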
        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
        }
        /* If we were able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
                sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);

                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
        }

        sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sk->sk_data_ready(sk, 0);
}