/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
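
/*
 * Illustrative sketch (not part of the original file): a round trip
 * through the two netobj helpers above. The six-byte payload is
 * hypothetical; the caller must supply a buffer with room for the
 * length word, the data, and the zero pad (here 4 + 8 bytes).
 */
static inline int xdr_netobj_example(__be32 *buf)
{
	static const u8 payload[6] = { 1, 2, 3, 4, 5, 6 };
	struct xdr_netobj in = { .len = 6, .data = (u8 *)payload };
	struct xdr_netobj out;
	__be32 *end;

	end = xdr_encode_netobj(buf, &in);
	/* xdr_decode_netobj() returns NULL if the length word exceeds
	 * XDR_MAX_NETOBJ; on success out.data aliases the buffer. */
	if (xdr_decode_netobj(buf, &out) != end || out.len != 6)
		return -EINVAL;
	return 0;
}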

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
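
/*
 * Worked example for the padding arithmetic above (added commentary):
 * for nbytes == 5, XDR_QUADLEN(5) == 2, so quadlen << 2 == 8 and
 * padding == 3. Five data bytes are copied, three zero bytes complete
 * the second 32-bit word, and p advances by two words.
 */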

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = ntohl(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
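
/*
 * Illustrative sketch (not part of the original file): attaching 5
 * bytes of page data to a send buffer. Since 5 & 3 != 0, the helper
 * above points the tail at 3 bytes of zero padding carved out of the
 * word following the head, keeping the stream 32-bit aligned. The
 * page array "pages" is hypothetical.
 */
static inline void xdr_encode_pages_example(struct xdr_buf *xdr,
					    struct page **pages)
{
	xdr_encode_pages(xdr, pages, 0, 5);
	/* xdr->buflen and xdr->len have now grown by 8: 5 data bytes
	 * plus 3 pad bytes accounted to the tail */
}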

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
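
/*
 * Note on the loop above (added commentary): because the destination
 * lies to the right of the source and the two areas may overlap, the
 * copy starts at the ends of both regions and works backwards toward
 * their beginnings, so no source byte is overwritten before it has
 * been copied out.
 */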

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
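
/*
 * Illustrative sketch (not part of the original file) of the usual
 * encode pattern: initialize the stream, reserve 32-bit-aligned space,
 * then fill it in. The opcode value and 12-byte layout (one word plus
 * a 2-byte opaque with its length word and pad) are hypothetical.
 */
static inline int xdr_encode_example(struct xdr_stream *xdr,
				     struct xdr_buf *buf)
{
	__be32 *p;

	xdr_init_encode(xdr, buf, NULL);
	p = xdr_reserve_space(xdr, 12);
	if (p == NULL)
		return -EMSGSIZE;		/* buffer too small */
	*p++ = htonl(42);			/* hypothetical opcode */
	xdr_encode_opaque(p, "ok", 2);		/* length + data + 2-byte pad */
	return 0;
}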

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position. If so,
 * advance the stream position past those bytes and return a pointer
 * to their original location.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
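
/*
 * Illustrative sketch (not part of the original file) of the matching
 * decode pattern: initialize the stream at the head of the message,
 * then pull fixed-size chunks with xdr_inline_decode(), checking for
 * truncation. The two-word layout decoded here is hypothetical.
 */
static inline int xdr_decode_example(struct xdr_stream *xdr,
				     struct xdr_buf *buf)
{
	__be32 *p;
	u32 opcode, count;

	xdr_init_decode(xdr, buf, buf->head[0].iov_base);
	p = xdr_inline_decode(xdr, 8);
	if (p == NULL)
		return -EIO;		/* message shorter than 8 bytes */
	opcode = ntohl(*p++);
	count = ntohl(*p);
	if (opcode != 42 || count > PAGE_CACHE_SIZE)
		return -EINVAL;		/* hypothetical sanity checks */
	return 0;
}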

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
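
/*
 * Illustrative sketch (not part of the original file): decoding a
 * message whose bulk payload lives in the page list, as an NFS READ
 * reply does. After xdr_read_pages() the stream points into the tail,
 * past the page data and its alignment pad.
 */
static inline int xdr_read_pages_example(struct xdr_stream *xdr)
{
	__be32 *p;
	u32 count;

	p = xdr_inline_decode(xdr, 4);
	if (p == NULL)
		return -EIO;
	count = ntohl(*p);		/* hypothetical payload length */
	xdr_read_pages(xdr, count);	/* realign and truncate page data */
	/* trailing fields would now be decoded from the tail */
	return 0;
}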

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char * kaddr = page_address(xdr->buf->pages[0]);
	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of the first page,
	 * and cap the remaining length at the end of that page.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
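
/*
 * Illustrative sketch (not part of the original file): carving out the
 * sub-buffer covering bytes [20, 120) of a parent buffer. The offsets
 * are hypothetical; no data is copied, the sub-buffer's head, pages
 * and tail simply alias whichever parts of the parent the range spans.
 */
static inline int xdr_buf_subsegment_example(struct xdr_buf *buf)
{
	struct xdr_buf sub;

	if (xdr_buf_subsegment(buf, &sub, 20, 100))
		return -EINVAL;		/* range ran past end of buffer */
	return 0;
}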

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
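
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above round-trip a single 32-bit word at an arbitrary byte offset,
 * even one that straddles the head/page or page/tail boundary. The
 * offset and value are hypothetical.
 */
static inline int xdr_word_example(struct xdr_buf *buf)
{
	u32 val;

	if (xdr_encode_word(buf, 8, 0xcafef00d))
		return -EINVAL;
	if (xdr_decode_word(buf, 8, &val) || val != 0xcafef00d)
		return -EINVAL;
	return 0;
}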

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
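
/*
 * Illustrative sketch (not part of the original file): a minimal
 * xdr_array2_desc user. The element type (one 32-bit flag word) and
 * the validity check are hypothetical; xdr_decode_array2() invokes
 * ->xcode once per element with elem_size contiguous bytes, buffering
 * elements that straddle page boundaries on the caller's behalf.
 */
static inline int example_flag_xcode(struct xdr_array2_desc *desc, void *elem)
{
	u32 flag = ntohl(*(__be32 *)elem);

	return flag > 1 ? -EINVAL : 0;
}

static inline int xdr_decode_array2_example(struct xdr_buf *buf,
					    unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size = 4,
		.array_maxlen = 64,
		.xcode = example_flag_xcode,
	};

	return xdr_decode_array2(buf, base, &desc);
}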

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist	sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
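
/*
 * Illustrative sketch (not part of the original file): a trivial actor
 * for xdr_process_buf(). Real callers pass hash-update callbacks; this
 * hypothetical one just totals the bytes visited across head, pages
 * and tail, e.g. xdr_process_buf(buf, 0, buf->len, count_actor, &n).
 */
static inline int count_actor(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;		/* a nonzero return would abort the walk */
}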