/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	len = be32_to_cpu(*p++);
	if (len > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
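
/*
 * Usage sketch (illustrative only, not part of the original file):
 * round-tripping a netobj through a caller-provided scratch buffer.
 * The 'cookie' data and buffer sizing here are hypothetical.
 *
 *	__be32 scratch[XDR_QUADLEN(XDR_MAX_NETOBJ) + 1];
 *	struct xdr_netobj in = { .len = 8, .data = cookie };
 *	struct xdr_netobj out;
 *
 *	xdr_encode_netobj(scratch, &in);
 *	if (xdr_decode_netobj(scratch, &out) == NULL)
 *		return -EIO;	// length word exceeded XDR_MAX_NETOBJ
 *	// out.data points into scratch[]: the decode is zero-copy.
 */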

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
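
/*
 * Worked example (illustrative): encoding nbytes = 5 gives
 * quadlen = XDR_QUADLEN(5) = 2 and padding = (2 << 2) - 5 = 3, so the
 * five data bytes are followed by three zero bytes and the returned
 * pointer has advanced by two 32-bit words.
 */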

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
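
/*
 * Illustrative wire layout: xdr_encode_string(p, "nfs") emits the
 * length word 3 followed by 'n' 'f' 's' and one byte of zero padding,
 * i.e. two 32-bit words in total:
 *
 *	00 00 00 03 | 6e 66 73 00
 */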

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
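
/*
 * Usage sketch (illustrative; the request variable and lengths are
 * hypothetical): splicing page data into a send buffer whose head
 * already contains the encoded call header and arguments.
 *
 *	// 'count' bytes of payload start at byte 'pgbase' of pages[].
 *	// Any XDR padding needed to reach a word boundary is placed
 *	// in the tail kvec, just past the head.
 *	xdr_encode_pages(&req->rq_snd_buf, pages, pgbase, count);
 */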

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
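
/*
 * Worked example (illustrative, assuming PAGE_CACHE_SIZE == 4096):
 * shifting 6000 bytes from page-vector address 100 to address 300
 * starts from the far ends (6100 and 6300) and copies chunks
 * backwards. Each chunk lands at addresses above every source byte
 * still to be copied (memmove() covers the overlap inside a chunk),
 * so the overlapping areas survive the move.
 */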

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
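
/*
 * Usage sketch (illustrative): a typical encode sequence. FOO_MAGIC,
 * 'data' and 'len' are hypothetical, and -EMSGSIZE is just an example
 * error choice.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL);
 *	p = xdr_reserve_space(&xdr, 8 + (XDR_QUADLEN(len) << 2));
 *	if (p == NULL)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(FOO_MAGIC);
 *	p = xdr_encode_opaque(p, data, len);	// fits in reserved space
 */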

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
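
/*
 * Usage sketch (illustrative): decoding two 32-bit words from a reply.
 * The reply pointer 'p0' and the error choice are hypothetical.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *	u32 status, count;
 *
 *	xdr_init_decode(&xdr, &req->rq_rcv_buf, p0);
 *	p = xdr_inline_decode(&xdr, 8);
 *	if (p == NULL)
 *		return -EIO;	// reply too short
 *	status = be32_to_cpup(p++);
 *	count = be32_to_cpup(p);
 */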

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
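
/*
 * Usage sketch (illustrative): in a READ-style reply, once the fixed
 * header has been decoded, align 'count' bytes of payload with the
 * page vector and pick up any trailing words from the tail. The
 * trailing 'eof' word is hypothetical.
 *
 *	xdr_read_pages(&xdr, count);
 *	p = xdr_inline_decode(&xdr, 4);
 *	if (p != NULL)
 *		eof = be32_to_cpup(p);
 */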

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char *kaddr = page_address(xdr->buf->pages[0]);

	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of the first page,
	 * and clamp the decode window to that page.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
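
/*
 * Usage sketch (illustrative): isolating the region that holds an
 * opaque body so it can be read or rewritten without disturbing the
 * rest of the message; 'body_offset' and 'body_len' are hypothetical.
 *
 *	struct xdr_buf subbuf;
 *
 *	if (xdr_buf_subsegment(buf, &subbuf, body_offset, body_len))
 *		return -EFAULT;	// region is out of bounds
 *	// subbuf now aliases buf: no data is copied.
 */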

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
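
/*
 * Round-trip sketch (illustrative): rewriting a length word in place
 * at byte offset 'base' and reading it back. Both helpers cope with a
 * word that straddles the head/page/tail boundaries.
 *
 *	u32 val;
 *
 *	if (xdr_encode_word(buf, base, 42) != 0)
 *		return -EINVAL;	// base + 4 is out of bounds
 *	if (xdr_decode_word(buf, base, &val) != 0)
 *		return -EINVAL;
 *	// val == 42
 */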

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
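
/*
 * Usage sketch (illustrative): decoding a counted array of fixed-size
 * elements with a per-element callback. 'struct foo', foo_xcode() and
 * FOO_MAX are hypothetical.
 *
 *	static int foo_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		struct foo *f = elem;
 *		// validate or byte-swap one element here
 *		return 0;	// nonzero aborts the walk
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= sizeof(struct foo),
 *		.array_maxlen	= FOO_MAX,
 *		.xcode		= foo_xcode,
 *	};
 *
 *	err = xdr_decode_array2(buf, offset, &desc);
 *	// on success desc.array_len holds the decoded element count
 */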

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist	sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
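
/*
 * Usage sketch (illustrative): xdr_process_buf() hands each segment
 * of the buffer to 'actor' as a one-entry scatterlist, which suits
 * incremental checksum or crypto interfaces. A minimal byte-counting
 * actor (hypothetical):
 *
 *	static int count_actor(struct scatterlist *sg, void *data)
 *	{
 *		unsigned int *total = data;
 *
 *		*total += sg->length;
 *		return 0;	// nonzero aborts the walk
 *	}
 *
 *	unsigned int total = 0;
 *	int err = xdr_process_buf(buf, 0, buf->len, count_actor, &total);
 */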