   1/*
   2 * linux/net/sunrpc/socklib.c
   3 *
   4 * Common socket helper routines for RPC client and server
   5 *
   6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
   7 */
   8
   9#include <linux/compiler.h>
  10#include <linux/netdevice.h>
  11#include <linux/skbuff.h>
  12#include <linux/types.h>
  13#include <linux/pagemap.h>
  14#include <linux/udp.h>
  15#include <linux/sunrpc/xdr.h>
  16
  17
  18/**
  19 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
  20 * @desc: sk_buff copy helper
  21 * @to: copy destination
  22 * @len: number of bytes to copy
  23 *
  24 * Possibly called several times to iterate over an sk_buff and copy
  25 * data out of it.
  26 */
  27size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
  28{
  29        if (len > desc->count)
  30                len = desc->count;
  31        if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
  32                return 0;
  33        desc->count -= len;
  34        desc->offset += len;
  35        return len;
  36}
  37EXPORT_SYMBOL_GPL(xdr_skb_read_bits);
  38
  39/**
  40 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
  41 * @desc: sk_buff copy helper
  42 * @to: copy destination
  43 * @len: number of bytes to copy
  44 *
  45 * Same as skb_read_bits, but calculate a checksum at the same time.
  46 */
  47static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
  48{
  49        unsigned int pos;
  50        __wsum csum2;
  51
  52        if (len > desc->count)
  53                len = desc->count;
  54        pos = desc->offset;
  55        csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
  56        desc->csum = csum_block_add(desc->csum, csum2, pos);
  57        desc->count -= len;
  58        desc->offset += len;
  59        return len;
  60}
  61
  62/**
  63 * xdr_partial_copy_from_skb - copy data out of an skb
  64 * @xdr: target XDR buffer
  65 * @base: starting offset
  66 * @desc: sk_buff copy helper
  67 * @copy_actor: virtual method for copying data
  68 *
  69 */
  70ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
  71{
  72        struct page     **ppage = xdr->pages;
  73        unsigned int    len, pglen = xdr->page_len;
  74        ssize_t         copied = 0;
  75        size_t          ret;
  76
  77        len = xdr->head[0].iov_len;
  78        if (base < len) {
  79                len -= base;
  80                ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
  81                copied += ret;
  82                if (ret != len || !desc->count)
  83                        goto out;
  84                base = 0;
  85        } else
  86                base -= len;
  87
  88        if (unlikely(pglen == 0))
  89                goto copy_tail;
  90        if (unlikely(base >= pglen)) {
  91                base -= pglen;
  92                goto copy_tail;
  93        }
  94        if (base || xdr->page_base) {
  95                pglen -= base;
  96                base += xdr->page_base;
  97                ppage += base >> PAGE_CACHE_SHIFT;
  98                base &= ~PAGE_CACHE_MASK;
  99        }
 100        do {
 101                char *kaddr;
 102
 103                /* ACL likes to be lazy in allocating pages - ACLs
 104                 * are small by default but can get huge. */
 105                if (unlikely(*ppage == NULL)) {
 106                        *ppage = alloc_page(GFP_ATOMIC);
 107                        if (unlikely(*ppage == NULL)) {
 108                                if (copied == 0)
 109                                        copied = -ENOMEM;
 110                                goto out;
 111                        }
 112                }
 113
 114                len = PAGE_CACHE_SIZE;
 115                kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
 116                if (base) {
 117                        len -= base;
 118                        if (pglen < len)
 119                                len = pglen;
 120                        ret = copy_actor(desc, kaddr + base, len);
 121                        base = 0;
 122                } else {
 123                        if (pglen < len)
 124                                len = pglen;
 125                        ret = copy_actor(desc, kaddr, len);
 126                }
 127                flush_dcache_page(*ppage);
 128                kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
 129                copied += ret;
 130                if (ret != len || !desc->count)
 131                        goto out;
 132                ppage++;
 133        } while ((pglen -= len) != 0);
 134copy_tail:
 135        len = xdr->tail[0].iov_len;
 136        if (base < len)
 137                copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
 138out:
 139        return copied;
 140}
 141EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);
 142
 143/**
 144 * csum_partial_copy_to_xdr - checksum and copy data
 145 * @xdr: target XDR buffer
 146 * @skb: source skb
 147 *
 148 * We have set things up such that we perform the checksum of the UDP
 149 * packet in parallel with the copies into the RPC client iovec.  -DaveM
 150 */
 151int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 152{
 153        struct xdr_skb_reader   desc;
 154
 155        desc.skb = skb;
 156        desc.offset = sizeof(struct udphdr);
 157        desc.count = skb->len - desc.offset;
 158
 159        if (skb_csum_unnecessary(skb))
 160                goto no_checksum;
 161
 162        desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
 163        if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
 164                return -1;
 165        if (desc.offset != skb->len) {
 166                __wsum csum2;
 167                csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
 168                desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
 169        }
 170        if (desc.count)
 171                return -1;
 172        if (csum_fold(desc.csum))
 173                return -1;
 174        if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 175                netdev_rx_csum_fault(skb->dev);
 176        return 0;
 177no_checksum:
 178        if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
 179                return -1;
 180        if (desc.count)
 181                return -1;
 182        return 0;
 183}
 184EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
 185