linux/net/core/iovec.c
<<
>>
Prefs
/*
 *	iovec manipulation routines.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *		Andrew Lunn	:	Errors in iovec copying.
 *		Pedro Roque	:	Added memcpy_fromiovecend and
 *					csum_..._fromiovecend.
 *		Andi Kleen	:	fixed error handling for 2.1
 *		Alexey Kuznetsov:	2.1 optimisations
 *		Andi Kleen	:	Fix csum*fromiovecend for IPv6.
 */
  18
  19#include <linux/errno.h>
  20#include <linux/module.h>
  21#include <linux/kernel.h>
  22#include <linux/mm.h>
  23#include <linux/net.h>
  24#include <linux/in6.h>
  25#include <asm/uaccess.h>
  26#include <asm/byteorder.h>
  27#include <net/checksum.h>
  28#include <net/sock.h>
  29
  30/*
  31 *      Verify iovec. The caller must ensure that the iovec is big enough
  32 *      to hold the message iovec.
  33 *
  34 *      Save time not doing access_ok. copy_*_user will make this work
  35 *      in any case.
  36 */
  37
  38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
  39{
  40        int size, ct, err;
  41
  42        if (m->msg_namelen) {
  43                if (mode == VERIFY_READ) {
  44                        void __user *namep;
  45                        namep = (void __user __force *) m->msg_name;
  46                        err = move_addr_to_kernel(namep, m->msg_namelen,
  47                                                  address);
  48                        if (err < 0)
  49                                return err;
  50                }
  51                m->msg_name = address;
  52        } else {
  53                m->msg_name = NULL;
  54        }
  55
  56        size = m->msg_iovlen * sizeof(struct iovec);
  57        if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
  58                return -EFAULT;
  59
  60        m->msg_iov = iov;
  61        err = 0;
  62
  63        for (ct = 0; ct < m->msg_iovlen; ct++) {
  64                size_t len = iov[ct].iov_len;
  65
  66                if (len > INT_MAX - err) {
  67                        len = INT_MAX - err;
  68                        iov[ct].iov_len = len;
  69                }
  70                err += len;
  71        }
  72
  73        return err;
  74}
  75
  76/*
  77 *      Copy kernel to iovec. Returns -EFAULT on error.
  78 *
  79 *      Note: this modifies the original iovec.
  80 */
  81
  82int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
  83{
  84        while (len > 0) {
  85                if (iov->iov_len) {
  86                        int copy = min_t(unsigned int, iov->iov_len, len);
  87                        if (copy_to_user(iov->iov_base, kdata, copy))
  88                                return -EFAULT;
  89                        kdata += copy;
  90                        len -= copy;
  91                        iov->iov_len -= copy;
  92                        iov->iov_base += copy;
  93                }
  94                iov++;
  95        }
  96
  97        return 0;
  98}
  99EXPORT_SYMBOL(memcpy_toiovec);
 100
 101/*
 102 *      Copy kernel to iovec. Returns -EFAULT on error.
 103 */
 104
 105int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
 106                      int offset, int len)
 107{
 108        int copy;
 109        for (; len > 0; ++iov) {
 110                /* Skip over the finished iovecs */
 111                if (unlikely(offset >= iov->iov_len)) {
 112                        offset -= iov->iov_len;
 113                        continue;
 114                }
 115                copy = min_t(unsigned int, iov->iov_len - offset, len);
 116                if (copy_to_user(iov->iov_base + offset, kdata, copy))
 117                        return -EFAULT;
 118                offset = 0;
 119                kdata += copy;
 120                len -= copy;
 121        }
 122
 123        return 0;
 124}
 125EXPORT_SYMBOL(memcpy_toiovecend);
 126
 127/*
 128 *      Copy iovec to kernel. Returns -EFAULT on error.
 129 *
 130 *      Note: this modifies the original iovec.
 131 */
 132
 133int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
 134{
 135        while (len > 0) {
 136                if (iov->iov_len) {
 137                        int copy = min_t(unsigned int, len, iov->iov_len);
 138                        if (copy_from_user(kdata, iov->iov_base, copy))
 139                                return -EFAULT;
 140                        len -= copy;
 141                        kdata += copy;
 142                        iov->iov_base += copy;
 143                        iov->iov_len -= copy;
 144                }
 145                iov++;
 146        }
 147
 148        return 0;
 149}
 150EXPORT_SYMBOL(memcpy_fromiovec);
 151
 152/*
 153 *      Copy iovec from kernel. Returns -EFAULT on error.
 154 */
 155
 156int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
 157                        int offset, int len)
 158{
 159        /* Skip over the finished iovecs */
 160        while (offset >= iov->iov_len) {
 161                offset -= iov->iov_len;
 162                iov++;
 163        }
 164
 165        while (len > 0) {
 166                u8 __user *base = iov->iov_base + offset;
 167                int copy = min_t(unsigned int, len, iov->iov_len - offset);
 168
 169                offset = 0;
 170                if (copy_from_user(kdata, base, copy))
 171                        return -EFAULT;
 172                len -= copy;
 173                kdata += copy;
 174                iov++;
 175        }
 176
 177        return 0;
 178}
 179EXPORT_SYMBOL(memcpy_fromiovecend);
 180
/*
 *	And now for the all-in-one: copy and checksum from a user iovec
 *	directly to a datagram
 *	Calls to csum_partial but the last must be in 32 bit chunks
 *
 *	ip_build_xmit must ensure that when fragmenting only the last
 *	call to this function will be unaligned also.
 *
 *	Copies 'len' bytes starting 'offset' bytes into 'iov' to 'kdata'
 *	while folding them into the running checksum at *csump. Returns 0
 *	or -EFAULT; on success *csump holds the updated checksum. The
 *	caller must ensure the iovec covers at least offset+len bytes.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				 int offset, unsigned int len, __wsum *csump)
{
	__wsum csum = *csump;
	/* partial_cnt counts the bytes (0..3) already copied of a 4-byte
	 * checksum unit that straddles an iovec boundary. */
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		/* Only the first entry carries a nonzero offset. */
		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				/* Still cannot complete the 4-byte unit:
				 * copy what is here and try the next iovec. */
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				/* Input exhausted mid-unit: checksum the
				 * trailing partial_cnt bytes (they sit just
				 * below kdata) and finish. */
				*csump = csum_partial(kdata - partial_cnt,
							 partial_cnt, csum);
				goto out;
			}
			/* Complete the straddling unit, then fold the whole
			 * 4-byte unit into the checksum in one go. */
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base  += par_len;
			copy  -= par_len;
			len   -= par_len;
			partial_cnt = 0;
		}

		/* More data follows in later iovecs: peel off the 1-3 bytes
		 * that would leave this chunk a non-multiple of 4. They are
		 * copied now but checksummed on a later pass, together with
		 * the first bytes of the next iovec (see the remnant branch
		 * above). */
		if (len > copy) {
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						partial_cnt))
					goto out_fault;
			}
		}

		/* Copy-and-checksum the 4-byte-aligned bulk of this chunk. */
		if (copy) {
			csum = csum_and_copy_from_user(base, kdata, copy,
							csum, &err);
			if (err)
				goto out;
		}
		/* Advance past both the bulk and the peeled-off remnant. */
		len   -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
 265
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.