linux/lib/scatterlist.c
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
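
/*
 * Example (illustrative sketch, not part of this file): walking a list
 * by hand with sg_next().  The "sgl" and "nents" names are hypothetical;
 * the for_each_sg() macro expands to exactly this kind of loop.
 *
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	for (i = 0, sg = sgl; i < nents && sg; i++, sg = sg_next(sg))
 *		pr_debug("entry %u: %u bytes\n", i, sg->length);
 */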

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
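
/*
 * Example (illustrative sketch): a driver trimming padding off the tail
 * of a list.  "sgl", "nents" and "pad" are hypothetical names.
 *
 *	struct scatterlist *last = sg_last(sgl, nents);
 *
 *	last->length -= pad;
 */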

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
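
/*
 * Example (illustrative sketch): initializing a small on-stack array as
 * a two-entry sg list.  The "hdr"/"payload" buffer names are
 * hypothetical.
 *
 *	struct scatterlist sgl[2];
 *
 *	sg_init_table(sgl, ARRAY_SIZE(sgl));
 *	sg_set_buf(&sgl[0], hdr, sizeof(*hdr));
 *	sg_set_buf(&sgl[1], payload, payload_len);
 */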

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
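
/*
 * Example (illustrative sketch): the common case of handing a single
 * kmalloc'd buffer to an API that takes a scatterlist.  "len" is a
 * hypothetical name.
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	sg_init_one(&sg, buf, len);
 */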

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}
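
/*
 * Note: SG_MAX_SINGLE_ALLOC is PAGE_SIZE / sizeof(struct scatterlist)
 * (see <linux/scatterlist.h>), so the special case above is exactly a
 * full page of entries, which is why it is backed by
 * __get_free_page()/free_page() rather than kmalloc()/kfree().
 */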

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
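
/*
 * Example (illustrative sketch): pairing __sg_alloc_table() with a
 * custom allocator, the way sg_alloc_table() below pairs it with
 * sg_kmalloc()/sg_kfree().  The "my_"-prefixed functions are
 * hypothetical; they just have to match the sg_alloc_fn/sg_free_fn
 * signatures, and the same max_ents must be used for alloc and free.
 *
 *	static struct scatterlist *my_sg_alloc(unsigned int nents,
 *					       gfp_t gfp_mask)
 *	{
 *		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 *	}
 *
 *	static void my_sg_free(struct scatterlist *sg, unsigned int nents)
 *	{
 *		kfree(sg);
 *	}
 *
 *	ret = __sg_alloc_table(&table, nents, SG_MAX_SINGLE_ALLOC,
 *			       GFP_KERNEL, my_sg_alloc);
 *	if (ret)
 *		__sg_free_table(&table, SG_MAX_SINGLE_ALLOC, my_sg_free);
 */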

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
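
/*
 * Example (illustrative sketch): typical driver usage, mapping one page
 * per entry.  "npages" and "pages" are hypothetical names.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *	int ret;
 *
 *	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */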

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset into the first page and the size of valid data in
 *    the buffer specified by the page array. The returned sg table is
 *    released by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
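
/*
 * Example (illustrative sketch): building an sg table over pages pinned
 * elsewhere (e.g. via get_user_pages()).  "pages", "n_pages", "offset"
 * and "len" are hypothetical names for values obtained by the caller.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages,
 *					offset, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */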

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping.  @miter@ should have been
 *   started using sg_miter_start().  On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
 *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
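
/*
 * Example (illustrative sketch): the canonical start/next/stop loop,
 * here zero-filling every byte a list describes.  "sgl" and "nents" are
 * hypothetical; sg_copy_buffer() below is the in-file user of the same
 * pattern.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 */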

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources
 *   (kmap) need to be released during iteration.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC is set.  Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The linear buffer to copy to or from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
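
/*
 * Example (illustrative sketch): draining an sg list into a linear
 * buffer, e.g. to parse a response a device scattered across pages.
 * "sgl", "nents", "resp" and "resp_len" are hypothetical names.
 *
 *	size_t copied;
 *
 *	copied = sg_copy_to_buffer(sgl, nents, resp, resp_len);
 *	if (copied < resp_len)
 *		return -EIO;
 */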