/* linux/drivers/dma/dmatest.c */
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
  21
  22static unsigned int test_buf_size = 16384;
  23module_param(test_buf_size, uint, S_IRUGO);
  24MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
  25
  26static char test_channel[20];
  27module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
  28MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
  29
  30static char test_device[20];
  31module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
  32MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
  33
  34static unsigned int threads_per_chan = 1;
  35module_param(threads_per_chan, uint, S_IRUGO);
  36MODULE_PARM_DESC(threads_per_chan,
  37                "Number of threads to start per channel (default: 1)");
  38
  39static unsigned int max_channels;
  40module_param(max_channels, uint, S_IRUGO);
  41MODULE_PARM_DESC(max_channels,
  42                "Maximum number of channels to use (default: all)");
  43
  44static unsigned int iterations;
  45module_param(iterations, uint, S_IRUGO);
  46MODULE_PARM_DESC(iterations,
  47                "Iterations before stopping test (default: infinite)");
  48
  49static unsigned int xor_sources = 3;
  50module_param(xor_sources, uint, S_IRUGO);
  51MODULE_PARM_DESC(xor_sources,
  52                "Number of xor source buffers (default: 3)");
  53
  54static unsigned int pq_sources = 3;
  55module_param(pq_sources, uint, S_IRUGO);
  56MODULE_PARM_DESC(pq_sources,
  57                "Number of p+q source buffers (default: 3)");
  58
  59static int timeout = 3000;
  60module_param(timeout, uint, S_IRUGO);
  61MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
  62                 "Pass -1 for infinite timeout");
  63
/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC             0x80    /* bit 7 set: byte lives in a source buffer */
#define PATTERN_DST             0x00    /* bit 7 clear: destination buffer byte */
#define PATTERN_COPY            0x40    /* bit 6: byte is inside the region to be copied */
#define PATTERN_OVERWRITE       0x20    /* bit 5: byte is to be overwritten by the engine */
#define PATTERN_COUNT_MASK      0x1f    /* low 5 bits: inverted per-byte counter */
  80
/* Per-thread test state; one kthread runs dmatest_func() on one channel. */
struct dmatest_thread {
        struct list_head        node;   /* entry in dmatest_chan.threads */
        struct task_struct      *task;  /* the kthread running dmatest_func() */
        struct dma_chan         *chan;  /* channel under test */
        u8                      **srcs; /* NULL-terminated array of source buffers */
        u8                      **dsts; /* NULL-terminated array of destination buffers */
        enum dma_transaction_type type; /* DMA_MEMCPY, DMA_XOR or DMA_PQ */
};
  89
/* One channel under test, with the list of threads hammering it. */
struct dmatest_chan {
        struct list_head        node;   /* entry in the global dmatest_channels list */
        struct dma_chan         *chan;  /* the channel itself */
        struct list_head        threads; /* dmatest_thread.node list */
};
  95
/*
 * These are protected by dma_list_mutex since they're only used by
 * the DMA filter function callback
 */
static LIST_HEAD(dmatest_channels);     /* all dmatest_chan instances in use */
static unsigned int nr_channels;        /* number of entries on dmatest_channels */
 102
 103static bool dmatest_match_channel(struct dma_chan *chan)
 104{
 105        if (test_channel[0] == '\0')
 106                return true;
 107        return strcmp(dma_chan_name(chan), test_channel) == 0;
 108}
 109
 110static bool dmatest_match_device(struct dma_device *device)
 111{
 112        if (test_device[0] == '\0')
 113                return true;
 114        return strcmp(dev_name(device->dev), test_device) == 0;
 115}
 116
 117static unsigned long dmatest_random(void)
 118{
 119        unsigned long buf;
 120
 121        get_random_bytes(&buf, sizeof(buf));
 122        return buf;
 123}
 124
 125static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
 126{
 127        unsigned int i;
 128        u8 *buf;
 129
 130        for (; (buf = *bufs); bufs++) {
 131                for (i = 0; i < start; i++)
 132                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
 133                for ( ; i < start + len; i++)
 134                        buf[i] = PATTERN_SRC | PATTERN_COPY
 135                                | (~i & PATTERN_COUNT_MASK);
 136                for ( ; i < test_buf_size; i++)
 137                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
 138                buf++;
 139        }
 140}
 141
 142static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
 143{
 144        unsigned int i;
 145        u8 *buf;
 146
 147        for (; (buf = *bufs); bufs++) {
 148                for (i = 0; i < start; i++)
 149                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
 150                for ( ; i < start + len; i++)
 151                        buf[i] = PATTERN_DST | PATTERN_OVERWRITE
 152                                | (~i & PATTERN_COUNT_MASK);
 153                for ( ; i < test_buf_size; i++)
 154                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
 155        }
 156}
 157
/*
 * Report one miscompared byte.  The pattern bits of the failing byte
 * classify the failure: a changed source byte (must never happen), a
 * destination byte in the copy window that was not copied, a
 * destination byte outside the window that was clobbered by source
 * data, or a plain counter mismatch.
 */
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
                unsigned int counter, bool is_srcbuf)
{
        u8              diff = actual ^ pattern;        /* which pattern bits flipped */
        u8              expected = pattern | (~counter & PATTERN_COUNT_MASK);
        const char      *thread_name = current->comm;

        if (is_srcbuf)
                /* source buffers must never be written by the engine */
                pr_warning("%s: srcbuf[0x%x] overwritten!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else if ((pattern & PATTERN_COPY)
                        && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
                /* byte inside the copy window kept its DST pattern */
                pr_warning("%s: dstbuf[0x%x] not copied!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else if (diff & PATTERN_SRC)
                /* byte outside the copy window now carries SRC data */
                pr_warning("%s: dstbuf[0x%x] was copied!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else
                /* counter bits are wrong: copy landed at the wrong offset */
                pr_warning("%s: dstbuf[0x%x] mismatch!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
}
 183
 184static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 185                unsigned int end, unsigned int counter, u8 pattern,
 186                bool is_srcbuf)
 187{
 188        unsigned int i;
 189        unsigned int error_count = 0;
 190        u8 actual;
 191        u8 expected;
 192        u8 *buf;
 193        unsigned int counter_orig = counter;
 194
 195        for (; (buf = *bufs); bufs++) {
 196                counter = counter_orig;
 197                for (i = start; i < end; i++) {
 198                        actual = buf[i];
 199                        expected = pattern | (~counter & PATTERN_COUNT_MASK);
 200                        if (actual != expected) {
 201                                if (error_count < 32)
 202                                        dmatest_mismatch(actual, pattern, i,
 203                                                         counter, is_srcbuf);
 204                                error_count++;
 205                        }
 206                        counter++;
 207                }
 208        }
 209
 210        if (error_count > 32)
 211                pr_warning("%s: %u errors suppressed\n",
 212                        current->comm, error_count - 32);
 213
 214        return error_count;
 215}
 216
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
        bool                    done;   /* set by dmatest_callback() when the transfer finishes */
        wait_queue_head_t       *wait;  /* waitqueue the test thread sleeps on */
};
 222
 223static void dmatest_callback(void *arg)
 224{
 225        struct dmatest_done *done = arg;
 226
 227        done->done = true;
 228        wake_up_all(done->wait);
 229}
 230
 231static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
 232                             unsigned int count)
 233{
 234        while (count--)
 235                dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
 236}
 237
 238static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
 239                             unsigned int count)
 240{
 241        while (count--)
 242                dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
 243}
 244
 245static unsigned int min_odd(unsigned int x, unsigned int y)
 246{
 247        unsigned int val = min(x, y);
 248
 249        return val % 2 ? val : val - 1;
 250}
 251
 252/*
 253 * This function repeatedly tests DMA transfers of various lengths and
 254 * offsets for a given operation type until it is told to exit by
 255 * kthread_stop(). There may be multiple threads running this function
 256 * in parallel for a single channel, and there may be multiple channels
 257 * being tested in parallel.
 258 *
 259 * Before each test, the source and destination buffer is initialized
 260 * with a known pattern. This pattern is different depending on
 261 * whether it's in an area which is supposed to be copied or
 262 * overwritten, and different in the source and destination buffers.
 263 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 264 * we'll notice.
 265 */
 266static int dmatest_func(void *data)
 267{
 268        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 269        struct dmatest_thread   *thread = data;
 270        struct dmatest_done     done = { .wait = &done_wait };
 271        struct dma_chan         *chan;
 272        struct dma_device       *dev;
 273        const char              *thread_name;
 274        unsigned int            src_off, dst_off, len;
 275        unsigned int            error_count;
 276        unsigned int            failed_tests = 0;
 277        unsigned int            total_tests = 0;
 278        dma_cookie_t            cookie;
 279        enum dma_status         status;
 280        enum dma_ctrl_flags     flags;
 281        u8                      pq_coefs[pq_sources + 1];
 282        int                     ret;
 283        int                     src_cnt;
 284        int                     dst_cnt;
 285        int                     i;
 286
 287        thread_name = current->comm;
 288        set_freezable();
 289
 290        ret = -ENOMEM;
 291
 292        smp_rmb();
 293        chan = thread->chan;
 294        dev = chan->device;
 295        if (thread->type == DMA_MEMCPY)
 296                src_cnt = dst_cnt = 1;
 297        else if (thread->type == DMA_XOR) {
 298                /* force odd to ensure dst = src */
 299                src_cnt = min_odd(xor_sources | 1, dev->max_xor);
 300                dst_cnt = 1;
 301        } else if (thread->type == DMA_PQ) {
 302                /* force odd to ensure dst = src */
 303                src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
 304                dst_cnt = 2;
 305                for (i = 0; i < src_cnt; i++)
 306                        pq_coefs[i] = 1;
 307        } else
 308                goto err_srcs;
 309
 310        thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
 311        if (!thread->srcs)
 312                goto err_srcs;
 313        for (i = 0; i < src_cnt; i++) {
 314                thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
 315                if (!thread->srcs[i])
 316                        goto err_srcbuf;
 317        }
 318        thread->srcs[i] = NULL;
 319
 320        thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
 321        if (!thread->dsts)
 322                goto err_dsts;
 323        for (i = 0; i < dst_cnt; i++) {
 324                thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
 325                if (!thread->dsts[i])
 326                        goto err_dstbuf;
 327        }
 328        thread->dsts[i] = NULL;
 329
 330        set_user_nice(current, 10);
 331
 332        /*
 333         * src buffers are freed by the DMAEngine code with dma_unmap_single()
 334         * dst buffers are freed by ourselves below
 335         */
 336        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
 337              | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
 338
 339        while (!kthread_should_stop()
 340               && !(iterations && total_tests >= iterations)) {
 341                struct dma_async_tx_descriptor *tx = NULL;
 342                dma_addr_t dma_srcs[src_cnt];
 343                dma_addr_t dma_dsts[dst_cnt];
 344                u8 align = 0;
 345
 346                total_tests++;
 347
 348                /* honor alignment restrictions */
 349                if (thread->type == DMA_MEMCPY)
 350                        align = dev->copy_align;
 351                else if (thread->type == DMA_XOR)
 352                        align = dev->xor_align;
 353                else if (thread->type == DMA_PQ)
 354                        align = dev->pq_align;
 355
 356                if (1 << align > test_buf_size) {
 357                        pr_err("%u-byte buffer too small for %d-byte alignment\n",
 358                               test_buf_size, 1 << align);
 359                        break;
 360                }
 361
 362                len = dmatest_random() % test_buf_size + 1;
 363                len = (len >> align) << align;
 364                if (!len)
 365                        len = 1 << align;
 366                src_off = dmatest_random() % (test_buf_size - len + 1);
 367                dst_off = dmatest_random() % (test_buf_size - len + 1);
 368
 369                src_off = (src_off >> align) << align;
 370                dst_off = (dst_off >> align) << align;
 371
 372                dmatest_init_srcs(thread->srcs, src_off, len);
 373                dmatest_init_dsts(thread->dsts, dst_off, len);
 374
 375                for (i = 0; i < src_cnt; i++) {
 376                        u8 *buf = thread->srcs[i] + src_off;
 377
 378                        dma_srcs[i] = dma_map_single(dev->dev, buf, len,
 379                                                     DMA_TO_DEVICE);
 380                        ret = dma_mapping_error(dev->dev, dma_srcs[i]);
 381                        if (ret) {
 382                                unmap_src(dev->dev, dma_srcs, len, i);
 383                                pr_warn("%s: #%u: mapping error %d with "
 384                                        "src_off=0x%x len=0x%x\n",
 385                                        thread_name, total_tests - 1, ret,
 386                                        src_off, len);
 387                                failed_tests++;
 388                                continue;
 389                        }
 390                }
 391                /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 392                for (i = 0; i < dst_cnt; i++) {
 393                        dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
 394                                                     test_buf_size,
 395                                                     DMA_BIDIRECTIONAL);
 396                        ret = dma_mapping_error(dev->dev, dma_dsts[i]);
 397                        if (ret) {
 398                                unmap_src(dev->dev, dma_srcs, len, src_cnt);
 399                                unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
 400                                pr_warn("%s: #%u: mapping error %d with "
 401                                        "dst_off=0x%x len=0x%x\n",
 402                                        thread_name, total_tests - 1, ret,
 403                                        dst_off, test_buf_size);
 404                                failed_tests++;
 405                                continue;
 406                        }
 407                }
 408
 409                if (thread->type == DMA_MEMCPY)
 410                        tx = dev->device_prep_dma_memcpy(chan,
 411                                                         dma_dsts[0] + dst_off,
 412                                                         dma_srcs[0], len,
 413                                                         flags);
 414                else if (thread->type == DMA_XOR)
 415                        tx = dev->device_prep_dma_xor(chan,
 416                                                      dma_dsts[0] + dst_off,
 417                                                      dma_srcs, src_cnt,
 418                                                      len, flags);
 419                else if (thread->type == DMA_PQ) {
 420                        dma_addr_t dma_pq[dst_cnt];
 421
 422                        for (i = 0; i < dst_cnt; i++)
 423                                dma_pq[i] = dma_dsts[i] + dst_off;
 424                        tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
 425                                                     src_cnt, pq_coefs,
 426                                                     len, flags);
 427                }
 428
 429                if (!tx) {
 430                        unmap_src(dev->dev, dma_srcs, len, src_cnt);
 431                        unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
 432                        pr_warning("%s: #%u: prep error with src_off=0x%x "
 433                                        "dst_off=0x%x len=0x%x\n",
 434                                        thread_name, total_tests - 1,
 435                                        src_off, dst_off, len);
 436                        msleep(100);
 437                        failed_tests++;
 438                        continue;
 439                }
 440
 441                done.done = false;
 442                tx->callback = dmatest_callback;
 443                tx->callback_param = &done;
 444                cookie = tx->tx_submit(tx);
 445
 446                if (dma_submit_error(cookie)) {
 447                        pr_warning("%s: #%u: submit error %d with src_off=0x%x "
 448                                        "dst_off=0x%x len=0x%x\n",
 449                                        thread_name, total_tests - 1, cookie,
 450                                        src_off, dst_off, len);
 451                        msleep(100);
 452                        failed_tests++;
 453                        continue;
 454                }
 455                dma_async_issue_pending(chan);
 456
 457                wait_event_freezable_timeout(done_wait, done.done,
 458                                             msecs_to_jiffies(timeout));
 459
 460                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 461
 462                if (!done.done) {
 463                        /*
 464                         * We're leaving the timed out dma operation with
 465                         * dangling pointer to done_wait.  To make this
 466                         * correct, we'll need to allocate wait_done for
 467                         * each test iteration and perform "who's gonna
 468                         * free it this time?" dancing.  For now, just
 469                         * leave it dangling.
 470                         */
 471                        pr_warning("%s: #%u: test timed out\n",
 472                                   thread_name, total_tests - 1);
 473                        failed_tests++;
 474                        continue;
 475                } else if (status != DMA_SUCCESS) {
 476                        pr_warning("%s: #%u: got completion callback,"
 477                                   " but status is \'%s\'\n",
 478                                   thread_name, total_tests - 1,
 479                                   status == DMA_ERROR ? "error" : "in progress");
 480                        failed_tests++;
 481                        continue;
 482                }
 483
 484                /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
 485                unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
 486
 487                error_count = 0;
 488
 489                pr_debug("%s: verifying source buffer...\n", thread_name);
 490                error_count += dmatest_verify(thread->srcs, 0, src_off,
 491                                0, PATTERN_SRC, true);
 492                error_count += dmatest_verify(thread->srcs, src_off,
 493                                src_off + len, src_off,
 494                                PATTERN_SRC | PATTERN_COPY, true);
 495                error_count += dmatest_verify(thread->srcs, src_off + len,
 496                                test_buf_size, src_off + len,
 497                                PATTERN_SRC, true);
 498
 499                pr_debug("%s: verifying dest buffer...\n",
 500                                thread->task->comm);
 501                error_count += dmatest_verify(thread->dsts, 0, dst_off,
 502                                0, PATTERN_DST, false);
 503                error_count += dmatest_verify(thread->dsts, dst_off,
 504                                dst_off + len, src_off,
 505                                PATTERN_SRC | PATTERN_COPY, false);
 506                error_count += dmatest_verify(thread->dsts, dst_off + len,
 507                                test_buf_size, dst_off + len,
 508                                PATTERN_DST, false);
 509
 510                if (error_count) {
 511                        pr_warning("%s: #%u: %u errors with "
 512                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
 513                                thread_name, total_tests - 1, error_count,
 514                                src_off, dst_off, len);
 515                        failed_tests++;
 516                } else {
 517                        pr_debug("%s: #%u: No errors with "
 518                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
 519                                thread_name, total_tests - 1,
 520                                src_off, dst_off, len);
 521                }
 522        }
 523
 524        ret = 0;
 525        for (i = 0; thread->dsts[i]; i++)
 526                kfree(thread->dsts[i]);
 527err_dstbuf:
 528        kfree(thread->dsts);
 529err_dsts:
 530        for (i = 0; thread->srcs[i]; i++)
 531                kfree(thread->srcs[i]);
 532err_srcbuf:
 533        kfree(thread->srcs);
 534err_srcs:
 535        pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 536                        thread_name, total_tests, failed_tests, ret);
 537
 538        /* terminate all transfers on specified channels */
 539        if (ret)
 540                dmaengine_terminate_all(chan);
 541
 542        if (iterations > 0)
 543                while (!kthread_should_stop()) {
 544                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
 545                        interruptible_sleep_on(&wait_dmatest_exit);
 546                }
 547
 548        return ret;
 549}
 550
 551static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
 552{
 553        struct dmatest_thread   *thread;
 554        struct dmatest_thread   *_thread;
 555        int                     ret;
 556
 557        list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
 558                ret = kthread_stop(thread->task);
 559                pr_debug("dmatest: thread %s exited with status %d\n",
 560                                thread->task->comm, ret);
 561                list_del(&thread->node);
 562                kfree(thread);
 563        }
 564
 565        /* terminate all transfers on specified channels */
 566        dmaengine_terminate_all(dtc->chan);
 567
 568        kfree(dtc);
 569}
 570
 571static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
 572{
 573        struct dmatest_thread *thread;
 574        struct dma_chan *chan = dtc->chan;
 575        char *op;
 576        unsigned int i;
 577
 578        if (type == DMA_MEMCPY)
 579                op = "copy";
 580        else if (type == DMA_XOR)
 581                op = "xor";
 582        else if (type == DMA_PQ)
 583                op = "pq";
 584        else
 585                return -EINVAL;
 586
 587        for (i = 0; i < threads_per_chan; i++) {
 588                thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
 589                if (!thread) {
 590                        pr_warning("dmatest: No memory for %s-%s%u\n",
 591                                   dma_chan_name(chan), op, i);
 592
 593                        break;
 594                }
 595                thread->chan = dtc->chan;
 596                thread->type = type;
 597                smp_wmb();
 598                thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
 599                                dma_chan_name(chan), op, i);
 600                if (IS_ERR(thread->task)) {
 601                        pr_warning("dmatest: Failed to run thread %s-%s%u\n",
 602                                        dma_chan_name(chan), op, i);
 603                        kfree(thread);
 604                        break;
 605                }
 606
 607                /* srcbuf and dstbuf are allocated by the thread itself */
 608
 609                list_add_tail(&thread->node, &dtc->threads);
 610        }
 611
 612        return i;
 613}
 614
 615static int dmatest_add_channel(struct dma_chan *chan)
 616{
 617        struct dmatest_chan     *dtc;
 618        struct dma_device       *dma_dev = chan->device;
 619        unsigned int            thread_count = 0;
 620        int cnt;
 621
 622        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 623        if (!dtc) {
 624                pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
 625                return -ENOMEM;
 626        }
 627
 628        dtc->chan = chan;
 629        INIT_LIST_HEAD(&dtc->threads);
 630
 631        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
 632                cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
 633                thread_count += cnt > 0 ? cnt : 0;
 634        }
 635        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 636                cnt = dmatest_add_threads(dtc, DMA_XOR);
 637                thread_count += cnt > 0 ? cnt : 0;
 638        }
 639        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
 640                cnt = dmatest_add_threads(dtc, DMA_PQ);
 641                thread_count += cnt > 0 ? cnt : 0;
 642        }
 643
 644        pr_info("dmatest: Started %u threads using %s\n",
 645                thread_count, dma_chan_name(chan));
 646
 647        list_add_tail(&dtc->node, &dmatest_channels);
 648        nr_channels++;
 649
 650        return 0;
 651}
 652
 653static bool filter(struct dma_chan *chan, void *param)
 654{
 655        if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
 656                return false;
 657        else
 658                return true;
 659}
 660
 661static int __init dmatest_init(void)
 662{
 663        dma_cap_mask_t mask;
 664        struct dma_chan *chan;
 665        int err = 0;
 666
 667        dma_cap_zero(mask);
 668        dma_cap_set(DMA_MEMCPY, mask);
 669        for (;;) {
 670                chan = dma_request_channel(mask, filter, NULL);
 671                if (chan) {
 672                        err = dmatest_add_channel(chan);
 673                        if (err) {
 674                                dma_release_channel(chan);
 675                                break; /* add_channel failed, punt */
 676                        }
 677                } else
 678                        break; /* no more channels available */
 679                if (max_channels && nr_channels >= max_channels)
 680                        break; /* we have all we need */
 681        }
 682
 683        return err;
 684}
 685/* when compiled-in wait for drivers to load first */
 686late_initcall(dmatest_init);
 687
 688static void __exit dmatest_exit(void)
 689{
 690        struct dmatest_chan *dtc, *_dtc;
 691        struct dma_chan *chan;
 692
 693        list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
 694                list_del(&dtc->node);
 695                chan = dtc->chan;
 696                dmatest_cleanup_channel(dtc);
 697                pr_debug("dmatest: dropped channel %s\n",
 698                         dma_chan_name(chan));
 699                dma_release_channel(chan);
 700        }
 701}
 702module_exit(dmatest_exit);
 703
/* Module metadata */
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");
 706