linux/drivers/dma/dmatest.c
<<
>>
Prefs
   1/*
   2 * DMA Engine test module
   3 *
   4 * Copyright (C) 2007 Atmel Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/delay.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/dmaengine.h>
  13#include <linux/freezer.h>
  14#include <linux/init.h>
  15#include <linux/kthread.h>
  16#include <linux/module.h>
  17#include <linux/moduleparam.h>
  18#include <linux/random.h>
  19#include <linux/slab.h>
  20#include <linux/wait.h>
  21
  22static unsigned int test_buf_size = 16384;
  23module_param(test_buf_size, uint, S_IRUGO);
  24MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
  25
  26static char test_channel[20];
  27module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
  28MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
  29
  30static char test_device[20];
  31module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
  32MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
  33
  34static unsigned int threads_per_chan = 1;
  35module_param(threads_per_chan, uint, S_IRUGO);
  36MODULE_PARM_DESC(threads_per_chan,
  37                "Number of threads to start per channel (default: 1)");
  38
  39static unsigned int max_channels;
  40module_param(max_channels, uint, S_IRUGO);
  41MODULE_PARM_DESC(max_channels,
  42                "Maximum number of channels to use (default: all)");
  43
  44static unsigned int iterations;
  45module_param(iterations, uint, S_IRUGO);
  46MODULE_PARM_DESC(iterations,
  47                "Iterations before stopping test (default: infinite)");
  48
  49static unsigned int xor_sources = 3;
  50module_param(xor_sources, uint, S_IRUGO);
  51MODULE_PARM_DESC(xor_sources,
  52                "Number of xor source buffers (default: 3)");
  53
  54static unsigned int pq_sources = 3;
  55module_param(pq_sources, uint, S_IRUGO);
  56MODULE_PARM_DESC(pq_sources,
  57                "Number of p+q source buffers (default: 3)");
  58
  59static int timeout = 3000;
  60module_param(timeout, uint, S_IRUGO);
  61MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
  62                 "Pass -1 for infinite timeout");
  63
/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
  75#define PATTERN_SRC             0x80
  76#define PATTERN_DST             0x00
  77#define PATTERN_COPY            0x40
  78#define PATTERN_OVERWRITE       0x20
  79#define PATTERN_COUNT_MASK      0x1f
  80
/* Per-thread test state; one instance per kthread started on a channel. */
struct dmatest_thread {
	struct list_head	node;	/* link in dmatest_chan::threads */
	struct task_struct	*task;	/* kthread running dmatest_func() */
	struct dma_chan		*chan;	/* channel under test */
	u8			**srcs;	/* NULL-terminated array of source buffers */
	u8			**dsts;	/* NULL-terminated array of destination buffers */
	enum dma_transaction_type type;	/* DMA_MEMCPY, DMA_XOR or DMA_PQ */
};
  89
/* Per-channel bookkeeping: the channel plus all of its test threads. */
struct dmatest_chan {
	struct list_head	node;	/* link in the global dmatest_channels list */
	struct dma_chan		*chan;	/* the DMA channel being exercised */
	struct list_head	threads;	/* list of dmatest_thread::node */
};
  95
  96/*
  97 * These are protected by dma_list_mutex since they're only used by
  98 * the DMA filter function callback
  99 */
 100static LIST_HEAD(dmatest_channels);
 101static unsigned int nr_channels;
 102
 103static bool dmatest_match_channel(struct dma_chan *chan)
 104{
 105        if (test_channel[0] == '\0')
 106                return true;
 107        return strcmp(dma_chan_name(chan), test_channel) == 0;
 108}
 109
 110static bool dmatest_match_device(struct dma_device *device)
 111{
 112        if (test_device[0] == '\0')
 113                return true;
 114        return strcmp(dev_name(device->dev), test_device) == 0;
 115}
 116
 117static unsigned long dmatest_random(void)
 118{
 119        unsigned long buf;
 120
 121        get_random_bytes(&buf, sizeof(buf));
 122        return buf;
 123}
 124
 125static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
 126{
 127        unsigned int i;
 128        u8 *buf;
 129
 130        for (; (buf = *bufs); bufs++) {
 131                for (i = 0; i < start; i++)
 132                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
 133                for ( ; i < start + len; i++)
 134                        buf[i] = PATTERN_SRC | PATTERN_COPY
 135                                | (~i & PATTERN_COUNT_MASK);
 136                for ( ; i < test_buf_size; i++)
 137                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
 138                buf++;
 139        }
 140}
 141
 142static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
 143{
 144        unsigned int i;
 145        u8 *buf;
 146
 147        for (; (buf = *bufs); bufs++) {
 148                for (i = 0; i < start; i++)
 149                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
 150                for ( ; i < start + len; i++)
 151                        buf[i] = PATTERN_DST | PATTERN_OVERWRITE
 152                                | (~i & PATTERN_COUNT_MASK);
 153                for ( ; i < test_buf_size; i++)
 154                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
 155        }
 156}
 157
 158static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
 159                unsigned int counter, bool is_srcbuf)
 160{
 161        u8              diff = actual ^ pattern;
 162        u8              expected = pattern | (~counter & PATTERN_COUNT_MASK);
 163        const char      *thread_name = current->comm;
 164
 165        if (is_srcbuf)
 166                pr_warning("%s: srcbuf[0x%x] overwritten!"
 167                                " Expected %02x, got %02x\n",
 168                                thread_name, index, expected, actual);
 169        else if ((pattern & PATTERN_COPY)
 170                        && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
 171                pr_warning("%s: dstbuf[0x%x] not copied!"
 172                                " Expected %02x, got %02x\n",
 173                                thread_name, index, expected, actual);
 174        else if (diff & PATTERN_SRC)
 175                pr_warning("%s: dstbuf[0x%x] was copied!"
 176                                " Expected %02x, got %02x\n",
 177                                thread_name, index, expected, actual);
 178        else
 179                pr_warning("%s: dstbuf[0x%x] mismatch!"
 180                                " Expected %02x, got %02x\n",
 181                                thread_name, index, expected, actual);
 182}
 183
 184static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 185                unsigned int end, unsigned int counter, u8 pattern,
 186                bool is_srcbuf)
 187{
 188        unsigned int i;
 189        unsigned int error_count = 0;
 190        u8 actual;
 191        u8 expected;
 192        u8 *buf;
 193        unsigned int counter_orig = counter;
 194
 195        for (; (buf = *bufs); bufs++) {
 196                counter = counter_orig;
 197                for (i = start; i < end; i++) {
 198                        actual = buf[i];
 199                        expected = pattern | (~counter & PATTERN_COUNT_MASK);
 200                        if (actual != expected) {
 201                                if (error_count < 32)
 202                                        dmatest_mismatch(actual, pattern, i,
 203                                                         counter, is_srcbuf);
 204                                error_count++;
 205                        }
 206                        counter++;
 207                }
 208        }
 209
 210        if (error_count > 32)
 211                pr_warning("%s: %u errors suppressed\n",
 212                        current->comm, error_count - 32);
 213
 214        return error_count;
 215}
 216
 217/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;	/* set true by dmatest_callback() */
	wait_queue_head_t	*wait;	/* waitqueue the test thread sleeps on */
};
 222
 223static void dmatest_callback(void *arg)
 224{
 225        struct dmatest_done *done = arg;
 226
 227        done->done = true;
 228        wake_up_all(done->wait);
 229}
 230
 231static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
 232                             unsigned int count)
 233{
 234        while (count--)
 235                dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
 236}
 237
 238static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
 239                             unsigned int count)
 240{
 241        while (count--)
 242                dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
 243}
 244
 245/*
 246 * This function repeatedly tests DMA transfers of various lengths and
 247 * offsets for a given operation type until it is told to exit by
 248 * kthread_stop(). There may be multiple threads running this function
 249 * in parallel for a single channel, and there may be multiple channels
 250 * being tested in parallel.
 251 *
 252 * Before each test, the source and destination buffer is initialized
 253 * with a known pattern. This pattern is different depending on
 254 * whether it's in an area which is supposed to be copied or
 255 * overwritten, and different in the source and destination buffers.
 256 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 257 * we'll notice.
 258 */
 259static int dmatest_func(void *data)
 260{
 261        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 262        struct dmatest_thread   *thread = data;
 263        struct dmatest_done     done = { .wait = &done_wait };
 264        struct dma_chan         *chan;
 265        const char              *thread_name;
 266        unsigned int            src_off, dst_off, len;
 267        unsigned int            error_count;
 268        unsigned int            failed_tests = 0;
 269        unsigned int            total_tests = 0;
 270        dma_cookie_t            cookie;
 271        enum dma_status         status;
 272        enum dma_ctrl_flags     flags;
 273        u8                      pq_coefs[pq_sources + 1];
 274        int                     ret;
 275        int                     src_cnt;
 276        int                     dst_cnt;
 277        int                     i;
 278
 279        thread_name = current->comm;
 280        set_freezable();
 281
 282        ret = -ENOMEM;
 283
 284        smp_rmb();
 285        chan = thread->chan;
 286        if (thread->type == DMA_MEMCPY)
 287                src_cnt = dst_cnt = 1;
 288        else if (thread->type == DMA_XOR) {
 289                src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
 290                dst_cnt = 1;
 291        } else if (thread->type == DMA_PQ) {
 292                src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
 293                dst_cnt = 2;
 294                for (i = 0; i < src_cnt; i++)
 295                        pq_coefs[i] = 1;
 296        } else
 297                goto err_srcs;
 298
 299        thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
 300        if (!thread->srcs)
 301                goto err_srcs;
 302        for (i = 0; i < src_cnt; i++) {
 303                thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
 304                if (!thread->srcs[i])
 305                        goto err_srcbuf;
 306        }
 307        thread->srcs[i] = NULL;
 308
 309        thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
 310        if (!thread->dsts)
 311                goto err_dsts;
 312        for (i = 0; i < dst_cnt; i++) {
 313                thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
 314                if (!thread->dsts[i])
 315                        goto err_dstbuf;
 316        }
 317        thread->dsts[i] = NULL;
 318
 319        set_user_nice(current, 10);
 320
 321        /*
 322         * src buffers are freed by the DMAEngine code with dma_unmap_single()
 323         * dst buffers are freed by ourselves below
 324         */
 325        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
 326              | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
 327
 328        while (!kthread_should_stop()
 329               && !(iterations && total_tests >= iterations)) {
 330                struct dma_device *dev = chan->device;
 331                struct dma_async_tx_descriptor *tx = NULL;
 332                dma_addr_t dma_srcs[src_cnt];
 333                dma_addr_t dma_dsts[dst_cnt];
 334                u8 align = 0;
 335
 336                total_tests++;
 337
 338                /* honor alignment restrictions */
 339                if (thread->type == DMA_MEMCPY)
 340                        align = dev->copy_align;
 341                else if (thread->type == DMA_XOR)
 342                        align = dev->xor_align;
 343                else if (thread->type == DMA_PQ)
 344                        align = dev->pq_align;
 345
 346                if (1 << align > test_buf_size) {
 347                        pr_err("%u-byte buffer too small for %d-byte alignment\n",
 348                               test_buf_size, 1 << align);
 349                        break;
 350                }
 351
 352                len = dmatest_random() % test_buf_size + 1;
 353                len = (len >> align) << align;
 354                if (!len)
 355                        len = 1 << align;
 356                src_off = dmatest_random() % (test_buf_size - len + 1);
 357                dst_off = dmatest_random() % (test_buf_size - len + 1);
 358
 359                src_off = (src_off >> align) << align;
 360                dst_off = (dst_off >> align) << align;
 361
 362                dmatest_init_srcs(thread->srcs, src_off, len);
 363                dmatest_init_dsts(thread->dsts, dst_off, len);
 364
 365                for (i = 0; i < src_cnt; i++) {
 366                        u8 *buf = thread->srcs[i] + src_off;
 367
 368                        dma_srcs[i] = dma_map_single(dev->dev, buf, len,
 369                                                     DMA_TO_DEVICE);
 370                        ret = dma_mapping_error(dev->dev, dma_srcs[i]);
 371                        if (ret) {
 372                                unmap_src(dev->dev, dma_srcs, len, i);
 373                                pr_warn("%s: #%u: mapping error %d with "
 374                                        "src_off=0x%x len=0x%x\n",
 375                                        thread_name, total_tests - 1, ret,
 376                                        src_off, len);
 377                                failed_tests++;
 378                                continue;
 379                        }
 380                }
 381                /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 382                for (i = 0; i < dst_cnt; i++) {
 383                        dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
 384                                                     test_buf_size,
 385                                                     DMA_BIDIRECTIONAL);
 386                        ret = dma_mapping_error(dev->dev, dma_dsts[i]);
 387                        if (ret) {
 388                                unmap_src(dev->dev, dma_srcs, len, src_cnt);
 389                                unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
 390                                pr_warn("%s: #%u: mapping error %d with "
 391                                        "dst_off=0x%x len=0x%x\n",
 392                                        thread_name, total_tests - 1, ret,
 393                                        dst_off, test_buf_size);
 394                                failed_tests++;
 395                                continue;
 396                        }
 397                }
 398
 399                if (thread->type == DMA_MEMCPY)
 400                        tx = dev->device_prep_dma_memcpy(chan,
 401                                                         dma_dsts[0] + dst_off,
 402                                                         dma_srcs[0], len,
 403                                                         flags);
 404                else if (thread->type == DMA_XOR)
 405                        tx = dev->device_prep_dma_xor(chan,
 406                                                      dma_dsts[0] + dst_off,
 407                                                      dma_srcs, src_cnt,
 408                                                      len, flags);
 409                else if (thread->type == DMA_PQ) {
 410                        dma_addr_t dma_pq[dst_cnt];
 411
 412                        for (i = 0; i < dst_cnt; i++)
 413                                dma_pq[i] = dma_dsts[i] + dst_off;
 414                        tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
 415                                                     src_cnt, pq_coefs,
 416                                                     len, flags);
 417                }
 418
 419                if (!tx) {
 420                        unmap_src(dev->dev, dma_srcs, len, src_cnt);
 421                        unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
 422                        pr_warning("%s: #%u: prep error with src_off=0x%x "
 423                                        "dst_off=0x%x len=0x%x\n",
 424                                        thread_name, total_tests - 1,
 425                                        src_off, dst_off, len);
 426                        msleep(100);
 427                        failed_tests++;
 428                        continue;
 429                }
 430
 431                done.done = false;
 432                tx->callback = dmatest_callback;
 433                tx->callback_param = &done;
 434                cookie = tx->tx_submit(tx);
 435
 436                if (dma_submit_error(cookie)) {
 437                        pr_warning("%s: #%u: submit error %d with src_off=0x%x "
 438                                        "dst_off=0x%x len=0x%x\n",
 439                                        thread_name, total_tests - 1, cookie,
 440                                        src_off, dst_off, len);
 441                        msleep(100);
 442                        failed_tests++;
 443                        continue;
 444                }
 445                dma_async_issue_pending(chan);
 446
 447                wait_event_freezable_timeout(done_wait, done.done,
 448                                             msecs_to_jiffies(timeout));
 449
 450                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 451
 452                if (!done.done) {
 453                        /*
 454                         * We're leaving the timed out dma operation with
 455                         * dangling pointer to done_wait.  To make this
 456                         * correct, we'll need to allocate wait_done for
 457                         * each test iteration and perform "who's gonna
 458                         * free it this time?" dancing.  For now, just
 459                         * leave it dangling.
 460                         */
 461                        pr_warning("%s: #%u: test timed out\n",
 462                                   thread_name, total_tests - 1);
 463                        failed_tests++;
 464                        continue;
 465                } else if (status != DMA_SUCCESS) {
 466                        pr_warning("%s: #%u: got completion callback,"
 467                                   " but status is \'%s\'\n",
 468                                   thread_name, total_tests - 1,
 469                                   status == DMA_ERROR ? "error" : "in progress");
 470                        failed_tests++;
 471                        continue;
 472                }
 473
 474                /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
 475                unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
 476
 477                error_count = 0;
 478
 479                pr_debug("%s: verifying source buffer...\n", thread_name);
 480                error_count += dmatest_verify(thread->srcs, 0, src_off,
 481                                0, PATTERN_SRC, true);
 482                error_count += dmatest_verify(thread->srcs, src_off,
 483                                src_off + len, src_off,
 484                                PATTERN_SRC | PATTERN_COPY, true);
 485                error_count += dmatest_verify(thread->srcs, src_off + len,
 486                                test_buf_size, src_off + len,
 487                                PATTERN_SRC, true);
 488
 489                pr_debug("%s: verifying dest buffer...\n",
 490                                thread->task->comm);
 491                error_count += dmatest_verify(thread->dsts, 0, dst_off,
 492                                0, PATTERN_DST, false);
 493                error_count += dmatest_verify(thread->dsts, dst_off,
 494                                dst_off + len, src_off,
 495                                PATTERN_SRC | PATTERN_COPY, false);
 496                error_count += dmatest_verify(thread->dsts, dst_off + len,
 497                                test_buf_size, dst_off + len,
 498                                PATTERN_DST, false);
 499
 500                if (error_count) {
 501                        pr_warning("%s: #%u: %u errors with "
 502                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
 503                                thread_name, total_tests - 1, error_count,
 504                                src_off, dst_off, len);
 505                        failed_tests++;
 506                } else {
 507                        pr_debug("%s: #%u: No errors with "
 508                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
 509                                thread_name, total_tests - 1,
 510                                src_off, dst_off, len);
 511                }
 512        }
 513
 514        ret = 0;
 515        for (i = 0; thread->dsts[i]; i++)
 516                kfree(thread->dsts[i]);
 517err_dstbuf:
 518        kfree(thread->dsts);
 519err_dsts:
 520        for (i = 0; thread->srcs[i]; i++)
 521                kfree(thread->srcs[i]);
 522err_srcbuf:
 523        kfree(thread->srcs);
 524err_srcs:
 525        pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 526                        thread_name, total_tests, failed_tests, ret);
 527
 528        /* terminate all transfers on specified channels */
 529        chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 530        if (iterations > 0)
 531                while (!kthread_should_stop()) {
 532                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
 533                        interruptible_sleep_on(&wait_dmatest_exit);
 534                }
 535
 536        return ret;
 537}
 538
 539static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
 540{
 541        struct dmatest_thread   *thread;
 542        struct dmatest_thread   *_thread;
 543        int                     ret;
 544
 545        list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
 546                ret = kthread_stop(thread->task);
 547                pr_debug("dmatest: thread %s exited with status %d\n",
 548                                thread->task->comm, ret);
 549                list_del(&thread->node);
 550                kfree(thread);
 551        }
 552
 553        /* terminate all transfers on specified channels */
 554        dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
 555
 556        kfree(dtc);
 557}
 558
 559static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
 560{
 561        struct dmatest_thread *thread;
 562        struct dma_chan *chan = dtc->chan;
 563        char *op;
 564        unsigned int i;
 565
 566        if (type == DMA_MEMCPY)
 567                op = "copy";
 568        else if (type == DMA_XOR)
 569                op = "xor";
 570        else if (type == DMA_PQ)
 571                op = "pq";
 572        else
 573                return -EINVAL;
 574
 575        for (i = 0; i < threads_per_chan; i++) {
 576                thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
 577                if (!thread) {
 578                        pr_warning("dmatest: No memory for %s-%s%u\n",
 579                                   dma_chan_name(chan), op, i);
 580
 581                        break;
 582                }
 583                thread->chan = dtc->chan;
 584                thread->type = type;
 585                smp_wmb();
 586                thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
 587                                dma_chan_name(chan), op, i);
 588                if (IS_ERR(thread->task)) {
 589                        pr_warning("dmatest: Failed to run thread %s-%s%u\n",
 590                                        dma_chan_name(chan), op, i);
 591                        kfree(thread);
 592                        break;
 593                }
 594
 595                /* srcbuf and dstbuf are allocated by the thread itself */
 596
 597                list_add_tail(&thread->node, &dtc->threads);
 598        }
 599
 600        return i;
 601}
 602
 603static int dmatest_add_channel(struct dma_chan *chan)
 604{
 605        struct dmatest_chan     *dtc;
 606        struct dma_device       *dma_dev = chan->device;
 607        unsigned int            thread_count = 0;
 608        int cnt;
 609
 610        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 611        if (!dtc) {
 612                pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
 613                return -ENOMEM;
 614        }
 615
 616        dtc->chan = chan;
 617        INIT_LIST_HEAD(&dtc->threads);
 618
 619        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
 620                cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
 621                thread_count += cnt > 0 ? cnt : 0;
 622        }
 623        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 624                cnt = dmatest_add_threads(dtc, DMA_XOR);
 625                thread_count += cnt > 0 ? cnt : 0;
 626        }
 627        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
 628                cnt = dmatest_add_threads(dtc, DMA_PQ);
 629                thread_count += cnt > 0 ? cnt : 0;
 630        }
 631
 632        pr_info("dmatest: Started %u threads using %s\n",
 633                thread_count, dma_chan_name(chan));
 634
 635        list_add_tail(&dtc->node, &dmatest_channels);
 636        nr_channels++;
 637
 638        return 0;
 639}
 640
 641static bool filter(struct dma_chan *chan, void *param)
 642{
 643        if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
 644                return false;
 645        else
 646                return true;
 647}
 648
 649static int __init dmatest_init(void)
 650{
 651        dma_cap_mask_t mask;
 652        struct dma_chan *chan;
 653        int err = 0;
 654
 655        dma_cap_zero(mask);
 656        dma_cap_set(DMA_MEMCPY, mask);
 657        for (;;) {
 658                chan = dma_request_channel(mask, filter, NULL);
 659                if (chan) {
 660                        err = dmatest_add_channel(chan);
 661                        if (err) {
 662                                dma_release_channel(chan);
 663                                break; /* add_channel failed, punt */
 664                        }
 665                } else
 666                        break; /* no more channels available */
 667                if (max_channels && nr_channels >= max_channels)
 668                        break; /* we have all we need */
 669        }
 670
 671        return err;
 672}
 673/* when compiled-in wait for drivers to load first */
 674late_initcall(dmatest_init);
 675
 676static void __exit dmatest_exit(void)
 677{
 678        struct dmatest_chan *dtc, *_dtc;
 679        struct dma_chan *chan;
 680
 681        list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
 682                list_del(&dtc->node);
 683                chan = dtc->chan;
 684                dmatest_cleanup_channel(dtc);
 685                pr_debug("dmatest: dropped channel %s\n",
 686                         dma_chan_name(chan));
 687                dma_release_channel(chan);
 688        }
 689}
 690module_exit(dmatest_exit);
 691
 692MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
 693MODULE_LICENSE("GPL v2");
 694
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.