/*
 * asynchronous memset api
 *
 * Copyright (c) Intel Corporation.
 *
 * Released under the terms of the GNU General Public License (GPL).
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/async_tx.h>

/**
 * async_memset - attempt to fill a page with a dma engine, falling back
 *	to a synchronous memset when no channel or descriptor is available
 * @dest: destination page
 * @val: fill value
 * @offset: offset in bytes into @dest at which the fill starts
 * @len: length in bytes
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: transaction this operation depends on, if any
 * @cb_fn: function to call when the operation completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset,
	     size_t len, enum async_tx_flags flags,
	     struct dma_async_tx_descriptor *depend_tx,
	     dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
						      &dest, 1, NULL, 0, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	if (device) {
		dma_addr_t dma_dest;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

		/* the engine writes into @dest, so map it device-to-cpu */
		dma_dest = dma_map_page(device->dev, dest, offset, len,
					DMA_FROM_DEVICE);

		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
						    dma_prep_flags);
	}

	if (tx) {
		pr_debug("%s: (async) len: %zu\n", __func__, len);
		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		/* no channel or descriptor available, run synchronously */
		void *dest_buf;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		dest_buf = page_address(dest) + offset;

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		memset(dest_buf, val, len);

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_memset);
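
/*
 * Usage sketch (illustrative only, not part of the original file): a client
 * such as a stripe-cache driver could use async_memset() to zero a page and
 * chain the fill behind an earlier transaction.  The names zero_stripe_page()
 * and zero_done() below are hypothetical.
 */
#if 0
static void zero_done(void *param)
{
	struct page *page = param;

	/* runs from the channel's callback context, or inline when the
	 * synchronous fallback path was taken
	 */
	pr_debug("zeroing of page %p done\n", page);
}

static struct dma_async_tx_descriptor *
zero_stripe_page(struct page *page, struct dma_async_tx_descriptor *prev_tx)
{
	/* fill the whole page with zeroes once prev_tx completes; the
	 * returned descriptor is NULL when the operation ran synchronously
	 */
	return async_memset(page, 0, 0, PAGE_SIZE, ASYNC_TX_ACK,
			    prev_tx, zero_done, page);
}
#endif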

static int __init async_memset_init(void)
{
	return 0;
}

static void __exit async_memset_exit(void)
{
	do { } while (0);
}

module_init(async_memset_init);
module_exit(async_memset_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memset api");
MODULE_LICENSE("GPL");