/*
 * Renesas R-Car sound (rsnd) Audio DMAC support
 */
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register offsets
 * (relative to each channel's register block)
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE	(1 << 0)

/* Audio DMAC (DMA Engine) specific data */
struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	unsigned int		dma_len;
};

/* Audio DMAC peri peri specific data */
struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem *base;
	int dmaen_num;
	int dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/* dummy "mem" module, used for DMA path debug output */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 *		Audio DMAC (DMA Engine)
 */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (rsnd_io_is_working(io))
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

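/*
 * Exactly one of mod_from/mod_to must be an on-chip module (the other
 * end of the transfer is memory); request the DMA Engine channel from
 * that module.
 */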
static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

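/* stop: abort any in-flight DMA Engine transfer */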
static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan)
		dmaengine_terminate_all(dmaen->chan);

	return 0;
}

static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * Releasing a DMA Engine channel uses a mutex and may sleep,
	 * so it is done here (cleanup) rather than under the stream
	 * spinlock. The channel is requested again in prepare.
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	/* maybe suspended: keep the channel we already have */
	if (dmaen->chan)
		return 0;

	/*
	 * Requesting a DMA Engine channel uses a mutex and may sleep,
	 * so it must not happen under the spinlock held at start time;
	 * do it here in prepare instead.
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}

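/*
 * start: configure the slave channel, prepare a cyclic transfer over
 * the whole PCM buffer with per-period interrupts, then submit it and
 * kick the engine.
 */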
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
	int is_play = rsnd_io_is_play(io);
	int ret;

	/*
	 * For monaural streams the DMA bus width has to match the
	 * physical sample width; otherwise keep the 4 byte default.
	 */
	if (rsnd_runtime_channel_original(io) == 1) {
		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
		int bits = snd_pcm_format_physical_width(runtime->format);

		switch (bits) {
		case 8:
			buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
			break;
		case 16:
			buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
			break;
		case 32:
			buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
			break;
		default:
			dev_err(dev, "invalid format width %d\n", bits);
			return -EINVAL;
		}
	}

	cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr = dma->src_addr;
	cfg.dst_addr = dma->dst_addr;
	cfg.src_addr_width = buswidth;
	cfg.dst_addr_width = buswidth;

	dev_dbg(dev, "%s %pad -> %pad\n",
		rsnd_mod_name(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback = rsnd_dmaen_complete;
	desc->callback_param = rsnd_mod_get(dma);

	dmaen->dma_len = snd_pcm_lib_buffer_bytes(substream);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

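/*
 * Walk the children of the given SSI/SSIU node and request the slave
 * channel called "name" from the child matching this module's id.
 */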
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
					  struct rsnd_mod *mod, char *name)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		if (i == rsnd_mod_id_raw(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, name);
		i++;
	}

	/* of_node came from rsnd_xxx_of_node() with its refcount raised */
	of_node_put(of_node);

	return chan;
}

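/*
 * attach: verify at attach time that a DMA Engine channel is available
 * and remember the DMAC device; the channel itself is (re)requested in
 * prepare and released in cleanup.
 */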
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get a DMA Engine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* pass -EPROBE_DEFER through */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA is not available: return -EAGAIN so that the
		 * core can fall back to PIO mode
		 */
		return -EAGAIN;
	}

	/*
	 * Remember the DMAC device behind this channel for later
	 * buffer handling; the channel itself is re-requested at
	 * prepare time
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

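/* pointer: derive the current buffer position from the DMA residue */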
static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name = "audmac",
	.prepare = rsnd_dmaen_prepare,
	.cleanup = rsnd_dmaen_cleanup,
	.start = rsnd_dmaen_start,
	.stop = rsnd_dmaen_stop,
	.pointer = rsnd_dmaen_pointer,
	.get_status = rsnd_mod_get_status,
};

/*
 *		Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	/* SSI00 ~ SSI07 */
	0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
	/* SSI10 ~ SSI17 */
	0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
	/* SSI20 ~ SSI27 */
	0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
	/* SSI30 ~ SSI37 */
	0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
	/* SSI40 ~ SSI47 */
	0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
	/* SSI5 */
	0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI6 */
	0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI7 */
	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI8 */
	0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI90 ~ SSI97 */
	0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SRC0 */
	0x2e, /* SRC1 */
	0x2f, /* SRC2 */
	0x30, /* SRC3 */
	0x31, /* SRC4 */
	0x32, /* SRC5 */
	0x33, /* SRC6 */
	0x34, /* SRC7 */
	0x35, /* SRC8 */
	0x36, /* SRC9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* CMD0 */
	0x38, /* CMD1 */
};

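/*
 * Map a module (SSI/SSIU BUSIF, SRC or CMD/DVC) to its Audio DMAC
 * peri peri request id using the Gen2 tables above.
 */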
static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = 255;
	int size = 0;

	if ((mod == ssi) ||
	    (mod == ssiu)) {
		int busif = rsnd_mod_id_sub(ssiu);

		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
		id = (rsnd_mod_id(mod) * 8) + busif;
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
		id = rsnd_mod_id(mod);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
		id = rsnd_mod_id(mod);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));

		return 0x00;
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return (rsnd_dmapp_get_id(io, mod_from) << 24) +
	       (rsnd_dmapp_get_id(io, mod_to) << 16);
}

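/*
 * Each Audio DMAC peri peri channel has a 0x10 byte register block,
 * starting at base + 0x20.
 */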
#define rsnd_dmapp_addr(dmac, dma, reg) \
	(dmac->base + 0x20 + reg + \
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

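/* stop: clear DE and poll (up to ~1ms) until the channel is disabled */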
static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);

	return 0;
}

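/* attach: assign the next free peri peri channel and precompute CHCR */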
static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name = "audmac-pp",
	.start = rsnd_dmapp_start,
	.stop = rsnd_dmapp_stop,
	.quit = rsnd_dmapp_stop,
	.get_status = rsnd_mod_get_status,
};

/*
 *		Common DMAC Interface
 */

/*
 * DMA data port addresses, derived from the SSI/SCU physical register
 * bases:
 *
 *	RDMA_xxx_I_N : input,  accessed by the Audio DMAC (DMA Engine)
 *	RDMA_xxx_O_N : output, accessed by the Audio DMAC (DMA Engine)
 *	RDMA_xxx_I_P : input,  accessed by the Audio DMAC peri peri
 *	RDMA_xxx_O_P : output, accessed by the Audio DMAC peri peri
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)

#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

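/*
 * Look up the DMA source/destination address for a module on Gen2 or
 * later, indexed by module type (SRC/CMD side, SSI, or SSIU via BUSIF),
 * direction, and whether SRC and/or CMD are part of the path.
 */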
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
		     !!(rsnd_io_to_mod_ssiu(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0, 0 },
		  { RDMA_SRC_O_N(src, id), RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id), RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0, 0, },
		  { RDMA_SRC_O_P(src, id), RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id), RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id), 0 },
		  { RDMA_SSIU_O_P(ssi, id, busif), 0 },
		  { RDMA_SSIU_O_P(ssi, id, busif), 0 } },
		 /* Playback */
		 {{ 0, RDMA_SSI_I_N(ssi, id) },
		  { 0, RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0, RDMA_SSIU_I_P(ssi, id, busif) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id, busif), 0 },
		  { RDMA_SSIU_O_P(ssi, id, busif), 0 },
		  { RDMA_SSIU_O_P(ssi, id, busif), 0 } },
		 /* Playback */
		 {{ 0, RDMA_SSIU_I_N(ssi, id, busif) },
		  { 0, RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0, RDMA_SSIU_I_P(ssi, id, busif) } } },
	};

	/*
	 * FIXME
	 *
	 * SSI9 with BUSIF4..7 does not fit the address calculation
	 * above and is therefore not supported here
	 */
	if ((id == 9) && (busif >= 4))
		dev_err(dev, "This driver doesn't support SSI%d-%d, so far\n",
			id, busif);

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * Gen1 uses the default DMA address
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

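/*
 * Determine the two ends (mod_from / mod_to) of the DMA transfer that
 * belongs to "this" module by rebuilding the stream path.
 */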
#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi;
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	/*
	 * The DMA request can be routed through either the SSIU or the
	 * SSI. If the DT provides a dedicated SSIU node, use it;
	 * otherwise keep compatibility and use the SSI itself.
	 */
	if (rsnd_ssiu_of_node(priv)) {
		struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);

		/* use SSIU */
		ssi = ssiu;
		if (this == rsnd_io_to_mod_ssi(io))
			this = ssiu;
	} else {
		/* keep compatible, use SSI */
		ssi = rsnd_io_to_mod_ssi(io);
	}

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * Build the stream path; the memory end is represented by NULL:
	 *
	 *	playback : mem -> (SRC -> CTU -> MIX -> DVC) -> SSI
	 *	capture  : SSI -> (SRC -> CTU -> MIX -> DVC) -> mem
	 *
	 * The hop next to memory is handled by the Audio DMAC
	 * (DMA Engine), hops between on-chip modules by the
	 * Audio DMAC peri peri.
	 */
	mod_start = (is_play) ? NULL : ssi;
	mod_end = (is_play) ? ssi : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 * Pick the hop this DMA serves: either the last hop
	 * (mod[idx - 1] -> mod[idx]) or the first one
	 * (mod[0] -> mod[1]), depending on which side of the
	 * path "this" module sits on.
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from = mod[idx - 1];
		*mod_to = mod[idx];
	} else {
		*mod_from = mod[0];
		*mod_to = mod[1];
	}

	dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, " %s%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to) ? " to" : "");
	}
}

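/*
 * Allocate a DMA module for "mod": Audio DMAC peri peri when both ends
 * of the hop are on-chip modules, otherwise the DMA Engine based
 * Audio DMAC.
 */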
static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * No Audio DMAC was set up at probe time: return -EAGAIN so
	 * that the core can fall back to PIO mode
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* both ends are on-chip modules: use Audio DMAC peri peri */
	if (mod_from && mod_to) {
		ops = &rsnd_dmapp_ops;
		attach = rsnd_dmapp_attach;
		dma_id = dmac->dmapp_num;
		type = RSND_MOD_AUDMAPP;
	} else {
		ops = &rsnd_dmaen_ops;
		attach = rsnd_dmaen_attach;
		dma_id = dmac->dmaen_num;
		type = RSND_MOD_AUDMA;
	}

	/* Gen1 always uses the DMA Engine: overwrite */
	if (rsnd_is_gen1(priv)) {
		ops = &rsnd_dmaen_ops;
		attach = rsnd_dmaen_attach;
		dma_id = dmac->dmaen_num;
		type = RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s %s -> %s\n",
		rsnd_mod_name(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to ? mod_to : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

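/*
 * Map the Audio DMAC peri peri ("audmapp") registers on Gen2 or later
 * and register the dummy "mem" module used for debug output.
 */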
int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1: nothing to map here
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
}