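/*
 * Low-level message interface for the Digigram miXart soundcard driver:
 * mailbox access, message send/receive and interrupt handling.
 */
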
#include <linux/interrupt.h>
#include <linux/mutex.h>

#include <asm/io.h>
#include <sound/core.h>
#include "mixart.h"
#include "mixart_hwdep.h"
#include "mixart_core.h"

#define MSG_TIMEOUT_JIFFIES		((400 * HZ) / 1000)

#define MSG_DESCRIPTOR_SIZE		0x24
#define MSG_HEADER_SIZE			(MSG_DESCRIPTOR_SIZE + 4)

#define MSG_DEFAULT_SIZE		512

#define MSG_TYPE_MASK			0x00000003	/* mask for the message type bits below */
#define MSG_TYPE_NOTIFY			0		/* board -> driver */
#define MSG_TYPE_COMMAND		1		/* board -> driver */
#define MSG_TYPE_REQUEST		2		/* driver -> board */
#define MSG_TYPE_ANSWER			3		/* board -> driver */
#define MSG_CANCEL_NOTIFY_MASK		0x80000000	/* set when a notification has been canceled */

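/*
 * Pop one message frame address from the outbound post fifo.
 * Returns 1 and stores the frame address in *msg_frame when a message is
 * pending, 0 when the fifo is empty or the tail pointer is out of range.
 */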
static int retrieve_msg_frame(struct mixart_mgr *mgr, u32 *msg_frame)
{
	/* read the message frame fifo pointers */
	u32 headptr, tailptr;

	tailptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_HEAD));

	if (tailptr == headptr)
		return 0;	/* no message posted */

	if (tailptr < MSG_OUTBOUND_POST_STACK)
		return 0;	/* error */
	if (tailptr >= MSG_OUTBOUND_POST_STACK + MSG_BOUND_STACK_SIZE)
		return 0;	/* error */

	*msg_frame = readl_be(MIXART_MEM(mgr, tailptr));

	/* increment the outbound post tail pointer */
	tailptr += 4;
	if (tailptr >= (MSG_OUTBOUND_POST_STACK + MSG_BOUND_STACK_SIZE))
		tailptr = MSG_OUTBOUND_POST_STACK;
	writel_be(tailptr, MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));

	return 1;
}

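/*
 * Read the message stored in the frame at msg_frame_address: fill in the
 * header fields of *resp, copy the payload (converted to CPU byte order on
 * little-endian hosts) and give the frame back to the outbound free fifo.
 * Returns 0 on success or a negative error code.
 */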
static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
		   u32 msg_frame_address)
{
	unsigned long flags;
	u32 headptr;
	u32 size;
	int err;
#ifndef __BIG_ENDIAN
	unsigned int i;
#endif

	spin_lock_irqsave(&mgr->msg_lock, flags);
	err = 0;

	/* copy the message descriptor from the mailbox memory */
	size                = readl_be(MIXART_MEM(mgr, msg_frame_address));	/* size of descriptor + response data */
	resp->message_id    = readl_be(MIXART_MEM(mgr, msg_frame_address + 4));
	resp->uid.object_id = readl_be(MIXART_MEM(mgr, msg_frame_address + 8));
	resp->uid.desc      = readl_be(MIXART_MEM(mgr, msg_frame_address + 12));

	if ((size < MSG_DESCRIPTOR_SIZE) || (resp->size < (size - MSG_DESCRIPTOR_SIZE))) {
		err = -EINVAL;
		snd_printk(KERN_ERR "problem with response size = %d\n", size);
		goto _clean_exit;
	}
	size -= MSG_DESCRIPTOR_SIZE;

	/* copy the response data */
	memcpy_fromio(resp->data, MIXART_MEM(mgr, msg_frame_address + MSG_HEADER_SIZE), size);
	resp->size = size;

	/* swap the data to CPU byte order if necessary */
#ifndef __BIG_ENDIAN
	size /= 4;	/* size in u32 words */
	for (i = 0; i < size; i++) {
		((u32 *)resp->data)[i] = be32_to_cpu(((u32 *)resp->data)[i]);
	}
#endif

	/* give the message frame back to the outbound free fifo */
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

	if ((headptr < MSG_OUTBOUND_FREE_STACK) || (headptr >= (MSG_OUTBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE))) {
		err = -EINVAL;
		goto _clean_exit;
	}

	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* increment the outbound free head pointer */
	headptr += 4;
	if (headptr >= (MSG_OUTBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE))
		headptr = MSG_OUTBOUND_FREE_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

 _clean_exit:
	spin_unlock_irqrestore(&mgr->msg_lock, flags);

	return err;
}

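/*
 * Post one message to the miXart: take a frame from the inbound free fifo,
 * fill in the message descriptor and payload, optionally remember the frame
 * (or the caller-supplied event) as the pending event to wake up on, and
 * push the frame onto the inbound post fifo.
 * Must be called with mgr->msg_lock held.
 */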
static int send_msg(struct mixart_mgr *mgr,
		    struct mixart_msg *msg,
		    int max_answersize,
		    int mark_pending,
		    u32 *msg_event)
{
	u32 headptr, tailptr;
	u32 msg_frame_address;
	int err, i;

	if (snd_BUG_ON(msg->size % 4))
		return -EINVAL;

	err = 0;

	/* get a message frame address from the inbound free fifo */
	tailptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_HEAD));

	if (tailptr == headptr) {
		snd_printk(KERN_ERR "error: no message frame available\n");
		return -EBUSY;
	}

	if ((tailptr < MSG_INBOUND_FREE_STACK) || (tailptr >= (MSG_INBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE))) {
		return -EINVAL;
	}

	msg_frame_address = readl_be(MIXART_MEM(mgr, tailptr));
	writel(0, MIXART_MEM(mgr, tailptr));	/* clear the fifo slot (zero needs no byte swap) */

	/* increment the inbound free tail pointer */
	tailptr += 4;
	if (tailptr >= (MSG_INBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE))
		tailptr = MSG_INBOUND_FREE_STACK;

	writel_be(tailptr, MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));

	/* copy the message descriptor into the frame */
	writel_be(msg->size + MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address));	/* size of descriptor + request data */
	writel_be(msg->message_id, MIXART_MEM(mgr, msg_frame_address + 4));			/* message id */
	writel_be(msg->uid.object_id, MIXART_MEM(mgr, msg_frame_address + 8));			/* destination uid */
	writel_be(msg->uid.desc, MIXART_MEM(mgr, msg_frame_address + 12));
	writel_be(MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 16));
	writel_be(MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 20));
	writel_be(msg->size, MIXART_MEM(mgr, msg_frame_address + 24));				/* size of the request data */
	writel_be(MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 28));
	writel_be(0, MIXART_MEM(mgr, msg_frame_address + 32));
	writel_be(MSG_DESCRIPTOR_SIZE + max_answersize, MIXART_MEM(mgr, msg_frame_address + 36));

	/* copy the request data */
	for (i = 0; i < msg->size; i += 4) {
		writel_be(*(u32 *)(msg->data + i), MIXART_MEM(mgr, MSG_HEADER_SIZE + msg_frame_address + i));
	}

	if (mark_pending) {
		if (*msg_event) {
			/* the caller waits for a notification event, not for this frame */
			mgr->pending_event = *msg_event;
		} else {
			/* the caller waits for the answer to this frame */
			mgr->pending_event = msg_frame_address;

			/* the answer will be read back from the same frame address */
			*msg_event = msg_frame_address;
		}
	}

	/* mark the frame as a request */
	msg_frame_address |= MSG_TYPE_REQUEST;

	/* post the frame on the inbound post fifo */
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	if ((headptr < MSG_INBOUND_POST_STACK) || (headptr >= (MSG_INBOUND_POST_STACK + MSG_BOUND_STACK_SIZE))) {
		return -EINVAL;
	}

	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* increment the inbound post head pointer */
	headptr += 4;
	if (headptr >= (MSG_INBOUND_POST_STACK + MSG_BOUND_STACK_SIZE))
		headptr = MSG_INBOUND_POST_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	return 0;
}

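/*
 * Send a request to the miXart and wait (uninterruptible, bounded by
 * MSG_TIMEOUT_JIFFIES) for its answer, which is copied into resp_data.
 */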
int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int max_resp_size, void *resp_data)
{
	struct mixart_msg resp;
	u32 msg_frame = 0;	/* 0 = let send_msg() report back the frame address used */
	int err;
	wait_queue_t wait;
	long timeout;

	mutex_lock(&mgr->msg_mutex);

	init_waitqueue_entry(&wait, current);

	spin_lock_irq(&mgr->msg_lock);

	/* send the request and mark its answer as pending */
	err = send_msg(mgr, request, max_resp_size, 1, &msg_frame);
	if (err) {
		spin_unlock_irq(&mgr->msg_lock);
		mutex_unlock(&mgr->msg_mutex);
		return err;
	}

	/* wait for the answer (or a timeout) */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	spin_unlock_irq(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (!timeout) {
		/* error - no answer received in time */
		mutex_unlock(&mgr->msg_mutex);
		snd_printk(KERN_ERR "error: no response on msg %x\n", msg_frame);
		return -EIO;
	}

	/* retrieve the answer into the caller's buffer */
	resp.message_id = 0;
	resp.uid = (struct mixart_uid){0,0};
	resp.data = resp_data;
	resp.size = max_resp_size;

	err = get_msg(mgr, &resp, msg_frame);

	if (request->message_id != resp.message_id)
		snd_printk(KERN_ERR "RESPONSE ERROR!\n");

	mutex_unlock(&mgr->msg_mutex);
	return err;
}

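/*
 * Send a request and wait (uninterruptible, bounded by MSG_TIMEOUT_JIFFIES)
 * until the given notification event is signalled by the interrupt handler.
 */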
int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
				   struct mixart_msg *request, u32 notif_event)
{
	int err;
	wait_queue_t wait;
	long timeout;

	if (snd_BUG_ON(!notif_event))
		return -EINVAL;
	if (snd_BUG_ON((notif_event & MSG_TYPE_MASK) != MSG_TYPE_NOTIFY))
		return -EINVAL;
	if (snd_BUG_ON(notif_event & MSG_CANCEL_NOTIFY_MASK))
		return -EINVAL;

	mutex_lock(&mgr->msg_mutex);

	init_waitqueue_entry(&wait, current);

	spin_lock_irq(&mgr->msg_lock);

	/* send the request; the wakeup will come from the notification event */
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 1, &notif_event);
	if (err) {
		spin_unlock_irq(&mgr->msg_lock);
		mutex_unlock(&mgr->msg_mutex);
		return err;
	}

	/* wait for the notification (or a timeout) */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	spin_unlock_irq(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (!timeout) {
		/* error - no notification received in time */
		mutex_unlock(&mgr->msg_mutex);
		snd_printk(KERN_ERR "error: notification %x not received\n", notif_event);
		return -EIO;
	}

	mutex_unlock(&mgr->msg_mutex);
	return 0;
}

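/*
 * Send a request without waiting for the answer; the answer is consumed
 * later by snd_mixart_msg_tasklet().
 */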
int snd_mixart_send_msg_nonblock(struct mixart_mgr *mgr, struct mixart_msg *request)
{
	u32 message_frame;
	unsigned long flags;
	int err;

	/* just send the message, do not mark it as pending */
	spin_lock_irqsave(&mgr->msg_lock, flags);
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 0, &message_frame);
	spin_unlock_irqrestore(&mgr->msg_lock, flags);

	/* the answer will be handled by snd_mixart_msg_tasklet() */
	atomic_inc(&mgr->msg_processed);

	return err;
}

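/* buffer shared by the interrupt handler and the tasklet to read messages */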
static u32 mixart_msg_data[MSG_DEFAULT_SIZE / 4];

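/*
 * Bottom half: process the message frames that snd_mixart_interrupt() queued
 * in mgr->msg_fifo (answers to requests sent with snd_mixart_send_msg_nonblock()).
 */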
void snd_mixart_msg_tasklet(unsigned long arg)
{
	struct mixart_mgr *mgr = (struct mixart_mgr *)(arg);
	struct mixart_msg resp;
	u32 msg, addr, type;
	int err;

	spin_lock(&mgr->lock);

	while (mgr->msg_fifo_readptr != mgr->msg_fifo_writeptr) {
		msg = mgr->msg_fifo[mgr->msg_fifo_readptr];
		mgr->msg_fifo_readptr++;
		mgr->msg_fifo_readptr %= MSG_FIFO_SIZE;

		/* process the message */
		addr = msg & ~MSG_TYPE_MASK;
		type = msg & MSG_TYPE_MASK;

		switch (type) {
		case MSG_TYPE_ANSWER:
			/* answer to a non-blocking request: read it and check the status */
			resp.message_id = 0;
			resp.data = mixart_msg_data;
			resp.size = sizeof(mixart_msg_data);
			err = get_msg(mgr, &resp, addr);
			if (err < 0) {
				snd_printk(KERN_ERR "tasklet: error(%d) reading mf %x\n", err, msg);
				break;
			}

			switch (resp.message_id) {
			case MSG_STREAM_START_INPUT_STAGE_PACKET:
			case MSG_STREAM_START_OUTPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_INPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_OUTPUT_STAGE_PACKET:
				if (mixart_msg_data[0])
					snd_printk(KERN_ERR "tasklet : error MSG_STREAM_ST***_***PUT_STAGE_PACKET status=%x\n", mixart_msg_data[0]);
				break;
			default:
				snd_printdd("tasklet received mf(%x) : msg_id(%x) uid(%x, %x) size(%zd)\n",
					    msg, resp.message_id, resp.uid.object_id, resp.uid.desc, resp.size);
				break;
			}
			break;
		case MSG_TYPE_NOTIFY:
		case MSG_TYPE_COMMAND:
			/* fall through: not handled here */
		default:
			snd_printk(KERN_ERR "tasklet doesn't know what to do with message %x\n", msg);
		}

		/* decrement the counter of outstanding non-blocking requests */
		atomic_dec(&mgr->msg_processed);

	}

	spin_unlock(&mgr->lock);
}

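/*
 * Interrupt handler: mask the board interrupts, acknowledge the outbound
 * doorbell, then drain the outbound post fifo.  Timer notifications and
 * traces are handled directly; the task waiting for an answer (or for a
 * notification) is woken up; other answers are queued for the tasklet.
 */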
irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
{
	struct mixart_mgr *mgr = dev_id;
	int err;
	struct mixart_msg resp;

	u32 msg;
	u32 it_reg;

	spin_lock(&mgr->lock);

	it_reg = readl_le(MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET));
	if (!(it_reg & MIXART_OIDI)) {
		/* this device did not cause the interrupt */
		spin_unlock(&mgr->lock);
		return IRQ_NONE;
	}

	/* mask all interrupts from the board */
	writel_le(MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));

	/* read the outbound doorbell register and write the value back */
	it_reg = readl(MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));
	writel(it_reg, MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));

	/* clear the interrupt status bit */
	writel_le(MIXART_OIDI, MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET));

	/* process all pending message frames */
	while (retrieve_msg_frame(mgr, &msg)) {

		switch (msg & MSG_TYPE_MASK) {
		case MSG_TYPE_COMMAND:
			resp.message_id = 0;
			resp.data = mixart_msg_data;
			resp.size = sizeof(mixart_msg_data);
			err = get_msg(mgr, &resp, msg & ~MSG_TYPE_MASK);
			if (err < 0) {
				snd_printk(KERN_ERR "interrupt: error(%d) reading mf %x\n", err, msg);
				break;
			}

			if (resp.message_id == MSG_SERVICES_TIMER_NOTIFY) {
				int i;
				struct mixart_timer_notify *notify;
				notify = (struct mixart_timer_notify *)mixart_msg_data;

				for (i = 0; i < notify->stream_count; i++) {

					u32 buffer_id = notify->streams[i].buffer_id;
					unsigned int chip_number = (buffer_id & MIXART_NOTIFY_CARD_MASK) >> MIXART_NOTIFY_CARD_OFFSET;
					unsigned int pcm_number  = (buffer_id & MIXART_NOTIFY_PCM_MASK ) >> MIXART_NOTIFY_PCM_OFFSET;
					unsigned int sub_number  = buffer_id & MIXART_NOTIFY_SUBS_MASK;
					unsigned int is_capture  = ((buffer_id & MIXART_NOTIFY_CAPT_MASK) != 0);

					struct snd_mixart *chip = mgr->chip[chip_number];
					struct mixart_stream *stream;

					if ((chip_number >= mgr->num_cards) || (pcm_number >= MIXART_PCM_TOTAL) || (sub_number >= MIXART_PLAYBACK_STREAMS)) {
						snd_printk(KERN_ERR "error MSG_SERVICES_TIMER_NOTIFY buffer_id (%x) pos(%d)\n",
							   buffer_id, notify->streams[i].sample_pos_low_part);
						break;
					}

					if (is_capture)
						stream = &chip->capture_stream[pcm_number];
					else
						stream = &chip->playback_stream[pcm_number][sub_number];

					if (stream->substream && (stream->status == MIXART_STREAM_STATUS_RUNNING)) {
						struct snd_pcm_runtime *runtime = stream->substream->runtime;
						int elapsed = 0;
						u64 sample_count = ((u64)notify->streams[i].sample_pos_high_part) << 32;
						sample_count |= notify->streams[i].sample_pos_low_part;

						/* advance by whole periods up to the reported sample position */
						while (1) {
							u64 new_elapse_pos = stream->abs_period_elapsed + runtime->period_size;

							if (new_elapse_pos > sample_count) {
								break;
							} else {
								elapsed = 1;
								stream->buf_periods++;
								if (stream->buf_periods >= runtime->periods)
									stream->buf_periods = 0;

								stream->abs_period_elapsed = new_elapse_pos;
							}
						}
						stream->buf_period_frag = (u32)(sample_count - stream->abs_period_elapsed);

						if (elapsed) {
							spin_unlock(&mgr->lock);
							snd_pcm_period_elapsed(stream->substream);
							spin_lock(&mgr->lock);
						}
					}
				}
				break;
			}
			if (resp.message_id == MSG_SERVICES_REPORT_TRACES) {
				if (resp.size > 1) {
#ifndef __BIG_ENDIAN
					/* traces are text: swap the words back to their original byte order */
					int i;
					for (i = 0; i < (resp.size / 4); i++) {
						mixart_msg_data[i] = cpu_to_be32(mixart_msg_data[i]);
					}
#endif
					((char *)mixart_msg_data)[resp.size - 1] = 0;
					snd_printdd("MIXART TRACE : %s\n", (char *)mixart_msg_data);
				}
				break;
			}

			snd_printdd("command %x not handled\n", resp.message_id);
			break;

		case MSG_TYPE_NOTIFY:
			if (msg & MSG_CANCEL_NOTIFY_MASK) {
				msg &= ~MSG_CANCEL_NOTIFY_MASK;
				snd_printk(KERN_ERR "canceled notification %x !\n", msg);
			}
			/* fall through to MSG_TYPE_ANSWER */
		case MSG_TYPE_ANSWER:
			/* wake up the waiter if this is the pending event */
			spin_lock(&mgr->msg_lock);
			if ((msg & ~MSG_TYPE_MASK) == mgr->pending_event) {
				wake_up(&mgr->msg_sleep);
				mgr->pending_event = 0;
			}
			/* otherwise queue it for the tasklet */
			else {
				mgr->msg_fifo[mgr->msg_fifo_writeptr] = msg;
				mgr->msg_fifo_writeptr++;
				mgr->msg_fifo_writeptr %= MSG_FIFO_SIZE;
				tasklet_schedule(&mgr->msg_taskq);
			}
			spin_unlock(&mgr->msg_lock);
			break;
		case MSG_TYPE_REQUEST:
		default:
			snd_printdd("interrupt received request %x\n", msg);
			break;
		}
	}

	/* allow the outbound doorbell interrupt again */
	writel_le(MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));

	spin_unlock(&mgr->lock);

	return IRQ_HANDLED;
}

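/*
 * Initialize the mailbox: clear the host/agent resource protection words and,
 * if the irq handler is already installed, allow the outbound doorbell interrupt.
 */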
void snd_mixart_init_mailbox(struct mixart_mgr *mgr)
{
	writel(0, MIXART_MEM(mgr, MSG_HOST_RSC_PROTECTION));
	writel(0, MIXART_MEM(mgr, MSG_AGENT_RSC_PROTECTION));

	if (mgr->irq >= 0) {
		writel_le(MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));
	}
	return;
}

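/* mask all outbound interrupts from the board */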
void snd_mixart_exit_mailbox(struct mixart_mgr *mgr)
{
	writel_le(MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));
	return;
}

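/* hard-reset the embedded board */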
void snd_mixart_reset_board(struct mixart_mgr *mgr)
{
	writel_be(1, MIXART_REG(mgr, MIXART_BA1_BRUTAL_RESET_OFFSET));
	return;
}