1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/sched.h>
14#include <linux/delay.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/bio.h>
18#include <linux/dma-mapping.h>
19#include <linux/crc7.h>
20#include <linux/crc-itu-t.h>
21#include <linux/scatterlist.h>
22
23#include <linux/mmc/host.h>
24#include <linux/mmc/mmc.h>
25#include <linux/mmc/slot-gpio.h>
26
27#include <linux/spi/spi.h>
28#include <linux/spi/mmc_spi.h>
29
30#include <asm/unaligned.h>
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
/*
 * Local protocol constants; see the SD/MMC "SPI mode" specifications
 * for the wire encodings (data tokens, data-response codes, etc.).
 */

/* Response codes acknowledging each data block written: form xxx0SSS1 */
#define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)

/* Data tokens delimiting block payloads on the wire; reads and writes
 * start with one of these, and a multiblock write ends with STOP_TRAN.
 */
#define SPI_TOKEN_SINGLE 0xfe	/* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE 0xfc	/* multiblock write */
#define SPI_TOKEN_STOP_TRAN 0xfd	/* terminate multiblock write */

#define MMC_SPI_BLOCKSIZE 512	/* fixed 512-byte data blocks */

/* Conservative busy-wait timeouts used for R1B responses and init */
#define MMC_SPI_R1B_TIMEOUT_MS 3000
#define MMC_SPI_INIT_TIMEOUT_MS 3000

/* Max blocks per request; bounds max_segs/max_req_size/max_blk_count */
#define MMC_SPI_BLOCKSATONCE 128
91
92
93
94
95
96
97
98
/*
 * Scratch buffer for command frames, status/busy polling, and the
 * per-block token and CRC.  One per host; DMA-mapped as a unit when
 * the controller supports DMA (host->data_dma covers it).
 */
struct scratch {
	u8 status[29];		/* command frame + response/busy window */
	u8 data_token;		/* outgoing data token for writes */
	__be16 crc_val;		/* CRC16 sent (write) or received (read) */
};
104
/* Per-slot driver state, embedded in the mmc_host's private area */
struct mmc_spi_host {
	struct mmc_host *mmc;
	struct spi_device *spi;

	unsigned char power_mode;	/* last MMC_POWER_* applied */
	u16 powerup_msecs;		/* post-power-up delay, capped at 250 */

	struct mmc_spi_platform_data *pdata;

	/* for bulk data transfers: token + data + crc + early status,
	 * assembled into one message per block by setup_data_message()
	 */
	struct spi_transfer token, t, crc, early_status;
	struct spi_message m;

	/* for status readback (byte-at-a-time polling) */
	struct spi_transfer status;
	struct spi_message readback;

	/* underlying device for DMA mapping, or NULL for PIO only */
	struct device *dma_dev;

	/* buffer used for commands and for message "overhead" */
	struct scratch *data;
	dma_addr_t data_dma;

	/* Specs say to write ones most of the time, even when the card
	 * has no need to read its input data; and many cards won't care.
	 * This is our source of those ones (MMC_SPI_BLOCKSIZE of 0xff).
	 */
	void *ones;
	dma_addr_t ones_dma;
};
136
137
138
139
140
141
142
143
/*
 * Deassert the card's chip select.  Our messages end with cs_change
 * set, leaving CS asserted; calling spi_setup() with no message
 * pending asks the SPI core to return the chipselect to its idle
 * (deselected) state.  NOTE(review): relies on that spi_setup() side
 * effect -- confirm against the controller driver in use.
 */
static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	return spi_setup(host->spi);
}
149
/*
 * Clock in @len bytes from the card into host->data->status, using the
 * prebuilt "readback" message (which transmits all-ones on MOSI while
 * receiving).  Returns 0 or a negative errno from spi_sync_locked().
 */
static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	/* the scratch buffer is the hard cap on a single poll */
	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	/* hand the (DMA-mapped) scratch buffer to the device for the
	 * transfer, then back to the CPU so we can inspect the bytes
	 */
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	status = spi_sync_locked(host->spi, &host->readback);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	return status;
}
176
/*
 * Repeatedly read @n-byte chunks until some byte differs from @byte,
 * returning that byte; -ETIMEDOUT after @timeout jiffies; or a
 * negative errno on I/O error.  Used both to skip 0xff fill bytes
 * (token hunting) and to wait out all-zero busy signalling.
 */
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *cp = host->data->status;
	unsigned long start = jiffies;

	while (1) {
		int status;
		unsigned i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		if (time_is_before_jiffies(start + timeout))
			break;

		/* If we need long timeouts, we may release the CPU.
		 * We use jiffies here because we want to have a relation
		 * between elapsed time and the blocking of the scheduler.
		 */
		if (time_is_before_jiffies(start + 1))
			schedule();
	}
	return -ETIMEDOUT;
}
208
209static inline int
210mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
211{
212 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
213}
214
215static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
216{
217 return mmc_spi_skip(host, timeout, 1, 0xff);
218}
219
220
221
222
223
224
225
226
227
228
229
230static char *maptype(struct mmc_command *cmd)
231{
232 switch (mmc_spi_resp_type(cmd)) {
233 case MMC_RSP_SPI_R1: return "R1";
234 case MMC_RSP_SPI_R1B: return "R1B";
235 case MMC_RSP_SPI_R2: return "R2/R5";
236 case MMC_RSP_SPI_R3: return "R3/R4/R7";
237 default: return "?";
238 }
239}
240
241
/*
 * Decode the SPI response to a command.  The response bytes were
 * (usually) clocked in during the command transfer itself, so first
 * hunt for them in the scratch buffer (host->t covers those bytes);
 * if they haven't arrived yet, poll one byte at a time.  Some cards
 * return responses misaligned on byte boundaries; the rotator and
 * bitshift logic below re-aligns them.
 *
 * Fills cmd->resp[] and cmd->error.  With @cs_on and success, chip
 * select stays asserted for the following data phase; otherwise it
 * is always deasserted before returning.
 */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	unsigned long timeout_ms;
	u8 *cp = host->data->status;
	u8 *end = cp + host->t.len;
	int value = 0;
	int bitshift;
	u8 leftover = 0;
	unsigned short rotator;
	int i;
	char tag[32];

	snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* Skip past the echoed command frame (7 bytes plus leadtime),
	 * then past any all-ones fill bytes, looking for the response.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Nothing in the transfer?  The card is slow; from here on we
	 * poll a single byte at a time into status[0].
	 */
	if (cp == end) {
		cp = host->data->status;
		end = cp+1;

		/* NOTE(review): the spec's N(CR) is at most 8 bytes and
		 * we already covered that window in the main transfer;
		 * this extra ~14-byte poll presumably tolerates slower
		 * cards -- confirm before tightening.
		 */
		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	if (*cp & 0x80) {
		/* a valid R1 byte has its top bit clear, so a set top
		 * bit means the response is bit-shifted: accumulate two
		 * bytes and rotate until the leading zero lines up
		 */
		rotator = *cp++ << 8;
		/* read the next byte if we ran off the buffer */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		rotator |= *cp++;
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;	/* low bits feed later bytes */
	} else {
		cmd->resp[0] = *cp++;
	}
	cmd->error = 0;

	/* Status byte: the entire seven-bit R1 response */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT;	/* bad address/parameter */
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS;	/* function not implemented */
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ;	/* command CRC failed */
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;		/* erase sequence error */
		/* else R1_SPI_IDLE: card still initializing, not fatal */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B: R1 followed by busy (all-zero) signalling */
	case MMC_RSP_SPI_R1B:
		/* maybe we already read past the busy tokens */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end) {
			timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
				MMC_SPI_R1B_TIMEOUT_MS;
			mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
		}
		break;

	/* SPI R2 (and R5): R1 plus one more status/data byte */
	case MMC_RSP_SPI_R2:
		/* fetch the second byte if it's not in the buffer */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		if (bitshift) {
			/* re-align the shifted second byte */
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3 (and R4/R7): R1 plus four more bytes in resp[1] */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* fetch the next byte if needed */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp+1;
			}
			if (bitshift) {
				/* shift each byte through the rotator */
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1: just the one status byte, already stored */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
			mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and on no-data success */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}
421
422
423
424
425
426
427
/*
 * Transmit one command and collect its response in a single
 * full-duplex transfer where possible.  The scratch buffer holds a
 * leadtime byte, the 0x40|opcode byte, the 32-bit argument, and a
 * CRC7 with the mandatory stop bit; everything after the frame stays
 * 0xff so the same transfer clocks the response back in.
 *
 * @cs_on: leave chip select asserted afterwards (data phase follows).
 * Returns negative errno on failure, mirrored into cmd->error.
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch *data = host->data;
	u8 *cp = data->status;
	int status;
	struct spi_transfer *t;

	/* fill the buffer with ones; unused tail doubles as RX space */
	memset(cp, 0xff, sizeof(data->status));

	/* cp[0] is the 0xff leadtime byte; then command, arg, crc|stop */
	cp[1] = 0x40 | cmd->opcode;
	put_unaligned_be32(cmd->arg, cp + 2);
	cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
	cp += 7;

	/* Size the remainder of the transfer for the worst-case
	 * position of the response, depending on response type:
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		/* a block read follows immediately; read only the
		 * minimum turnaround plus the R1 status byte, and let
		 * the data path pick up the data token
		 */
		cp += 2;
	} else {
		/* max turnaround + status + post-response gap */
		cp += 10;
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else: plain R1 (most commands) */
	}

	dev_dbg(&host->spi->dev, " CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;	/* full duplex, in place */
	t->tx_dma = t->rx_dma = host->data_dma;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}
	status = spi_sync_locked(host->spi, &host->m);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands, chipselect can be turned off */
	return mmc_spi_response_get(host, cmd, cs_on);
}
542
543
544
545
546
547
548
549
550
551
552
553
/*
 * Build the per-block spi_message used by the data path:
 *
 *   [ token (writes only) ] [ payload (host->t) ] [ crc16 ]
 *   [ early status (writes and multiblock reads) ]
 *
 * The caller plugs this block's buffer and length into host->t before
 * each transfer; everything else is wired up here once per request.
 */
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host *host,
	int multiple,
	enum dma_data_direction direction)
{
	struct spi_transfer *t;
	struct scratch *scratch = host->data;
	dma_addr_t dma = host->data_dma;

	spi_message_init(&host->m);
	if (dma)
		host->m.is_dma_mapped = 1;

	/* for reads, readblock() skips leading 0xff bytes and finds the
	 * token itself; for writes, this transfer issues the token
	 */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, data_token);
		spi_message_add_tail(t, &host->m);
	}

	/* Body of transfer is the payload, then CRC; for reads we
	 * transmit ones (host->ones) while receiving.  Buffer pointer
	 * and length are filled in per block by the caller.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	t->tx_dma = host->ones_dma;

	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC is computed (or not) per block later */
		t->tx_buf = &scratch->crc_val;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, crc_val);
	} else {
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = &scratch->crc_val;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, crc_val);
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * "Early status": writes need the one-byte data-response plus
	 * the start of busy signalling; multiblock reads need at least
	 * a byte of turnaround before the next token.  For writes we
	 * read the whole status[] window up front so writeblock() can
	 * often see the end of busy without extra byte-polling.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = scratch->status;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, status);
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}
639
640
641
642
643
644
645
646
647
648
649
650
651
/*
 * Write one data block using the message prepared by
 * mmc_spi_setup_data_message() (token + payload + crc + early
 * status); t->tx_buf points at this block's payload.  On success
 * advances t->tx_buf/tx_dma past the block and waits until the card
 * is no longer busy.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status, i;
	struct scratch *scratch = host->data;
	u32 pattern;

	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	status = spi_sync_locked(spi, &host->m);

	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	/*
	 * Find the data-response reply (xxx0SSS1) in the early status
	 * bytes.  Some cards return it bit-shifted, so scan the first
	 * 32 bits for the leading 0 bit of the code.
	 */
	pattern = get_unaligned_be32(scratch->status);

	/* first 3 bits of the pattern are undefined; force them to 1 */
	pattern |= 0xE0000000;

	/* left-adjust to the leading 0 bit */
	while (pattern & 0x80000000)
		pattern <<= 1;
	/* right-adjust for matching: code now sits in bits 4..0 */
	pattern >>= 27;

	switch (pattern) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	/* advance past this block for the next iteration */
	t->tx_buf += t->len;
	if (host->dma_dev)
		t->tx_dma += t->len;

	/* Return when not busy.  If we already collected that status,
	 * avoid extra I/O; the card is non-busy once a 1 bit appears.
	 */
	for (i = 4; i < sizeof(scratch->status); i++) {
		if (scratch->status[i] & 0x01)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, timeout);
}
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
/*
 * Read one data block: wait for the data token, then run the prebuilt
 * message to clock in the payload and CRC.  Handles cards that return
 * the token misaligned on bit boundaries by shifting the received
 * buffer (and CRC) back into alignment afterwards.  On success,
 * advances t->rx_buf/rx_dma past the block.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status;
	struct scratch *scratch = host->data;
	unsigned int bitshift;
	u8 leftover;

	/* read one byte first: some cards send a 0x00 filler before the
	 * usual 0xff fill; skip over either before token-hunting
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host, timeout);

	if (status < 0) {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
		return status;
	}

	/* the token may be bit-shifted: count leading 1 bits; the first
	 * 0 bit marks the token, and everything after it is data
	 */
	bitshift = 7;
	while (status & 0x80) {
		status <<= 1;
		bitshift--;
	}
	leftover = status << 1;	/* bits belonging to the first data byte */

	if (host->dma_dev) {
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_device(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	status = spi_sync_locked(spi, &host->m);
	if (status < 0) {
		dev_dbg(&spi->dev, "read error %d\n", status);
		return status;
	}

	if (host->dma_dev) {
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_cpu(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	if (bitshift) {
		/* Walk through the data and the crc and do
		 * all the magic to get byte-aligned data.
		 */
		u8 *cp = t->rx_buf;
		unsigned int len;
		unsigned int bitright = 8 - bitshift;
		u8 temp;
		for (len = t->len; len; len--) {
			temp = *cp;
			*cp++ = leftover | (temp >> bitshift);
			leftover = temp << bitright;
		}
		/* the two CRC bytes need the same treatment */
		cp = (u8 *) &scratch->crc_val;
		temp = *cp;
		*cp++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
		temp = *cp;
		*cp = leftover | (temp >> bitshift);
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev,
				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
				scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	/* advance past this block for the next iteration */
	t->rx_buf += t->len;
	if (host->dma_dev)
		t->rx_dma += t->len;

	return 0;
}
856
857
858
859
860
861
/*
 * An MMC/SD data stage includes one or more blocks, optional CRCs,
 * and inline handshaking; run it synchronously, one scatterlist
 * segment (and one block) at a time.  Errors are reported through
 * data->error; data->bytes_xfered tracks progress.
 */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device *spi = host->spi;
	struct device *dma_dev = host->dma_dev;
	struct spi_transfer *t;
	enum dma_data_direction direction;
	struct scatterlist *sg;
	unsigned n_sg;
	int multiple = (data->blocks > 1);
	u32 clock_rate;
	unsigned long timeout;

	direction = mmc_get_dma_dir(data);
	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	if (t->speed_hz)
		clock_rate = t->speed_hz;
	else
		clock_rate = spi->max_speed_hz;

	/* convert the ns + clocks budget to jiffies, rounding up */
	timeout = data->timeout_ns / 1000 +
		data->timeout_clks * 1000000 / clock_rate;
	timeout = usecs_to_jiffies((unsigned int)timeout) + 1;

	/* Handle scatterlist segments one at a time, with synchronous
	 * reads and writes for each block.
	 */
	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
		int status = 0;
		dma_addr_t dma_addr = 0;
		void *kmap_addr;
		unsigned length = sg->length;
		enum dma_data_direction dir = direction;

		/* set up DMA mapping for controller drivers that might
		 * use DMA ... though they may still fall back to PIO
		 */
		if (dma_dev) {
			/* partial-page reads get a bidirectional mapping
			 * so we never invalidate data we don't own
			 */
			if ((sg->offset != 0 || length != PAGE_SIZE)
					&& dir == DMA_FROM_DEVICE)
				dir = DMA_BIDIRECTIONAL;

			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
						PAGE_SIZE, dir);
			if (dma_mapping_error(dma_dev, dma_addr)) {
				data->error = -EFAULT;
				break;
			}
			if (direction == DMA_TO_DEVICE)
				t->tx_dma = dma_addr + sg->offset;
			else
				t->rx_dma = dma_addr + sg->offset;
		}

		/* allow PIO too; kmap keeps highmem pages accessible */
		kmap_addr = kmap(sg_page(sg));
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&host->spi->dev, " %s block, %d bytes\n",
				(direction == DMA_TO_DEVICE) ? "write" : "read",
				t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t, timeout);
			else
				status = mmc_spi_readblock(host, t, timeout);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_kernel_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));
		if (dma_dev)
			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n",
				(direction == DMA_TO_DEVICE) ? "write" : "read",
				status);
			break;
		}
	}

	/* A multiblock write must be terminated with a STOP_TRAN token,
	 * after which the card signals busy until the write completes.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch *scratch = host->data;
		int tmp;
		const unsigned statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, " STOP_TRAN\n");

		/* Reuse the early_status transfer alone: rebuild the
		 * message with just that transfer, transmit the stop
		 * token in status[0] (rest 0xff), and read back the
		 * same window to catch the busy signalling.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
			&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.tx_dma = host->early_status.rx_dma;
		host->early_status.len = statlen;

		if (host->dma_dev)
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		tmp = spi_sync_locked(spi, &host->m);

		if (host->dma_dev)
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with that one
		 * I/O; any nonzero byte after the token/turnaround means
		 * busy already ended.  Otherwise keep polling.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}
1025
1026
1027
1028
1029
1030
1031
/*
 * MMC core entry point: run one request (command, optional data
 * stage, optional stop) synchronously while holding exclusive use of
 * the SPI bus, then complete it via mmc_request_done().
 */
static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	int status = -EINVAL;
	int crc_retry = 5;
	struct mmc_command stop;

#ifdef DEBUG
	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
	{
		struct mmc_command *cmd;
		int invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* request exclusive bus access */
	spi_bus_lock(host->spi->master);

crc_recover:
	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);

		/*
		 * The SPI bus is not always reliable.  On a CRC error,
		 * flush the transfer with STOP_TRANSMISSION and retry
		 * the whole request a bounded number of times before
		 * reporting the failure upward.
		 */
		if (mrq->data->error == -EILSEQ && crc_retry) {
			stop.opcode = MMC_STOP_TRANSMISSION;
			stop.arg = 0;
			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			status = mmc_spi_command_send(host, mrq, &stop, 0);
			crc_retry--;
			mrq->data->error = 0;
			goto crc_recover;
		}

		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	/* release the bus */
	spi_bus_unlock(host->spi->master);

	mmc_request_done(host->mmc, mrq);
}
1104
1105
1106
1107
1108
1109
1110
1111
1112
/*
 * Card initialization clocking: drain any stale state, then clock out
 * at least 74 cycles of ones with the card deselected (see section
 * 6.4.1 of the simplified SD physical layer spec).  Since not every
 * controller can clock with CS deasserted, fake "deselected" by
 * temporarily inverting the chipselect polarity.
 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
	mmc_spi_readbytes(host, 10);

	/* flip CS polarity so the card sees itself deselected while we
	 * keep clocking ones
	 */
	host->spi->mode ^= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* just warn; most cards initialize fine regardless */
		dev_warn(&host->spi->dev,
			"can't change chip-select polarity\n");
		host->spi->mode ^= SPI_CS_HIGH;
	} else {
		/* 18 bytes == 144 clocks, comfortably over the 74 minimum */
		mmc_spi_readbytes(host, 18);

		/* restore normal chipselect polarity */
		host->spi->mode ^= SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* we can't get the same setup we had before? */
			dev_err(&host->spi->dev,
				"can't restore chip-select polarity\n");
		}
	}
}
1157
1158static char *mmc_powerstring(u8 power_mode)
1159{
1160 switch (power_mode) {
1161 case MMC_POWER_OFF: return "off";
1162 case MMC_POWER_UP: return "up";
1163 case MMC_POWER_ON: return "on";
1164 }
1165 return "?";
1166}
1167
/*
 * MMC core callback: apply power-mode and clock changes.  Handles
 * optional slot power switching via platform data, runs the 74-clock
 * init sequence on power-on, and grounds the card's inputs before
 * power-off so the card isn't back-powered through its signal pins.
 */
static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
			mmc_powerstring(ios->power_mode),
			ios->vdd,
			canpower ? ", can switch" : "");

		/* switch power on/off if possible, allowing for the
		 * (capped) powerup delay before talking to the card
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* see 6.4.1 in the simplified SD card physical spec 2.0 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs to avoid power
		 * delivery from clock/data lines: switch to mode 0 (clock
		 * idles low), write a zero byte (MOSI low), and wait
		 * before letting VDD discharge.
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			/* clock low (mode 0), MOSI low (0x00 written);
			 * give it 10ms with everything quiescent
			 */
			msleep(10);
			if (mres == 0) {
				/* restore mode 3 only if the earlier
				 * switch to mode 0 actually succeeded
				 */
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3 failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev, " clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}
1254
/* Operations exposed to the MMC core; card-detect and write-protect
 * go through the generic slot-gpio helpers.
 */
static const struct mmc_host_ops mmc_spi_ops = {
	.request = mmc_spi_request,
	.set_ios = mmc_spi_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
};
1261
1262
1263
1264
1265
1266
1267
1268
1269static irqreturn_t
1270mmc_spi_detect_irq(int irq, void *mmc)
1271{
1272 struct mmc_spi_host *host = mmc_priv(mmc);
1273 u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
1274
1275 mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
1276 return IRQ_HANDLED;
1277}
1278
1279#ifdef CONFIG_HAS_DMA
/*
 * DMA-map the "ones" fill buffer (TX-only) and the scratch structure
 * (bidirectional) against the SPI controller's parent device, when
 * that device is DMA-capable.  Sets host->dma_dev on success.
 * Returns 0 (including the no-DMA case) or -ENOMEM after undoing any
 * partial mapping.
 */
static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
{
	struct spi_device *spi = host->spi;
	struct device *dev;

	/* no dma_mask means the controller can't DMA: run PIO-only */
	if (!spi->master->dev.parent->dma_mask)
		return 0;

	dev = spi->master->dev.parent;

	host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
				DMA_TO_DEVICE);
	if (dma_mapping_error(dev, host->ones_dma))
		return -ENOMEM;

	host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, host->data_dma)) {
		/* unwind the first mapping before failing */
		dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
				DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* hand the scratch buffer to the CPU until the first transfer */
	dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);

	host->dma_dev = dev;
	return 0;
}
1309
1310static void mmc_spi_dma_free(struct mmc_spi_host *host)
1311{
1312 if (!host->dma_dev)
1313 return;
1314
1315 dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
1316 DMA_TO_DEVICE);
1317 dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
1318 DMA_BIDIRECTIONAL);
1319}
#else
/* No DMA support compiled in: everything runs through the PIO path. */
static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
#endif
1324
1325static int mmc_spi_probe(struct spi_device *spi)
1326{
1327 void *ones;
1328 struct mmc_host *mmc;
1329 struct mmc_spi_host *host;
1330 int status;
1331 bool has_ro = false;
1332
1333
1334
1335
1336 if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
1337 return -EINVAL;
1338
1339
1340
1341
1342
1343
1344
1345 if (spi->mode != SPI_MODE_3)
1346 spi->mode = SPI_MODE_0;
1347 spi->bits_per_word = 8;
1348
1349 status = spi_setup(spi);
1350 if (status < 0) {
1351 dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
1352 spi->mode, spi->max_speed_hz / 1000,
1353 status);
1354 return status;
1355 }
1356
1357
1358
1359
1360
1361
1362
1363 status = -ENOMEM;
1364 ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
1365 if (!ones)
1366 goto nomem;
1367 memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
1368
1369 mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
1370 if (!mmc)
1371 goto nomem;
1372
1373 mmc->ops = &mmc_spi_ops;
1374 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1375 mmc->max_segs = MMC_SPI_BLOCKSATONCE;
1376 mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
1377 mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
1378
1379 mmc->caps = MMC_CAP_SPI;
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389 mmc->f_min = 400000;
1390 mmc->f_max = spi->max_speed_hz;
1391
1392 host = mmc_priv(mmc);
1393 host->mmc = mmc;
1394 host->spi = spi;
1395
1396 host->ones = ones;
1397
1398 dev_set_drvdata(&spi->dev, mmc);
1399
1400
1401
1402
1403 host->pdata = mmc_spi_get_pdata(spi);
1404 if (host->pdata)
1405 mmc->ocr_avail = host->pdata->ocr_mask;
1406 if (!mmc->ocr_avail) {
1407 dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
1408 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1409 }
1410 if (host->pdata && host->pdata->setpower) {
1411 host->powerup_msecs = host->pdata->powerup_msecs;
1412 if (!host->powerup_msecs || host->powerup_msecs > 250)
1413 host->powerup_msecs = 250;
1414 }
1415
1416
1417 host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
1418 if (!host->data)
1419 goto fail_nobuf1;
1420
1421 status = mmc_spi_dma_alloc(host);
1422 if (status)
1423 goto fail_dma;
1424
1425
1426 spi_message_init(&host->readback);
1427 host->readback.is_dma_mapped = (host->dma_dev != NULL);
1428
1429 spi_message_add_tail(&host->status, &host->readback);
1430 host->status.tx_buf = host->ones;
1431 host->status.tx_dma = host->ones_dma;
1432 host->status.rx_buf = &host->data->status;
1433 host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
1434 host->status.cs_change = 1;
1435
1436
1437 if (host->pdata && host->pdata->init) {
1438 status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
1439 if (status != 0)
1440 goto fail_glue_init;
1441 }
1442
1443
1444 if (host->pdata) {
1445 mmc->caps |= host->pdata->caps;
1446 mmc->caps2 |= host->pdata->caps2;
1447 }
1448
1449 status = mmc_add_host(mmc);
1450 if (status != 0)
1451 goto fail_add_host;
1452
1453
1454
1455
1456
1457 status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
1458 if (status == -EPROBE_DEFER)
1459 goto fail_add_host;
1460 if (!status) {
1461
1462
1463
1464
1465
1466 mmc->caps &= ~MMC_CAP_NEEDS_POLL;
1467 mmc_gpiod_request_cd_irq(mmc);
1468 }
1469 mmc_detect_change(mmc, 0);
1470
1471
1472 status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
1473 if (status == -EPROBE_DEFER)
1474 goto fail_add_host;
1475 if (!status)
1476 has_ro = true;
1477
1478 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1479 dev_name(&mmc->class_dev),
1480 host->dma_dev ? "" : ", no DMA",
1481 has_ro ? "" : ", no WP",
1482 (host->pdata && host->pdata->setpower)
1483 ? "" : ", no poweroff",
1484 (mmc->caps & MMC_CAP_NEEDS_POLL)
1485 ? ", cd polling" : "");
1486 return 0;
1487
1488fail_add_host:
1489 mmc_remove_host(mmc);
1490fail_glue_init:
1491 mmc_spi_dma_free(host);
1492fail_dma:
1493 kfree(host->data);
1494fail_nobuf1:
1495 mmc_spi_put_pdata(spi);
1496 mmc_free_host(mmc);
1497nomem:
1498 kfree(ones);
1499 return status;
1500}
1501
1502
/* Unbind: tear down in reverse of probe order. */
static int mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host *host = mmc_priv(mmc);

	/* run platform exit glue first (stops card-detect callbacks) */
	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&spi->dev, mmc);

	mmc_remove_host(mmc);

	mmc_spi_dma_free(host);
	kfree(host->data);
	kfree(host->ones);

	/* set_ios() may have lowered max_speed_hz; restore the ceiling
	 * recorded as f_max at probe time so a rebind starts clean
	 */
	spi->max_speed_hz = mmc->f_max;
	mmc_spi_put_pdata(spi);
	mmc_free_host(mmc);
	return 0;
}
1523
/* Device-tree binding: boards describe the slot as "mmc-spi-slot" */
static const struct of_device_id mmc_spi_of_match_table[] = {
	{ .compatible = "mmc-spi-slot", },
	{},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
1529
static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name = "mmc_spi",
		.of_match_table = mmc_spi_of_match_table,
	},
	.probe = mmc_spi_probe,
	.remove = mmc_spi_remove,
};

/* registers the driver at module init, unregisters at module exit */
module_spi_driver(mmc_spi_driver);

MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");
1545