linux/drivers/spi/spi-rspi.c
<<
>>
Prefs
   1/*
   2 * SH RSPI driver
   3 *
   4 * Copyright (C) 2012  Renesas Solutions Corp.
   5 *
   6 * Based on spi-sh.c:
   7 * Copyright (C) 2011 Renesas Solutions Corp.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License as published by
  11 * the Free Software Foundation; version 2 of the License.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  21 *
  22 */
  23
  24#include <linux/module.h>
  25#include <linux/kernel.h>
  26#include <linux/sched.h>
  27#include <linux/errno.h>
  28#include <linux/list.h>
  29#include <linux/workqueue.h>
  30#include <linux/interrupt.h>
  31#include <linux/platform_device.h>
  32#include <linux/io.h>
  33#include <linux/clk.h>
  34#include <linux/dmaengine.h>
  35#include <linux/dma-mapping.h>
  36#include <linux/sh_dma.h>
  37#include <linux/spi/spi.h>
  38#include <linux/spi/rspi.h>
  39
  40#define RSPI_SPCR               0x00
  41#define RSPI_SSLP               0x01
  42#define RSPI_SPPCR              0x02
  43#define RSPI_SPSR               0x03
  44#define RSPI_SPDR               0x04
  45#define RSPI_SPSCR              0x08
  46#define RSPI_SPSSR              0x09
  47#define RSPI_SPBR               0x0a
  48#define RSPI_SPDCR              0x0b
  49#define RSPI_SPCKD              0x0c
  50#define RSPI_SSLND              0x0d
  51#define RSPI_SPND               0x0e
  52#define RSPI_SPCR2              0x0f
  53#define RSPI_SPCMD0             0x10
  54#define RSPI_SPCMD1             0x12
  55#define RSPI_SPCMD2             0x14
  56#define RSPI_SPCMD3             0x16
  57#define RSPI_SPCMD4             0x18
  58#define RSPI_SPCMD5             0x1a
  59#define RSPI_SPCMD6             0x1c
  60#define RSPI_SPCMD7             0x1e
  61
  62/* SPCR */
  63#define SPCR_SPRIE              0x80
  64#define SPCR_SPE                0x40
  65#define SPCR_SPTIE              0x20
  66#define SPCR_SPEIE              0x10
  67#define SPCR_MSTR               0x08
  68#define SPCR_MODFEN             0x04
  69#define SPCR_TXMD               0x02
  70#define SPCR_SPMS               0x01
  71
  72/* SSLP */
  73#define SSLP_SSL1P              0x02
  74#define SSLP_SSL0P              0x01
  75
  76/* SPPCR */
  77#define SPPCR_MOIFE             0x20
  78#define SPPCR_MOIFV             0x10
  79#define SPPCR_SPOM              0x04
  80#define SPPCR_SPLP2             0x02
  81#define SPPCR_SPLP              0x01
  82
  83/* SPSR */
  84#define SPSR_SPRF               0x80
  85#define SPSR_SPTEF              0x20
  86#define SPSR_PERF               0x08
  87#define SPSR_MODF               0x04
  88#define SPSR_IDLNF              0x02
  89#define SPSR_OVRF               0x01
  90
  91/* SPSCR */
  92#define SPSCR_SPSLN_MASK        0x07
  93
  94/* SPSSR */
  95#define SPSSR_SPECM_MASK        0x70
  96#define SPSSR_SPCP_MASK         0x07
  97
  98/* SPDCR */
  99#define SPDCR_SPLW              0x20
 100#define SPDCR_SPRDTD            0x10
 101#define SPDCR_SLSEL1            0x08
 102#define SPDCR_SLSEL0            0x04
 103#define SPDCR_SLSEL_MASK        0x0c
 104#define SPDCR_SPFC1             0x02
 105#define SPDCR_SPFC0             0x01
 106
 107/* SPCKD */
 108#define SPCKD_SCKDL_MASK        0x07
 109
 110/* SSLND */
 111#define SSLND_SLNDL_MASK        0x07
 112
 113/* SPND */
 114#define SPND_SPNDL_MASK         0x07
 115
 116/* SPCR2 */
 117#define SPCR2_PTE               0x08
 118#define SPCR2_SPIE              0x04
 119#define SPCR2_SPOE              0x02
 120#define SPCR2_SPPE              0x01
 121
 122/* SPCMDn */
 123#define SPCMD_SCKDEN            0x8000
 124#define SPCMD_SLNDEN            0x4000
 125#define SPCMD_SPNDEN            0x2000
 126#define SPCMD_LSBF              0x1000
 127#define SPCMD_SPB_MASK          0x0f00
 128#define SPCMD_SPB_8_TO_16(bit)  (((bit - 1) << 8) & SPCMD_SPB_MASK)
 129#define SPCMD_SPB_20BIT         0x0000
 130#define SPCMD_SPB_24BIT         0x0100
 131#define SPCMD_SPB_32BIT         0x0200
 132#define SPCMD_SSLKP             0x0080
 133#define SPCMD_SSLA_MASK         0x0030
 134#define SPCMD_BRDV_MASK         0x000c
 135#define SPCMD_CPOL              0x0002
 136#define SPCMD_CPHA              0x0001
 137
/* Per-controller driver state, stored as the spi_master's drvdata. */
struct rspi_data {
	void __iomem *addr;		/* ioremapped register base */
	u32 max_speed_hz;		/* cached from spi_device in rspi_setup() */
	struct spi_master *master;
	struct list_head queue;		/* pending spi_messages; guarded by lock */
	struct work_struct ws;		/* worker draining the queue (rspi_work) */
	wait_queue_head_t wait;		/* woken by IRQ handler / DMA callback */
	spinlock_t lock;		/* protects queue */
	struct clk *clk;
	unsigned char spsr;		/* SPSR snapshot taken in the IRQ handler */

	/* for dmaengine */
	struct sh_dmae_slave dma_tx;
	struct sh_dmae_slave dma_rx;
	struct dma_chan *chan_tx;	/* NULL when TX DMA is unavailable */
	struct dma_chan *chan_rx;	/* NULL when RX DMA is unavailable */
	int irq;

	unsigned dma_width_16bit:1;	/* DMAC bus is 16-bit (from platform data) */
	unsigned dma_callbacked:1;	/* set by rspi_dma_complete() */
};
 159
/* MMIO register accessors; offset is one of the RSPI_* register offsets. */
static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
{
	iowrite8(data, rspi->addr + offset);
}

static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
{
	iowrite16(data, rspi->addr + offset);
}

static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
{
	return ioread8(rspi->addr + offset);
}

static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
{
	return ioread16(rspi->addr + offset);
}
 179
 180static unsigned char rspi_calc_spbr(struct rspi_data *rspi)
 181{
 182        int tmp;
 183        unsigned char spbr;
 184
 185        tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
 186        spbr = clamp(tmp, 0, 255);
 187
 188        return spbr;
 189}
 190
/* Set the given interrupt-enable bit(s) (SPCR_SP?IE) in SPCR. */
static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}

/* Clear the given interrupt-enable bit(s) (SPCR_SP?IE) in SPCR. */
static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}
 200
/*
 * Enable enable_bit in SPCR and sleep until the IRQ handler observes
 * wait_mask set in SPSR (it stores the snapshot in rspi->spsr and wakes
 * rspi->wait).  Returns 0 on success, -ETIMEDOUT after one second.
 */
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
				   u8 enable_bit)
{
	int ret;

	/* Pre-read SPSR: the condition may already hold, avoiding a sleep */
	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
	rspi_enable_irq(rspi, enable_bit);
	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
	if (ret == 0 && !(rspi->spsr & wait_mask))
		return -ETIMEDOUT;

	return 0;
}
 214
/* Enable the SPI function (SPCR_SPE), asserting the SSL chip select. */
static void rspi_assert_ssl(struct rspi_data *rspi)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
}

/* Disable the SPI function (SPCR_SPE), negating the SSL chip select. */
static void rspi_negate_ssl(struct rspi_data *rspi)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
}
 224
/*
 * Program the static controller configuration for master mode with
 * access_size bits per frame.  Always returns 0.
 */
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	/* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
	rspi_write8(rspi, 0x00, RSPI_SPPCR);

	/* Sets transfer bit rate */
	rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR);

	/* Sets number of frames to be used: 1 frame */
	rspi_write8(rspi, 0x00, RSPI_SPDCR);

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets parity, interrupt mask */
	rspi_write8(rspi, 0x00, RSPI_SPCR2);

	/* Sets SPCMD: frame length, keep SSL asserted between frames */
	rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
		     RSPI_SPCMD0);

	/* Sets RSPI mode: master */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
 253
 254static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
 255                         struct spi_transfer *t)
 256{
 257        int remain = t->len;
 258        u8 *data;
 259
 260        data = (u8 *)t->tx_buf;
 261        while (remain > 0) {
 262                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
 263                            RSPI_SPCR);
 264
 265                if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
 266                        dev_err(&rspi->master->dev,
 267                                "%s: tx empty timeout\n", __func__);
 268                        return -ETIMEDOUT;
 269                }
 270
 271                rspi_write16(rspi, *data, RSPI_SPDR);
 272                data++;
 273                remain--;
 274        }
 275
 276        /* Waiting for the last transmition */
 277        rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
 278
 279        return 0;
 280}
 281
/* dmaengine completion callback: flag completion and wake the waiter. */
static void rspi_dma_complete(void *arg)
{
	struct rspi_data *rspi = arg;

	rspi->dma_callbacked = 1;
	wake_up_interruptible(&rspi->wait);
}
 289
/*
 * Build a single-entry scatterlist for buf/len and DMA-map it for chan.
 * Returns the number of mapped entries (0 on failure).
 */
static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
			   struct dma_chan *chan,
			   enum dma_transfer_direction dir)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, len);
	sg_dma_len(sg) = len;
	return dma_map_sg(chan->device->dev, sg, 1, dir);
}
 299
/* Undo rspi_dma_map_sg() for the single-entry scatterlist. */
static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
			      enum dma_transfer_direction dir)
{
	dma_unmap_sg(chan->device->dev, sg, 1, dir);
}
 305
 306static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
 307{
 308        u16 *dst = buf;
 309        const u8 *src = data;
 310
 311        while (len) {
 312                *dst++ = (u16)(*src++);
 313                len--;
 314        }
 315}
 316
 317static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
 318{
 319        u8 *dst = buf;
 320        const u16 *src = data;
 321
 322        while (len) {
 323                *dst++ = (u8)*src++;
 324                len--;
 325        }
 326}
 327
/*
 * Transmit t->tx_buf via the TX DMA channel.
 *
 * For a 16-bit DMAC a temporary buffer twice the transfer length is built
 * (one payload byte per 16-bit word).  Returns 0 on success, -ENOMEM /
 * -EFAULT / -EIO on setup failure, or -ETIMEDOUT if the DMA completion
 * callback does not fire within one second.
 */
static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg;
	void *buf = NULL;
	struct dma_async_tx_descriptor *desc;
	unsigned len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		/*
		 * If DMAC bus width is 16-bit, the driver allocates a dummy
		 * buffer. And, the driver converts original data into the
		 * DMAC data as the following format:
		 *  original data: 1st byte, 2nd byte ...
		 *  DMAC data:     1st byte, dummy, 2nd byte, dummy ...
		 */
		len = t->len * 2;
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		rspi_memory_to_8bit(buf, t->tx_buf, t->len);
	} else {
		len = t->len;
		buf = (void *)t->tx_buf;
	}

	if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	/*
	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
	 * called. So, this driver disables the IRQ while DMA transfer.
	 */
	disable_irq(rspi->irq);

	/* Transmit-only mode; SPTEF (via SPTIE) drives the DMA requests */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE);
	rspi->dma_callbacked = 0;

	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE);

	enable_irq(rspi->irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	/* buf is only owned by us in the 16-bit (bounce-buffer) case */
	if (rspi->dma_width_16bit)
		kfree(buf);

	return ret;
}
 398
 399static void rspi_receive_init(struct rspi_data *rspi)
 400{
 401        unsigned char spsr;
 402
 403        spsr = rspi_read8(rspi, RSPI_SPSR);
 404        if (spsr & SPSR_SPRF)
 405                rspi_read16(rspi, RSPI_SPDR);   /* dummy read */
 406        if (spsr & SPSR_OVRF)
 407                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
 408                            RSPI_SPCR);
 409}
 410
/*
 * Receive t->len bytes into t->rx_buf by PIO.  Each incoming byte is
 * clocked by a dummy write to SPDR.  Returns 0 or -ETIMEDOUT.
 */
static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
			    struct spi_transfer *t)
{
	int remain = t->len;
	u8 *data;

	rspi_receive_init(rspi);

	data = (u8 *)t->rx_buf;
	while (remain > 0) {
		/* Clear transmit-only mode so received data is captured */
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
			    RSPI_SPCR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* dummy write for generate clock */
		rspi_write16(rspi, 0x00, RSPI_SPDR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: receive timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* SPDR allows 16 or 32-bit access only */
		*data = (u8)rspi_read16(rspi, RSPI_SPDR);

		data++;
		remain--;
	}

	return 0;
}
 446
/*
 * Receive t->len bytes into t->rx_buf via DMA.  Requires both channels:
 * a zero-filled dummy TX transfer generates the SPI clock while the RX
 * channel captures data.  Returns 0 on success, -ENOMEM / -EFAULT /
 * -EIO on setup failure, or -ETIMEDOUT if RX DMA does not complete
 * within one second.
 */
static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg, sg_dummy;
	void *dummy = NULL, *rx_buf = NULL;
	struct dma_async_tx_descriptor *desc, *desc_dummy;
	unsigned len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		/*
		 * If DMAC bus width is 16-bit, the driver allocates a dummy
		 * buffer. And, finally the driver converts the DMAC data into
		 * actual data as the following format:
		 *  DMAC data:   1st byte, dummy, 2nd byte, dummy ...
		 *  actual data: 1st byte, 2nd byte ...
		 */
		len = t->len * 2;
		rx_buf = kmalloc(len, GFP_KERNEL);
		if (!rx_buf)
			return -ENOMEM;
	} else {
		len = t->len;
		rx_buf = t->rx_buf;
	}

	/* prepare dummy transfer to generate SPI clocks */
	dummy = kzalloc(len, GFP_KERNEL);
	if (!dummy) {
		ret = -ENOMEM;
		goto end_nomap;
	}
	if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
			     DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_dummy) {
		ret = -EIO;
		goto end_dummy_mapped;
	}

	/* prepare receive transfer */
	if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
			     DMA_FROM_DEVICE)) {
		ret = -EFAULT;
		goto end_dummy_mapped;

	}
	desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	rspi_receive_init(rspi);

	/*
	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
	 * called. So, this driver disables the IRQ while DMA transfer.
	 */
	disable_irq(rspi->irq);

	/* Full-duplex mode; SPTEF/SPRF (via SPTIE/SPRIE) drive DMA requests */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
	rspi->dma_callbacked = 0;

	/* Completion is signalled on the RX side only */
	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_rx);

	desc_dummy->callback = NULL;	/* No callback */
	dmaengine_submit(desc_dummy);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);

	enable_irq(rspi->irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
end_dummy_mapped:
	rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	if (rspi->dma_width_16bit) {
		/* Narrow the 16-bit bounce buffer into the caller's buffer */
		if (!ret)
			rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
		kfree(rx_buf);
	}
	kfree(dummy);

	return ret;
}
 549
 550static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
 551{
 552        if (t->tx_buf && rspi->chan_tx)
 553                return 1;
 554        /* If the module receives data by DMAC, it also needs TX DMAC */
 555        if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
 556                return 1;
 557
 558        return 0;
 559}
 560
 561static void rspi_work(struct work_struct *work)
 562{
 563        struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
 564        struct spi_message *mesg;
 565        struct spi_transfer *t;
 566        unsigned long flags;
 567        int ret;
 568
 569        spin_lock_irqsave(&rspi->lock, flags);
 570        while (!list_empty(&rspi->queue)) {
 571                mesg = list_entry(rspi->queue.next, struct spi_message, queue);
 572                list_del_init(&mesg->queue);
 573                spin_unlock_irqrestore(&rspi->lock, flags);
 574
 575                rspi_assert_ssl(rspi);
 576
 577                list_for_each_entry(t, &mesg->transfers, transfer_list) {
 578                        if (t->tx_buf) {
 579                                if (rspi_is_dma(rspi, t))
 580                                        ret = rspi_send_dma(rspi, t);
 581                                else
 582                                        ret = rspi_send_pio(rspi, mesg, t);
 583                                if (ret < 0)
 584                                        goto error;
 585                        }
 586                        if (t->rx_buf) {
 587                                if (rspi_is_dma(rspi, t))
 588                                        ret = rspi_receive_dma(rspi, t);
 589                                else
 590                                        ret = rspi_receive_pio(rspi, mesg, t);
 591                                if (ret < 0)
 592                                        goto error;
 593                        }
 594                        mesg->actual_length += t->len;
 595                }
 596                rspi_negate_ssl(rspi);
 597
 598                mesg->status = 0;
 599                mesg->complete(mesg->context);
 600
 601                spin_lock_irqsave(&rspi->lock, flags);
 602        }
 603
 604        return;
 605
 606error:
 607        mesg->status = ret;
 608        mesg->complete(mesg->context);
 609}
 610
/*
 * spi_master setup() hook: cache the device's max speed and program the
 * controller for 8-bit frames.  Always returns 0.
 */
static int rspi_setup(struct spi_device *spi)
{
	struct rspi_data *rspi = spi_master_get_devdata(spi->master);

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;
	rspi->max_speed_hz = spi->max_speed_hz;

	rspi_set_config_register(rspi, 8);

	return 0;
}
 623
/*
 * spi_master transfer() hook: queue the message and kick the worker.
 * mesg->complete() is invoked asynchronously from rspi_work().
 */
static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
{
	struct rspi_data *rspi = spi_master_get_devdata(spi->master);
	unsigned long flags;

	mesg->actual_length = 0;
	mesg->status = -EINPROGRESS;

	spin_lock_irqsave(&rspi->lock, flags);
	list_add_tail(&mesg->queue, &rspi->queue);
	schedule_work(&rspi->ws);
	spin_unlock_irqrestore(&rspi->lock, flags);

	return 0;
}
 639
/* spi_master cleanup() hook: no per-device state to release. */
static void rspi_cleanup(struct spi_device *spi)
{
}
 643
 644static irqreturn_t rspi_irq(int irq, void *_sr)
 645{
 646        struct rspi_data *rspi = (struct rspi_data *)_sr;
 647        unsigned long spsr;
 648        irqreturn_t ret = IRQ_NONE;
 649        unsigned char disable_irq = 0;
 650
 651        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
 652        if (spsr & SPSR_SPRF)
 653                disable_irq |= SPCR_SPRIE;
 654        if (spsr & SPSR_SPTEF)
 655                disable_irq |= SPCR_SPTIE;
 656
 657        if (disable_irq) {
 658                ret = IRQ_HANDLED;
 659                rspi_disable_irq(rspi, disable_irq);
 660                wake_up(&rspi->wait);
 661        }
 662
 663        return ret;
 664}
 665
/*
 * dma_request_channel() filter: accept any channel and hand it our
 * sh_dmae_slave descriptor (carrying the slave_id) via chan->private.
 */
static bool rspi_filter(struct dma_chan *chan, void *filter_param)
{
	chan->private = filter_param;
	return true;
}
 671
/*
 * Optionally acquire DMA channels described by the platform data.
 * Failure is non-fatal: the driver silently falls back to PIO for any
 * channel that could not be obtained.
 */
static void __devinit rspi_request_dma(struct rspi_data *rspi,
				       struct platform_device *pdev)
{
	struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
	dma_cap_mask_t mask;

	if (!rspi_pd)
		return;

	rspi->dma_width_16bit = rspi_pd->dma_width_16bit;

	/* If the module receives data by DMAC, it also needs TX DMAC */
	if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->dma_rx.slave_id = rspi_pd->dma_rx_id;
		rspi->chan_rx = dma_request_channel(mask, rspi_filter,
						    &rspi->dma_rx);
		if (rspi->chan_rx)
			dev_info(&pdev->dev, "Use DMA when rx.\n");
	}
	if (rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->dma_tx.slave_id = rspi_pd->dma_tx_id;
		rspi->chan_tx = dma_request_channel(mask, rspi_filter,
						    &rspi->dma_tx);
		if (rspi->chan_tx)
			dev_info(&pdev->dev, "Use DMA when tx\n");
	}
}
 703
/* Release any DMA channels acquired by rspi_request_dma(). */
static void __devexit rspi_release_dma(struct rspi_data *rspi)
{
	if (rspi->chan_tx)
		dma_release_channel(rspi->chan_tx);
	if (rspi->chan_rx)
		dma_release_channel(rspi->chan_rx);
}
 711
 712static int __devexit rspi_remove(struct platform_device *pdev)
 713{
 714        struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
 715
 716        spi_unregister_master(rspi->master);
 717        rspi_release_dma(rspi);
 718        free_irq(platform_get_irq(pdev, 0), rspi);
 719        clk_put(rspi->clk);
 720        iounmap(rspi->addr);
 721        spi_master_put(rspi->master);
 722
 723        return 0;
 724}
 725
 726static int __devinit rspi_probe(struct platform_device *pdev)
 727{
 728        struct resource *res;
 729        struct spi_master *master;
 730        struct rspi_data *rspi;
 731        int ret, irq;
 732        char clk_name[16];
 733
 734        /* get base addr */
 735        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 736        if (unlikely(res == NULL)) {
 737                dev_err(&pdev->dev, "invalid resource\n");
 738                return -EINVAL;
 739        }
 740
 741        irq = platform_get_irq(pdev, 0);
 742        if (irq < 0) {
 743                dev_err(&pdev->dev, "platform_get_irq error\n");
 744                return -ENODEV;
 745        }
 746
 747        master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
 748        if (master == NULL) {
 749                dev_err(&pdev->dev, "spi_alloc_master error.\n");
 750                return -ENOMEM;
 751        }
 752
 753        rspi = spi_master_get_devdata(master);
 754        dev_set_drvdata(&pdev->dev, rspi);
 755
 756        rspi->master = master;
 757        rspi->addr = ioremap(res->start, resource_size(res));
 758        if (rspi->addr == NULL) {
 759                dev_err(&pdev->dev, "ioremap error.\n");
 760                ret = -ENOMEM;
 761                goto error1;
 762        }
 763
 764        snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id);
 765        rspi->clk = clk_get(&pdev->dev, clk_name);
 766        if (IS_ERR(rspi->clk)) {
 767                dev_err(&pdev->dev, "cannot get clock\n");
 768                ret = PTR_ERR(rspi->clk);
 769                goto error2;
 770        }
 771        clk_enable(rspi->clk);
 772
 773        INIT_LIST_HEAD(&rspi->queue);
 774        spin_lock_init(&rspi->lock);
 775        INIT_WORK(&rspi->ws, rspi_work);
 776        init_waitqueue_head(&rspi->wait);
 777
 778        master->num_chipselect = 2;
 779        master->bus_num = pdev->id;
 780        master->setup = rspi_setup;
 781        master->transfer = rspi_transfer;
 782        master->cleanup = rspi_cleanup;
 783
 784        ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
 785        if (ret < 0) {
 786                dev_err(&pdev->dev, "request_irq error\n");
 787                goto error3;
 788        }
 789
 790        rspi->irq = irq;
 791        rspi_request_dma(rspi, pdev);
 792
 793        ret = spi_register_master(master);
 794        if (ret < 0) {
 795                dev_err(&pdev->dev, "spi_register_master error.\n");
 796                goto error4;
 797        }
 798
 799        dev_info(&pdev->dev, "probed\n");
 800
 801        return 0;
 802
 803error4:
 804        rspi_release_dma(rspi);
 805        free_irq(irq, rspi);
 806error3:
 807        clk_put(rspi->clk);
 808error2:
 809        iounmap(rspi->addr);
 810error1:
 811        spi_master_put(master);
 812
 813        return ret;
 814}
 815
/* Platform driver glue: binds to devices named "rspi". */
static struct platform_driver rspi_driver = {
	.probe =	rspi_probe,
	.remove =	__devexit_p(rspi_remove),
	.driver		= {
		.name = "rspi",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(rspi_driver);

MODULE_DESCRIPTION("Renesas RSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:rspi");
 830
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.