linux/drivers/spi/spi-pxa2xx-dma.c
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

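/*
 * Build a scatterlist for one direction of the current transfer and DMA
 * map it. The buffer is split into chunks of at most 2 KiB; when the
 * transfer has no TX or RX buffer, every chunk points at the preallocated
 * dummy buffer instead. Returns the number of mapped entries, a negative
 * errno if the sg table cannot be (re)allocated, or -ENOMEM if
 * dma_map_sg() fails.
 */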
static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
                                     enum dma_data_direction dir)
{
        int i, nents, len = drv_data->len;
        struct scatterlist *sg;
        struct device *dmadev;
        struct sg_table *sgt;
        void *buf, *pbuf;

        /*
         * Some DMA controllers have problems transferring buffers that are
         * not a multiple of 4 bytes. So we truncate the transfer so that it
         * is suitable for such controllers, and handle the trailing bytes
         * manually after the DMA completes.
         *
         * REVISIT: It would be better if this information could be
         * retrieved directly from the DMA device in a similar way as
         * ->copy_align etc. is done.
         */
        len = round_down(drv_data->len, 4);

        if (dir == DMA_TO_DEVICE) {
                dmadev = drv_data->tx_chan->device->dev;
                sgt = &drv_data->tx_sgt;
                buf = drv_data->tx;
                drv_data->tx_map_len = len;
        } else {
                dmadev = drv_data->rx_chan->device->dev;
                sgt = &drv_data->rx_sgt;
                buf = drv_data->rx;
                drv_data->rx_map_len = len;
        }

        nents = DIV_ROUND_UP(len, SZ_2K);
        if (nents != sgt->nents) {
                int ret;

                sg_free_table(sgt);
                ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
                if (ret)
                        return ret;
        }

        pbuf = buf;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = min_t(size_t, len, SZ_2K);

                if (buf)
                        sg_set_buf(sg, pbuf, bytes);
                else
                        sg_set_buf(sg, drv_data->dummy, bytes);

                pbuf += bytes;
                len -= bytes;
        }

        nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
        if (!nents)
                return -ENOMEM;

        return nents;
}

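/* Unmap the scatterlist that was set up for the given direction. */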
static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
                                        enum dma_data_direction dir)
{
        struct device *dmadev;
        struct sg_table *sgt;

        if (dir == DMA_TO_DEVICE) {
                dmadev = drv_data->tx_chan->device->dev;
                sgt = &drv_data->tx_sgt;
        } else {
                dmadev = drv_data->rx_chan->device->dev;
                sgt = &drv_data->rx_sgt;
        }

        dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
}

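/*
 * Unmap both directions and clear ->dma_mapped. Does nothing if the
 * buffers were never mapped for this transfer.
 */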
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
        if (!drv_data->dma_mapped)
                return;

        pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
        pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);

        drv_data->dma_mapped = 0;
}

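/*
 * Common completion path, reached both from the RX DMA callback and from
 * the receiver overrun (ROR) interrupt. The ->dma_running counter makes
 * sure the body runs only once per transfer even if both paths fire: it
 * clears the DMA related control and status bits, finishes the trailing
 * non-DMA bytes by PIO on success, and schedules the pump_transfers
 * tasklet to move on to the next transfer or to the error state.
 */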
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                                             bool error)
{
        struct spi_message *msg = drv_data->cur_msg;

        /*
         * It is possible that one CPU is handling the ROR interrupt while
         * another just gets the DMA completion. Calling pump_transfers()
         * twice for the same transfer leads to problems, so we prevent
         * concurrent calls by using ->dma_running.
         */
        if (atomic_dec_and_test(&drv_data->dma_running)) {
                void __iomem *reg = drv_data->ioaddr;

                /*
                 * If the other CPU is still handling the ROR interrupt we
                 * might not know about the error yet. So we re-check the
                 * ROR bit here before we clear the status register.
                 */
                if (!error) {
                        u32 status = read_SSSR(reg) & drv_data->mask_sr;
                        error = status & SSSR_ROR;
                }

                /* Clear status & disable interrupts */
                write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
                if (!pxa25x_ssp_comp(drv_data))
                        write_SSTO(0, reg);

                if (!error) {
                        pxa2xx_spi_unmap_dma_buffers(drv_data);

                        /* Handle the last bytes of an unaligned transfer */
                        drv_data->tx += drv_data->tx_map_len;
                        drv_data->write(drv_data);

                        drv_data->rx += drv_data->rx_map_len;
                        drv_data->read(drv_data);

                        msg->actual_length += drv_data->len;
                        msg->state = pxa2xx_spi_next_transfer(drv_data);
                } else {
                        /* In case we got an error we disable the SSP now */
                        write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

                        msg->state = ERROR_STATE;
                }

                tasklet_schedule(&drv_data->pump_transfers);
        }
}

static void pxa2xx_spi_dma_callback(void *data)
{
        pxa2xx_spi_dma_transfer_complete(data, false);
}

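/*
 * Set up one dmaengine channel for the current transfer and prepare a
 * slave_sg descriptor for it. The bus width follows the transfer word
 * size (->n_bytes), while the SSDR FIFO address, burst size and slave id
 * come from the driver and platform data. Returns NULL if the channel
 * cannot be configured or the descriptor cannot be prepared.
 */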
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
                           enum dma_transfer_direction dir)
{
        struct pxa2xx_spi_master *pdata = drv_data->master_info;
        struct chip_data *chip = drv_data->cur_chip;
        enum dma_slave_buswidth width;
        struct dma_slave_config cfg;
        struct dma_chan *chan;
        struct sg_table *sgt;
        int nents, ret;

        switch (drv_data->n_bytes) {
        case 1:
                width = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        case 2:
                width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        default:
                width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction = dir;

        if (dir == DMA_MEM_TO_DEV) {
                cfg.dst_addr = drv_data->ssdr_physical;
                cfg.dst_addr_width = width;
                cfg.dst_maxburst = chip->dma_burst_size;
                cfg.slave_id = pdata->tx_slave_id;

                sgt = &drv_data->tx_sgt;
                nents = drv_data->tx_nents;
                chan = drv_data->tx_chan;
        } else {
                cfg.src_addr = drv_data->ssdr_physical;
                cfg.src_addr_width = width;
                cfg.src_maxburst = chip->dma_burst_size;
                cfg.slave_id = pdata->rx_slave_id;

                sgt = &drv_data->rx_sgt;
                nents = drv_data->rx_nents;
                chan = drv_data->rx_chan;
        }

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
                return NULL;
        }

        return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

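/*
 * dma_request_channel() filter: accept only the channels whose ids match
 * the TX/RX channel numbers given in the platform data.
 */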
static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param)
{
        const struct pxa2xx_spi_master *pdata = param;

        return chan->chan_id == pdata->tx_chan_id ||
               chan->chan_id == pdata->rx_chan_id;
}

bool pxa2xx_spi_dma_is_possible(size_t len)
{
        return len <= MAX_DMA_LEN;
}

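/*
 * Map the TX and RX buffers of the current transfer. Returns 1 when both
 * directions were mapped and 0 when DMA is not used for this transfer
 * (DMA disabled for the chip, transfer shorter than one burst, or mapping
 * failed), in which case the caller falls back to PIO.
 */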
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
        const struct chip_data *chip = drv_data->cur_chip;
        int ret;

        if (!chip->enable_dma)
                return 0;

        /* Don't bother with DMA if we can't do even a single burst */
        if (drv_data->len < chip->dma_burst_size)
                return 0;

        ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
        if (ret <= 0) {
                dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
                return 0;
        }

        drv_data->tx_nents = ret;

        ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
        if (ret <= 0) {
                pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
                dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
                return 0;
        }

        drv_data->rx_nents = ret;
        return 1;
}

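/*
 * Interrupt handler used while a DMA transfer is in flight. Only the
 * receiver overrun (ROR) condition is handled here: both channels are
 * terminated and the transfer is completed with an error. Everything
 * else is reported back as IRQ_NONE.
 */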
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
        u32 status;

        status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
        if (status & SSSR_ROR) {
                dev_err(&drv_data->pdev->dev, "FIFO overrun\n");

                dmaengine_terminate_all(drv_data->rx_chan);
                dmaengine_terminate_all(drv_data->tx_chan);

                pxa2xx_spi_dma_transfer_complete(drv_data, true);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

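/*
 * Prepare and submit descriptors for both directions of the current
 * transfer. Completion is signalled via the RX descriptor callback since
 * RX finishing implies TX has finished too. Nothing is started here; the
 * transfer begins when pxa2xx_spi_dma_start() issues the pending work.
 * The dma_burst argument is currently unused, the burst size programmed
 * by pxa2xx_spi_dma_prepare_one() comes from the chip data instead.
 *
 * The expected call order from the core driver is roughly (a sketch, the
 * exact code lives in spi-pxa2xx.c):
 *
 *      pxa2xx_spi_map_dma_buffers(drv_data);
 *      pxa2xx_spi_dma_prepare(drv_data, dma_burst);
 *      pxa2xx_spi_dma_start(drv_data);
 */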
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
        struct dma_async_tx_descriptor *tx_desc, *rx_desc;

        tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
        if (!tx_desc) {
                dev_err(&drv_data->pdev->dev,
                        "failed to get DMA TX descriptor\n");
                return -EBUSY;
        }

        rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
        if (!rx_desc) {
                dev_err(&drv_data->pdev->dev,
                        "failed to get DMA RX descriptor\n");
                return -EBUSY;
        }

        /* We are ready when RX completes */
        rx_desc->callback = pxa2xx_spi_dma_callback;
        rx_desc->callback_param = drv_data;

        dmaengine_submit(rx_desc);
        dmaengine_submit(tx_desc);
        return 0;
}

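/* Kick both channels and mark DMA as running for the completion path. */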
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
        dma_async_issue_pending(drv_data->rx_chan);
        dma_async_issue_pending(drv_data->tx_chan);

        atomic_set(&drv_data->dma_running, 1);
}

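/*
 * Request the TX and RX dmaengine channels (matched by channel id through
 * pxa2xx_spi_dma_filter()) and allocate the 2 KiB dummy buffer that is
 * used when a transfer has no TX or RX buffer of its own.
 */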
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
        struct pxa2xx_spi_master *pdata = drv_data->master_info;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        drv_data->dummy = devm_kzalloc(&drv_data->pdev->dev, SZ_2K, GFP_KERNEL);
        if (!drv_data->dummy)
                return -ENOMEM;

        drv_data->tx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
                                                pdata);
        if (!drv_data->tx_chan)
                return -ENODEV;

        drv_data->rx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
                                                pdata);
        if (!drv_data->rx_chan) {
                dma_release_channel(drv_data->tx_chan);
                drv_data->tx_chan = NULL;
                return -ENODEV;
        }

        return 0;
}

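/* Stop any ongoing DMA, release both channels and free the sg tables. */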
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
        if (drv_data->rx_chan) {
                dmaengine_terminate_all(drv_data->rx_chan);
                dma_release_channel(drv_data->rx_chan);
                sg_free_table(&drv_data->rx_sgt);
                drv_data->rx_chan = NULL;
        }
        if (drv_data->tx_chan) {
                dmaengine_terminate_all(drv_data->tx_chan);
                dma_release_channel(drv_data->tx_chan);
                sg_free_table(&drv_data->tx_sgt);
                drv_data->tx_chan = NULL;
        }
}

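/*
 * Nothing needs to be restored on resume: the channels are reconfigured
 * with dmaengine_slave_config() in pxa2xx_spi_dma_prepare_one() before
 * every transfer anyway.
 */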
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
}

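/*
 * Pick the DMA burst size and the RX/TX FIFO thresholds for a chip. The
 * burst size may be overridden through pxa2xx_spi_chip platform data; the
 * thresholds are always the driver defaults here.
 */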
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
                                           struct spi_device *spi,
                                           u8 bits_per_word, u32 *burst_code,
                                           u32 *threshold)
{
        struct pxa2xx_spi_chip *chip_info = spi->controller_data;

        /*
         * If the DMA burst size is given in chip_info, we use that;
         * otherwise we use the default. Also, we use the default FIFO
         * thresholds for now.
         */
        *burst_code = chip_info ? chip_info->dma_burst_size : 16;
        *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
                   | SSCR1_TxTresh(TX_THRESH_DFLT);

        return 0;
}