linux/drivers/spi/spi-bfin-sport.c
/*
 * SPI bus via the Blackfin SPORT peripheral
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Copyright 2009-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>

#include <asm/portmux.h>
#include <asm/bfin5xx_spi.h>
#include <asm/blackfin.h>
#include <asm/bfin_sport.h>
#include <asm/cacheflush.h>

#define DRV_NAME        "bfin-sport-spi"
#define DRV_DESC        "SPI bus via the Blackfin SPORT"

MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bfin-sport-spi");

enum bfin_sport_spi_state {
        START_STATE,
        RUNNING_STATE,
        DONE_STATE,
        ERROR_STATE,
};

struct bfin_sport_spi_master_data;

struct bfin_sport_transfer_ops {
        void (*write) (struct bfin_sport_spi_master_data *);
        void (*read) (struct bfin_sport_spi_master_data *);
        void (*duplex) (struct bfin_sport_spi_master_data *);
};

struct bfin_sport_spi_master_data {
        /* Driver model hookup */
        struct device *dev;

        /* SPI framework hookup */
        struct spi_master *master;

        /* Regs base of SPI controller */
        struct sport_register __iomem *regs;
        int err_irq;

        /* Pin request list */
        u16 *pin_req;

        /* Driver message queue */
        struct workqueue_struct *workqueue;
        struct work_struct pump_messages;
        spinlock_t lock;
        struct list_head queue;
        int busy;
        bool run;

        /* Message Transfer pump */
        struct tasklet_struct pump_transfers;

        /* Current message transfer state info */
        enum bfin_sport_spi_state state;
        struct spi_message *cur_msg;
        struct spi_transfer *cur_transfer;
        struct bfin_sport_spi_slave_data *cur_chip;
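        /*
         * Current transfer buffers: the anonymous unions let the transfer
         * ops advance the same cursor as either a u8 or u16 pointer,
         * depending on the word size of the transfer in flight.
         */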
        union {
                void *tx;
                u8 *tx8;
                u16 *tx16;
        };
        void *tx_end;
        union {
                void *rx;
                u8 *rx8;
                u16 *rx16;
        };
        void *rx_end;

        int cs_change;
        struct bfin_sport_transfer_ops *ops;
};

struct bfin_sport_spi_slave_data {
        u16 ctl_reg;
        u16 baud;
        u16 cs_chg_udelay;      /* Some devices require > 255usec delay */
        u32 cs_gpio;
        u16 idle_tx_val;
        struct bfin_sport_transfer_ops *ops;
};

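/*
 * Note: TSPEN in TCR1 and RSPEN in RCR1 share the same bit position in the
 * Blackfin SPORT register layout, which is why the helpers below reuse the
 * TSPEN mask when enabling/disabling both the transmit and receive sides.
 */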
static void
bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
{
        bfin_write_or(&drv_data->regs->tcr1, TSPEN);
        bfin_write_or(&drv_data->regs->rcr1, TSPEN);
        SSYNC();
}

static void
bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
{
        bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
        bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
        SSYNC();
}

/* Calculate the SPI_BAUD register value based on input HZ */
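/*
 * The SPORT serial clock runs at SCLK / (2 * (TCLKDIV + 1)).  The divider
 * is first computed as SCLK / (2 * speed_hz) - 1 and then bumped up by one
 * if the resulting clock would still exceed the requested rate.
 */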
static u16
bfin_sport_hz_to_spi_baud(u32 speed_hz)
{
        u_long clk, sclk = get_sclk();
        int div = (sclk / (2 * speed_hz)) - 1;

        if (div < 0)
                div = 0;

        clk = sclk / (2 * (div + 1));

        if (clk > speed_hz)
                div++;

        return div;
}

/* Chip select operation functions for cs_change flag */
static void
bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
{
        gpio_direction_output(chip->cs_gpio, 0);
}

static void
bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
{
        gpio_direction_output(chip->cs_gpio, 1);
        /* Move delay here for consistency */
        if (chip->cs_chg_udelay)
                udelay(chip->cs_chg_udelay);
}

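/*
 * Busy-wait (bounded to roughly one second) until the receive FIFO has a
 * word available; every word written out is expected to clock one word in.
 */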
static void
bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
{
        unsigned long timeout = jiffies + HZ;
        while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
                if (!time_before(jiffies, timeout))
                        break;
        }
}

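/*
 * The SPORT shifts one word in for every word it shifts out, so the
 * write-only path must read and discard each received word to keep the RX
 * FIFO drained, and the read-only path must transmit the chip's
 * idle_tx_val purely to generate clocks for the incoming data.
 */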
static void
bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
{
        u16 dummy;

        while (drv_data->tx < drv_data->tx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                dummy = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
{
        u16 tx_val = drv_data->cur_chip->idle_tx_val;

        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, tx_val);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
{
        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
        }
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
        .write  = bfin_sport_spi_u8_writer,
        .read   = bfin_sport_spi_u8_reader,
        .duplex = bfin_sport_spi_u8_duplex,
};

static void
bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
{
        u16 dummy;

        while (drv_data->tx < drv_data->tx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                dummy = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
{
        u16 tx_val = drv_data->cur_chip->idle_tx_val;

        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, tx_val);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
{
        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
        }
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
        .write  = bfin_sport_spi_u16_writer,
        .read   = bfin_sport_spi_u16_reader,
        .duplex = bfin_sport_spi_u16_duplex,
};

/* stop controller and re-config current chip */
static void
bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
{
        struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;

        bfin_sport_spi_disable(drv_data);
        dev_dbg(drv_data->dev, "restoring spi ctl state\n");

        bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
        bfin_write(&drv_data->regs->tclkdiv, chip->baud);
        SSYNC();

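        /*
         * The receive side reuses the transmit configuration with the
         * internal-clock and internal-frame-sync bits cleared; clock and
         * frame sync for the link are presumed to be driven by the
         * transmit side only.
         */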
        bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
        SSYNC();

        bfin_sport_spi_cs_active(chip);
}

/* test if there are more transfers to be done */
static enum bfin_sport_spi_state
bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
{
        struct spi_message *msg = drv_data->cur_msg;
        struct spi_transfer *trans = drv_data->cur_transfer;

        /* Move to next transfer */
        if (trans->transfer_list.next != &msg->transfers) {
                drv_data->cur_transfer =
                    list_entry(trans->transfer_list.next,
                               struct spi_transfer, transfer_list);
                return RUNNING_STATE;
        }

        return DONE_STATE;
}

/*
 * caller has already set message->status;
 * dma and pio irqs are blocked, so give the finished message back
 */
static void
bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
{
        struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
        unsigned long flags;
        struct spi_message *msg;

        spin_lock_irqsave(&drv_data->lock, flags);
        msg = drv_data->cur_msg;
        drv_data->state = START_STATE;
        drv_data->cur_msg = NULL;
        drv_data->cur_transfer = NULL;
        drv_data->cur_chip = NULL;
        queue_work(drv_data->workqueue, &drv_data->pump_messages);
        spin_unlock_irqrestore(&drv_data->lock, flags);

        if (!drv_data->cs_change)
                bfin_sport_spi_cs_deactive(chip);

        if (msg->complete)
                msg->complete(msg->context);
}

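/*
 * The SPORT error interrupt reports transmit/receive over- and underflows;
 * the sticky status bits are cleared by writing them back before the SPORT
 * is shut down.
 */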
static irqreturn_t
sport_err_handler(int irq, void *dev_id)
{
        struct bfin_sport_spi_master_data *drv_data = dev_id;
        u16 status;

        dev_dbg(drv_data->dev, "%s enter\n", __func__);
        status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);

        if (status) {
                bfin_write(&drv_data->regs->stat, status);
                SSYNC();

                bfin_sport_spi_disable(drv_data);
                dev_err(drv_data->dev, "status error:%s%s%s%s\n",
                        status & TOVF ? " TOVF" : "",
                        status & TUVF ? " TUVF" : "",
                        status & ROVF ? " ROVF" : "",
                        status & RUVF ? " RUVF" : "");
        }

        return IRQ_HANDLED;
}

static void
bfin_sport_spi_pump_transfers(unsigned long data)
{
        struct bfin_sport_spi_master_data *drv_data = (void *)data;
        struct spi_message *message = NULL;
        struct spi_transfer *transfer = NULL;
        struct spi_transfer *previous = NULL;
        struct bfin_sport_spi_slave_data *chip = NULL;
        unsigned int bits_per_word;
        u32 tranf_success = 1;
        u32 transfer_speed;
        u8 full_duplex = 0;

        /* Get current state information */
        message = drv_data->cur_msg;
        transfer = drv_data->cur_transfer;
        chip = drv_data->cur_chip;

        if (transfer->speed_hz)
                transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
        else
                transfer_speed = chip->baud;
        bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
        SSYNC();

        /*
         * if msg is error or done, report it back using complete() callback
         */

        /* Handle an abort */
        if (drv_data->state == ERROR_STATE) {
                dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
                message->status = -EIO;
                bfin_sport_spi_giveback(drv_data);
                return;
        }

        /* Handle end of message */
        if (drv_data->state == DONE_STATE) {
                dev_dbg(drv_data->dev, "transfer: all done!\n");
                message->status = 0;
                bfin_sport_spi_giveback(drv_data);
                return;
        }

        /* Delay if requested at end of transfer */
        if (drv_data->state == RUNNING_STATE) {
                dev_dbg(drv_data->dev, "transfer: still running ...\n");
                previous = list_entry(transfer->transfer_list.prev,
                                      struct spi_transfer, transfer_list);
                if (previous->delay_usecs)
                        udelay(previous->delay_usecs);
        }

        if (transfer->len == 0) {
                /* Move to next transfer of this msg */
                drv_data->state = bfin_sport_spi_next_transfer(drv_data);
                /* Schedule next transfer tasklet */
                tasklet_schedule(&drv_data->pump_transfers);
        }

        if (transfer->tx_buf != NULL) {
                drv_data->tx = (void *)transfer->tx_buf;
                drv_data->tx_end = drv_data->tx + transfer->len;
                dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
                        transfer->tx_buf, drv_data->tx_end);
        } else
                drv_data->tx = NULL;

        if (transfer->rx_buf != NULL) {
                full_duplex = transfer->tx_buf != NULL;
                drv_data->rx = transfer->rx_buf;
                drv_data->rx_end = drv_data->rx + transfer->len;
                dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
                        transfer->rx_buf, drv_data->rx_end);
        } else
                drv_data->rx = NULL;

        drv_data->cs_change = transfer->cs_change;

        /* Bits per word setup */
        bits_per_word = transfer->bits_per_word ? :
                message->spi->bits_per_word ? : 8;
        if (bits_per_word % 16 == 0)
                drv_data->ops = &bfin_sport_transfer_ops_u16;
        else
                drv_data->ops = &bfin_sport_transfer_ops_u8;
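        /*
         * SLEN in TCR2/RCR2 is the serial word length minus one; setting
         * TFSDIV to the same value generates a frame sync once per word.
         */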
        bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
        bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
        bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);

        drv_data->state = RUNNING_STATE;

        if (drv_data->cs_change)
                bfin_sport_spi_cs_active(chip);

        dev_dbg(drv_data->dev,
                "now pumping a transfer: width is %d, len is %d\n",
                bits_per_word, transfer->len);

        /* PIO mode write then read */
        dev_dbg(drv_data->dev, "doing IO transfer\n");

        bfin_sport_spi_enable(drv_data);
        if (full_duplex) {
                /* full duplex mode */
                BUG_ON((drv_data->tx_end - drv_data->tx) !=
                       (drv_data->rx_end - drv_data->rx));
                drv_data->ops->duplex(drv_data);

                if (drv_data->tx != drv_data->tx_end)
                        tranf_success = 0;
        } else if (drv_data->tx != NULL) {
                /* write only half duplex */

                drv_data->ops->write(drv_data);

                if (drv_data->tx != drv_data->tx_end)
                        tranf_success = 0;
        } else if (drv_data->rx != NULL) {
                /* read only half duplex */

                drv_data->ops->read(drv_data);
                if (drv_data->rx != drv_data->rx_end)
                        tranf_success = 0;
        }
        bfin_sport_spi_disable(drv_data);

        if (!tranf_success) {
                dev_dbg(drv_data->dev, "IO write error!\n");
                drv_data->state = ERROR_STATE;
        } else {
                /* Update total byte transferred */
                message->actual_length += transfer->len;
                /* Move to next transfer of this msg */
                drv_data->state = bfin_sport_spi_next_transfer(drv_data);
                if (drv_data->cs_change)
                        bfin_sport_spi_cs_deactive(chip);
        }

        /* Schedule next transfer tasklet */
        tasklet_schedule(&drv_data->pump_transfers);
}

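/*
 * Message flow: the SPI core hands messages to bfin_sport_spi_transfer(),
 * which queues them and kicks this work function; it pops one message off
 * the queue and schedules the pump_transfers tasklet, which performs the
 * actual PIO for each transfer in the message.
 */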
/* pop a msg from queue and kick off real transfer */
static void
bfin_sport_spi_pump_messages(struct work_struct *work)
{
        struct bfin_sport_spi_master_data *drv_data;
        unsigned long flags;
        struct spi_message *next_msg;

        drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);

        /* Lock queue and check for queue work */
        spin_lock_irqsave(&drv_data->lock, flags);
        if (list_empty(&drv_data->queue) || !drv_data->run) {
                /* pumper kicked off but no work to do */
                drv_data->busy = 0;
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return;
        }

        /* Make sure we are not already running a message */
        if (drv_data->cur_msg) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return;
        }

        /* Extract head of queue */
        next_msg = list_entry(drv_data->queue.next,
                struct spi_message, queue);

        drv_data->cur_msg = next_msg;

        /* Set up the SPORT using the per-chip configuration */
        drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

        list_del_init(&drv_data->cur_msg->queue);

        /* Initialize message state */
        drv_data->cur_msg->state = START_STATE;
        drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
                                            struct spi_transfer, transfer_list);
        bfin_sport_spi_restore_state(drv_data);
        dev_dbg(drv_data->dev, "got a message to pump, "
                "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
                drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
                drv_data->cur_chip->ctl_reg);

        dev_dbg(drv_data->dev,
                "the first transfer len is %d\n",
                drv_data->cur_transfer->len);

        /* Mark as busy and launch transfers */
        tasklet_schedule(&drv_data->pump_transfers);

        drv_data->busy = 1;
        spin_unlock_irqrestore(&drv_data->lock, flags);
}

/*
 * got a msg to transfer, queue it in drv_data->queue
 * and kick off the message pumper
 */
static int
bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
        struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
        unsigned long flags;

        spin_lock_irqsave(&drv_data->lock, flags);

        if (!drv_data->run) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return -ESHUTDOWN;
        }

        msg->actual_length = 0;
        msg->status = -EINPROGRESS;
        msg->state = START_STATE;

        dev_dbg(&spi->dev, "adding an msg in transfer()\n");
        list_add_tail(&msg->queue, &drv_data->queue);

        if (drv_data->run && !drv_data->busy)
                queue_work(drv_data->workqueue, &drv_data->pump_messages);

        spin_unlock_irqrestore(&drv_data->lock, flags);

        return 0;
}

/* Called every time common spi devices change state */
static int
bfin_sport_spi_setup(struct spi_device *spi)
{
        struct bfin_sport_spi_slave_data *chip, *first = NULL;
        int ret;

        /* Only alloc (or use chip_info) on first setup */
        chip = spi_get_ctldata(spi);
        if (chip == NULL) {
                struct bfin5xx_spi_chip *chip_info;

                chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;

                /* platform chip_info isn't required */
                chip_info = spi->controller_data;
                if (chip_info) {
                        /*
                         * DITFS and TDTYPE are the only things we don't set,
                         * but they probably shouldn't be changed by people.
                         */
                        if (chip_info->ctl_reg || chip_info->enable_dma) {
                                ret = -EINVAL;
                                dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
                                goto error;
                        }
                        chip->cs_chg_udelay = chip_info->cs_chg_udelay;
                        chip->idle_tx_val = chip_info->idle_tx_val;
                }
        }

        if (spi->bits_per_word % 8) {
                dev_err(&spi->dev, "%d bits_per_word is not supported\n",
                                spi->bits_per_word);
                ret = -EINVAL;
                goto error;
        }

        /*
         * Translate the common SPI framework settings into our register
         * values; the following configuration is the same for tx and rx.
         */

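        /*
         * TCKFE selects the clock edge used to drive/sample data and frame
         * sync, which is how the SPI clock-phase (CPHA) setting is mapped
         * onto the SPORT here.
         */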
        if (spi->mode & SPI_CPHA)
                chip->ctl_reg &= ~TCKFE;
        else
                chip->ctl_reg |= TCKFE;

        if (spi->mode & SPI_LSB_FIRST)
                chip->ctl_reg |= TLSBIT;
        else
                chip->ctl_reg &= ~TLSBIT;

        /* Sport in master mode */
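        /*
         * ITCLK/ITFS: clock and frame sync generated internally; TFSR: a
         * frame sync is required for every data word; LATFS/LTFS: late,
         * active-low frame sync.
         */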
        chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;

        chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);

        chip->cs_gpio = spi->chip_select;
        ret = gpio_request(chip->cs_gpio, spi->modalias);
        if (ret)
                goto error;

        dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
                        spi->modalias, spi->bits_per_word);
        dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
                        chip->ctl_reg, spi->chip_select);

        spi_set_ctldata(spi, chip);

        bfin_sport_spi_cs_deactive(chip);

        return ret;

 error:
        kfree(first);
        return ret;
}

/*
 * callback for spi framework.
 * clean driver specific data
 */
static void
bfin_sport_spi_cleanup(struct spi_device *spi)
{
        struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);

        if (!chip)
                return;

        gpio_free(chip->cs_gpio);

        kfree(chip);
}

static int
bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
{
        INIT_LIST_HEAD(&drv_data->queue);
        spin_lock_init(&drv_data->lock);

        drv_data->run = false;
        drv_data->busy = 0;

        /* init transfer tasklet */
        tasklet_init(&drv_data->pump_transfers,
                     bfin_sport_spi_pump_transfers, (unsigned long)drv_data);

        /* init messages workqueue */
        INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
        drv_data->workqueue =
            create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
        if (drv_data->workqueue == NULL)
                return -EBUSY;

        return 0;
}

static int
bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
{
        unsigned long flags;

        spin_lock_irqsave(&drv_data->lock, flags);

        if (drv_data->run || drv_data->busy) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return -EBUSY;
        }

        drv_data->run = true;
        drv_data->cur_msg = NULL;
        drv_data->cur_transfer = NULL;
        drv_data->cur_chip = NULL;
        spin_unlock_irqrestore(&drv_data->lock, flags);

        queue_work(drv_data->workqueue, &drv_data->pump_messages);

        return 0;
}

static inline int
bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
{
        unsigned long flags;
        unsigned limit = 500;
        int status = 0;

        spin_lock_irqsave(&drv_data->lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the drv_data->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead
         */
        drv_data->run = false;
        while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                msleep(10);
                spin_lock_irqsave(&drv_data->lock, flags);
        }

        if (!list_empty(&drv_data->queue) || drv_data->busy)
                status = -EBUSY;

        spin_unlock_irqrestore(&drv_data->lock, flags);

        return status;
}

static inline int
bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
{
        int status;

        status = bfin_sport_spi_stop_queue(drv_data);
        if (status)
                return status;

        destroy_workqueue(drv_data->workqueue);

        return 0;
}

static int __devinit
bfin_sport_spi_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct bfin5xx_spi_master *platform_info;
        struct spi_master *master;
        struct resource *res, *ires;
        struct bfin_sport_spi_master_data *drv_data;
        int status;

        platform_info = dev->platform_data;

        /* Allocate master with space for drv_data */
        master = spi_alloc_master(dev, sizeof(*master) + 16);
        if (!master) {
                dev_err(dev, "cannot alloc spi_master\n");
                return -ENOMEM;
        }

        drv_data = spi_master_get_devdata(master);
        drv_data->master = master;
        drv_data->dev = dev;
        drv_data->pin_req = platform_info->pin_req;

        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
        master->bus_num = pdev->id;
        master->num_chipselect = platform_info->num_chipselect;
        master->cleanup = bfin_sport_spi_cleanup;
        master->setup = bfin_sport_spi_setup;
        master->transfer = bfin_sport_spi_transfer;
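        /* this driver runs its own message queue via ->transfer() above */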

        /* Find and map our resources */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(dev, "cannot get IORESOURCE_MEM\n");
                status = -ENOENT;
                goto out_error_get_res;
        }

        drv_data->regs = ioremap(res->start, resource_size(res));
        if (drv_data->regs == NULL) {
                dev_err(dev, "cannot map registers\n");
                status = -ENXIO;
                goto out_error_ioremap;
        }

        ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!ires) {
                dev_err(dev, "cannot get IORESOURCE_IRQ\n");
                status = -ENODEV;
                goto out_error_get_ires;
        }
        drv_data->err_irq = ires->start;

        /* Initialize and start the queue */
        status = bfin_sport_spi_init_queue(drv_data);
        if (status) {
                dev_err(dev, "problem initializing queue\n");
                goto out_error_queue_alloc;
        }

        status = bfin_sport_spi_start_queue(drv_data);
        if (status) {
                dev_err(dev, "problem starting queue\n");
                goto out_error_queue_alloc;
        }

        status = request_irq(drv_data->err_irq, sport_err_handler,
                0, "sport_spi_err", drv_data);
        if (status) {
                dev_err(dev, "unable to request sport err irq\n");
                goto out_error_irq;
        }

        status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
        if (status) {
                dev_err(dev, "requesting peripherals failed\n");
                goto out_error_peripheral;
        }

        /* Register with the SPI framework */
        platform_set_drvdata(pdev, drv_data);
        status = spi_register_master(master);
        if (status) {
                dev_err(dev, "problem registering spi master\n");
                goto out_error_master;
        }

        dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
        return 0;

 out_error_master:
        peripheral_free_list(drv_data->pin_req);
 out_error_peripheral:
        free_irq(drv_data->err_irq, drv_data);
 out_error_irq:
 out_error_queue_alloc:
        bfin_sport_spi_destroy_queue(drv_data);
 out_error_get_ires:
        iounmap(drv_data->regs);
 out_error_ioremap:
 out_error_get_res:
        spi_master_put(master);

        return status;
}

/* stop hardware and remove the driver */
static int __devexit
bfin_sport_spi_remove(struct platform_device *pdev)
{
        struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
        int status = 0;

        if (!drv_data)
                return 0;

        /* Remove the queue */
        status = bfin_sport_spi_destroy_queue(drv_data);
        if (status)
                return status;

        /* Disable the SPORT at the peripheral and SOC level */
        bfin_sport_spi_disable(drv_data);

        /* Disconnect from the SPI framework */
        spi_unregister_master(drv_data->master);

        peripheral_free_list(drv_data->pin_req);

        /* Prevent double remove */
        platform_set_drvdata(pdev, NULL);

        return 0;
}

#ifdef CONFIG_PM
static int
bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
        int status;

        status = bfin_sport_spi_stop_queue(drv_data);
        if (status)
                return status;

        /* stop hardware */
        bfin_sport_spi_disable(drv_data);

        return status;
}

static int
bfin_sport_spi_resume(struct platform_device *pdev)
{
        struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
        int status;

        /* Enable the SPI interface */
        bfin_sport_spi_enable(drv_data);

        /* Start the queue running */
        status = bfin_sport_spi_start_queue(drv_data);
        if (status)
                dev_err(drv_data->dev, "problem resuming queue\n");

        return status;
}
#else
# define bfin_sport_spi_suspend NULL
# define bfin_sport_spi_resume  NULL
#endif

static struct platform_driver bfin_sport_spi_driver = {
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
        },
        .probe   = bfin_sport_spi_probe,
        .remove  = __devexit_p(bfin_sport_spi_remove),
        .suspend = bfin_sport_spi_suspend,
        .resume  = bfin_sport_spi_resume,
};
module_platform_driver(bfin_sport_spi_driver);