linux/drivers/dma/mv_xor.c
   1/*
   2 * offload engine driver for the Marvell XOR engine
   3 * Copyright (C) 2007, 2008, Marvell International Ltd.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc.,
  16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17 */
  18
  19#include <linux/init.h>
  20#include <linux/module.h>
  21#include <linux/slab.h>
  22#include <linux/delay.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/spinlock.h>
  25#include <linux/interrupt.h>
  26#include <linux/platform_device.h>
  27#include <linux/memory.h>
  28#include <linux/clk.h>
  29#include <linux/of.h>
  30#include <linux/of_irq.h>
  31#include <linux/irqdomain.h>
  32#include <linux/platform_data/dma-mv_xor.h>
  33
  34#include "dmaengine.h"
  35#include "mv_xor.h"
  36
  37static void mv_xor_issue_pending(struct dma_chan *chan);
  38
  39#define to_mv_xor_chan(chan)            \
  40        container_of(chan, struct mv_xor_chan, dmachan)
  41
  42#define to_mv_xor_slot(tx)              \
  43        container_of(tx, struct mv_xor_desc_slot, async_tx)
  44
  45#define mv_chan_to_devp(chan)           \
  46        ((chan)->dmadev.dev)
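/*
 * These wrappers recover the driver-private structures from the generic
 * dmaengine objects embedded inside them: the core hands back a
 * struct dma_chan or a struct dma_async_tx_descriptor, and container_of()
 * walks back out to the enclosing mv_xor_chan or mv_xor_desc_slot.
 */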
  47
  48static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
  49{
  50        struct mv_xor_desc *hw_desc = desc->hw_desc;
  51
  52        hw_desc->status = (1 << 31);
  53        hw_desc->phy_next_desc = 0;
  54        hw_desc->desc_command = (1 << 31);
  55}
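/*
 * The two (1 << 31) writes above are, respectively, the "descriptor is
 * owned by the DMA engine" flag in the status word and, as far as the
 * Marvell descriptor format is concerned, the end-of-descriptor interrupt
 * enable in the command word; later kernels give these bits the symbolic
 * names XOR_DESC_DMA_OWNED and XOR_DESC_EOD_INT_EN.
 */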
  56
  57static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
  58{
  59        struct mv_xor_desc *hw_desc = desc->hw_desc;
  60        return hw_desc->phy_dest_addr;
  61}
  62
  63static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
  64                                int src_idx)
  65{
  66        struct mv_xor_desc *hw_desc = desc->hw_desc;
  67        return hw_desc->phy_src_addr[src_idx];
  68}
  69
  70
  71static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
  72                                   u32 byte_count)
  73{
  74        struct mv_xor_desc *hw_desc = desc->hw_desc;
  75        hw_desc->byte_count = byte_count;
  76}
  77
  78static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
  79                                  u32 next_desc_addr)
  80{
  81        struct mv_xor_desc *hw_desc = desc->hw_desc;
  82        BUG_ON(hw_desc->phy_next_desc);
  83        hw_desc->phy_next_desc = next_desc_addr;
  84}
  85
  86static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
  87{
  88        struct mv_xor_desc *hw_desc = desc->hw_desc;
  89        hw_desc->phy_next_desc = 0;
  90}
  91
  92static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
  93                                  dma_addr_t addr)
  94{
  95        struct mv_xor_desc *hw_desc = desc->hw_desc;
  96        hw_desc->phy_dest_addr = addr;
  97}
  98
  99static int mv_chan_memset_slot_count(size_t len)
 100{
 101        return 1;
 102}
 103
 104#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
 105
 106static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
 107                                 int index, dma_addr_t addr)
 108{
 109        struct mv_xor_desc *hw_desc = desc->hw_desc;
 110        hw_desc->phy_src_addr[index] = addr;
 111        if (desc->type == DMA_XOR)
 112                hw_desc->desc_command |= (1 << index);
 113}
 114
 115static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
 116{
 117        return __raw_readl(XOR_CURR_DESC(chan));
 118}
 119
 120static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 121                                        u32 next_desc_addr)
 122{
 123        __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
 124}
 125
 126static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 127{
 128        u32 val = __raw_readl(XOR_INTR_MASK(chan));
 129        val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
 130        __raw_writel(val, XOR_INTR_MASK(chan));
 131}
 132
 133static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 134{
 135        u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
 136        intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
 137        return intr_cause;
 138}
 139
 140static int mv_is_err_intr(u32 intr_cause)
 141{
 142        if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
 143                return 1;
 144
 145        return 0;
 146}
 147
 148static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 149{
 150        u32 val = ~(1 << (chan->idx * 16));
 151        dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
 152        __raw_writel(val, XOR_INTR_CAUSE(chan));
 153}
 154
 155static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
 156{
 157        u32 val = 0xFFFF0000 >> (chan->idx * 16);
 158        __raw_writel(val, XOR_INTR_CAUSE(chan));
 159}
 160
 161static int mv_can_chain(struct mv_xor_desc_slot *desc)
 162{
 163        struct mv_xor_desc_slot *chain_old_tail = list_entry(
 164                desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
 165
 166        if (chain_old_tail->type != desc->type)
 167                return 0;
 168
 169        return 1;
 170}
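/*
 * A descriptor may only be appended to the running hardware chain when it
 * uses the same operation type as the current chain tail: switching
 * between XOR and MEMCPY means reprogramming the channel configuration
 * register via mv_set_mode(), which is only done when a new chain is
 * started (see mv_xor_start_new_chain()).
 */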
 171
 172static void mv_set_mode(struct mv_xor_chan *chan,
 173                               enum dma_transaction_type type)
 174{
 175        u32 op_mode;
 176        u32 config = __raw_readl(XOR_CONFIG(chan));
 177
 178        switch (type) {
 179        case DMA_XOR:
 180                op_mode = XOR_OPERATION_MODE_XOR;
 181                break;
 182        case DMA_MEMCPY:
 183                op_mode = XOR_OPERATION_MODE_MEMCPY;
 184                break;
 185        default:
 186                dev_err(mv_chan_to_devp(chan),
 187                        "error: unsupported operation %d\n",
 188                        type);
 189                BUG();
 190                return;
 191        }
 192
 193        config &= ~0x7;
 194        config |= op_mode;
 195        __raw_writel(config, XOR_CONFIG(chan));
 196        chan->current_type = type;
 197}
 198
 199static void mv_chan_activate(struct mv_xor_chan *chan)
 200{
 201        u32 activation;
 202
 203        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
 204        activation = __raw_readl(XOR_ACTIVATION(chan));
 205        activation |= 0x1;
 206        __raw_writel(activation, XOR_ACTIVATION(chan));
 207}
 208
 209static char mv_chan_is_busy(struct mv_xor_chan *chan)
 210{
 211        u32 state = __raw_readl(XOR_ACTIVATION(chan));
 212
 213        state = (state >> 4) & 0x3;
 214
 215        return (state == 1) ? 1 : 0;
 216}
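/*
 * Bits [5:4] of the activation register encode the channel status; a
 * value of 1 appears to mean "active", which is the only state this
 * helper needs to distinguish from idle/paused.
 */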
 217
 218static int mv_chan_xor_slot_count(size_t len, int src_cnt)
 219{
 220        return 1;
 221}
 222
 223/**
 224 * mv_xor_free_slots - flags descriptor slots for reuse
 225 * @slot: Slot to free
 226 * Caller must hold &mv_chan->lock while calling this function
 227 */
 228static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
 229                              struct mv_xor_desc_slot *slot)
 230{
 231        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
 232                __func__, __LINE__, slot);
 233
 234        slot->slots_per_op = 0;
 235
 236}
 237
 238/*
 239 * mv_xor_start_new_chain - program the engine to operate on a new chain headed by
 240 * sw_desc
 241 * Caller must hold &mv_chan->lock while calling this function
 242 */
 243static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 244                                   struct mv_xor_desc_slot *sw_desc)
 245{
 246        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
 247                __func__, __LINE__, sw_desc);
 248        if (sw_desc->type != mv_chan->current_type)
 249                mv_set_mode(mv_chan, sw_desc->type);
 250
 251        /* set the hardware chain */
 252        mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
 253
 254        mv_chan->pending += sw_desc->slot_cnt;
 255        mv_xor_issue_pending(&mv_chan->dmachan);
 256}
 257
 258static dma_cookie_t
 259mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 260        struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
 261{
 262        BUG_ON(desc->async_tx.cookie < 0);
 263
 264        if (desc->async_tx.cookie > 0) {
 265                cookie = desc->async_tx.cookie;
 266
 267                /* call the callback (must not sleep or submit new
 268                 * operations to this channel)
 269                 */
 270                if (desc->async_tx.callback)
 271                        desc->async_tx.callback(
 272                                desc->async_tx.callback_param);
 273
 274                /* unmap dma addresses
 275                 * (unmap_single vs unmap_page?)
 276                 */
 277                if (desc->group_head && desc->unmap_len) {
 278                        struct mv_xor_desc_slot *unmap = desc->group_head;
 279                        struct device *dev = mv_chan_to_devp(mv_chan);
 280                        u32 len = unmap->unmap_len;
 281                        enum dma_ctrl_flags flags = desc->async_tx.flags;
 282                        u32 src_cnt;
 283                        dma_addr_t addr;
 284                        dma_addr_t dest;
 285
 286                        src_cnt = unmap->unmap_src_cnt;
 287                        dest = mv_desc_get_dest_addr(unmap);
 288                        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 289                                enum dma_data_direction dir;
 290
 291                                if (src_cnt > 1) /* is xor ? */
 292                                        dir = DMA_BIDIRECTIONAL;
 293                                else
 294                                        dir = DMA_FROM_DEVICE;
 295                                dma_unmap_page(dev, dest, len, dir);
 296                        }
 297
 298                        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 299                                while (src_cnt--) {
 300                                        addr = mv_desc_get_src_addr(unmap,
 301                                                                    src_cnt);
 302                                        if (addr == dest)
 303                                                continue;
 304                                        dma_unmap_page(dev, addr, len,
 305                                                       DMA_TO_DEVICE);
 306                                }
 307                        }
 308                        desc->group_head = NULL;
 309                }
 310        }
 311
 312        /* run dependent operations */
 313        dma_run_dependencies(&desc->async_tx);
 314
 315        return cookie;
 316}
 317
 318static int
 319mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
 320{
 321        struct mv_xor_desc_slot *iter, *_iter;
 322
 323        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 324        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 325                                 completed_node) {
 326
 327                if (async_tx_test_ack(&iter->async_tx)) {
 328                        list_del(&iter->completed_node);
 329                        mv_xor_free_slots(mv_chan, iter);
 330                }
 331        }
 332        return 0;
 333}
 334
 335static int
 336mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
 337        struct mv_xor_chan *mv_chan)
 338{
 339        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
 340                __func__, __LINE__, desc, desc->async_tx.flags);
 341        list_del(&desc->chain_node);
 342        /* the client is allowed to attach dependent operations
 343         * until 'ack' is set
 344         */
 345        if (!async_tx_test_ack(&desc->async_tx)) {
 346                /* move this slot to the completed_slots */
 347                list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
 348                return 0;
 349        }
 350
 351        mv_xor_free_slots(mv_chan, desc);
 352        return 0;
 353}
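/*
 * A completed descriptor that the client has not ACKed yet cannot be
 * recycled right away: it is parked on mv_chan->completed_slots and only
 * handed back to the free pool by mv_xor_clean_completed_slots() once
 * async_tx_test_ack() succeeds.
 */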
 354
 355static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 356{
 357        struct mv_xor_desc_slot *iter, *_iter;
 358        dma_cookie_t cookie = 0;
 359        int busy = mv_chan_is_busy(mv_chan);
 360        u32 current_desc = mv_chan_get_current_desc(mv_chan);
 361        int seen_current = 0;
 362
 363        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 364        dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
 365        mv_xor_clean_completed_slots(mv_chan);
 366
 367        /* free completed slots from the chain starting with
 368         * the oldest descriptor
 369         */
 370
 371        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 372                                        chain_node) {
 373                prefetch(_iter);
 374                prefetch(&_iter->async_tx);
 375
 376                /* do not advance past the current descriptor loaded into the
 377                 * hardware channel; subsequent descriptors are either in
 378                 * progress or have not been submitted
 379                 */
 380                if (seen_current)
 381                        break;
 382
 383                /* stop the search if we reach the current descriptor and the
 384                 * channel is busy
 385                 */
 386                if (iter->async_tx.phys == current_desc) {
 387                        seen_current = 1;
 388                        if (busy)
 389                                break;
 390                }
 391
 392                cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
 393
 394                if (mv_xor_clean_slot(iter, mv_chan))
 395                        break;
 396        }
 397
 398        if ((busy == 0) && !list_empty(&mv_chan->chain)) {
 399                struct mv_xor_desc_slot *chain_head;
 400                chain_head = list_entry(mv_chan->chain.next,
 401                                        struct mv_xor_desc_slot,
 402                                        chain_node);
 403
 404                mv_xor_start_new_chain(mv_chan, chain_head);
 405        }
 406
 407        if (cookie > 0)
 408                mv_chan->dmachan.completed_cookie = cookie;
 409}
 410
 411static void
 412mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 413{
 414        spin_lock_bh(&mv_chan->lock);
 415        __mv_xor_slot_cleanup(mv_chan);
 416        spin_unlock_bh(&mv_chan->lock);
 417}
 418
 419static void mv_xor_tasklet(unsigned long data)
 420{
 421        struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
 422        mv_xor_slot_cleanup(chan);
 423}
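/*
 * Completion processing is deferred to this tasklet: the hard interrupt
 * handler only acknowledges the cause register and schedules the tasklet,
 * so descriptor cleanup and client callbacks run in softirq context under
 * mv_chan->lock (taken with spin_lock_bh()).
 */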
 424
 425static struct mv_xor_desc_slot *
 426mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
 427                    int slots_per_op)
 428{
 429        struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
 430        LIST_HEAD(chain);
 431        int slots_found, retry = 0;
 432
 433        /* start search from the last allocated descriptor
 434         * if a contiguous allocation cannot be found, start searching
 435         * from the beginning of the list
 436         */
 437retry:
 438        slots_found = 0;
 439        if (retry == 0)
 440                iter = mv_chan->last_used;
 441        else
 442                iter = list_entry(&mv_chan->all_slots,
 443                        struct mv_xor_desc_slot,
 444                        slot_node);
 445
 446        list_for_each_entry_safe_continue(
 447                iter, _iter, &mv_chan->all_slots, slot_node) {
 448                prefetch(_iter);
 449                prefetch(&_iter->async_tx);
 450                if (iter->slots_per_op) {
 451                        /* give up after finding the first busy slot
 452                         * on the second pass through the list
 453                         */
 454                        if (retry)
 455                                break;
 456
 457                        slots_found = 0;
 458                        continue;
 459                }
 460
 461                /* start the allocation if the slot is correctly aligned */
 462                if (!slots_found++)
 463                        alloc_start = iter;
 464
 465                if (slots_found == num_slots) {
 466                        struct mv_xor_desc_slot *alloc_tail = NULL;
 467                        struct mv_xor_desc_slot *last_used = NULL;
 468                        iter = alloc_start;
 469                        while (num_slots) {
 470                                int i;
 471
 472                                /* pre-ack all but the last descriptor */
 473                                async_tx_ack(&iter->async_tx);
 474
 475                                list_add_tail(&iter->chain_node, &chain);
 476                                alloc_tail = iter;
 477                                iter->async_tx.cookie = 0;
 478                                iter->slot_cnt = num_slots;
 479                                iter->xor_check_result = NULL;
 480                                for (i = 0; i < slots_per_op; i++) {
 481                                        iter->slots_per_op = slots_per_op - i;
 482                                        last_used = iter;
 483                                        iter = list_entry(iter->slot_node.next,
 484                                                struct mv_xor_desc_slot,
 485                                                slot_node);
 486                                }
 487                                num_slots -= slots_per_op;
 488                        }
 489                        alloc_tail->group_head = alloc_start;
 490                        alloc_tail->async_tx.cookie = -EBUSY;
 491                        list_splice(&chain, &alloc_tail->tx_list);
 492                        mv_chan->last_used = last_used;
 493                        mv_desc_clear_next_desc(alloc_start);
 494                        mv_desc_clear_next_desc(alloc_tail);
 495                        return alloc_tail;
 496                }
 497        }
 498        if (!retry++)
 499                goto retry;
 500
 501        /* try to free some slots if the allocation fails */
 502        tasklet_schedule(&mv_chan->irq_tasklet);
 503
 504        return NULL;
 505}
 506
 507/************************ DMA engine API functions ****************************/
 508static dma_cookie_t
 509mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 510{
 511        struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
 512        struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
 513        struct mv_xor_desc_slot *grp_start, *old_chain_tail;
 514        dma_cookie_t cookie;
 515        int new_hw_chain = 1;
 516
 517        dev_dbg(mv_chan_to_devp(mv_chan),
 518                "%s sw_desc %p: async_tx %p\n",
 519                __func__, sw_desc, &sw_desc->async_tx);
 520
 521        grp_start = sw_desc->group_head;
 522
 523        spin_lock_bh(&mv_chan->lock);
 524        cookie = dma_cookie_assign(tx);
 525
 526        if (list_empty(&mv_chan->chain))
 527                list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
 528        else {
 529                new_hw_chain = 0;
 530
 531                old_chain_tail = list_entry(mv_chan->chain.prev,
 532                                            struct mv_xor_desc_slot,
 533                                            chain_node);
 534                list_splice_init(&grp_start->tx_list,
 535                                 &old_chain_tail->chain_node);
 536
 537                if (!mv_can_chain(grp_start))
 538                        goto submit_done;
 539
 540                dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
 541                        old_chain_tail->async_tx.phys);
 542
 543                /* fix up the hardware chain */
 544                mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
 545
 546                /* if the channel is not busy */
 547                if (!mv_chan_is_busy(mv_chan)) {
 548                        u32 current_desc = mv_chan_get_current_desc(mv_chan);
 549                        /*
 550                         * and the current desc is the end of the chain before
 551                         * the append, then we need to start the channel
 552                         */
 553                        if (current_desc == old_chain_tail->async_tx.phys)
 554                                new_hw_chain = 1;
 555                }
 556        }
 557
 558        if (new_hw_chain)
 559                mv_xor_start_new_chain(mv_chan, grp_start);
 560
 561submit_done:
 562        spin_unlock_bh(&mv_chan->lock);
 563
 564        return cookie;
 565}
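/*
 * For reference, a minimal client-side sketch of how a transfer reaches
 * this entry point (modelled on the self-tests below; error handling is
 * omitted and dest_dma, src_dma and len are assumed to be DMA-mapped
 * already):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest_dma,
 *						   src_dma, len, 0);
 *	cookie = tx->tx_submit(tx);      (ends up in mv_xor_tx_submit)
 *	dma_async_issue_pending(chan);   (ends up in mv_xor_issue_pending)
 *	...
 *	dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_SUCCESS
 */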
 566
 567/* returns the number of allocated descriptors */
 568static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 569{
 570        char *hw_desc;
 571        int idx;
 572        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 573        struct mv_xor_desc_slot *slot = NULL;
 574        int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
 575
 576        /* Allocate descriptor slots */
 577        idx = mv_chan->slots_allocated;
 578        while (idx < num_descs_in_pool) {
 579                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 580                if (!slot) {
 581                        printk(KERN_INFO "MV XOR Channel only initialized"
 582                                " %d descriptor slots\n", idx);
 583                        break;
 584                }
 585                hw_desc = (char *) mv_chan->dma_desc_pool_virt;
 586                slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
 587
 588                dma_async_tx_descriptor_init(&slot->async_tx, chan);
 589                slot->async_tx.tx_submit = mv_xor_tx_submit;
 590                INIT_LIST_HEAD(&slot->chain_node);
 591                INIT_LIST_HEAD(&slot->slot_node);
 592                INIT_LIST_HEAD(&slot->tx_list);
 593                hw_desc = (char *) mv_chan->dma_desc_pool;
 594                slot->async_tx.phys =
 595                        (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
 596                slot->idx = idx++;
 597
 598                spin_lock_bh(&mv_chan->lock);
 599                mv_chan->slots_allocated = idx;
 600                list_add_tail(&slot->slot_node, &mv_chan->all_slots);
 601                spin_unlock_bh(&mv_chan->lock);
 602        }
 603
 604        if (mv_chan->slots_allocated && !mv_chan->last_used)
 605                mv_chan->last_used = list_entry(mv_chan->all_slots.next,
 606                                        struct mv_xor_desc_slot,
 607                                        slot_node);
 608
 609        dev_dbg(mv_chan_to_devp(mv_chan),
 610                "allocated %d descriptor slots last_used: %p\n",
 611                mv_chan->slots_allocated, mv_chan->last_used);
 612
 613        return mv_chan->slots_allocated ? : -ENOMEM;
 614}
 615
 616static struct dma_async_tx_descriptor *
 617mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 618                size_t len, unsigned long flags)
 619{
 620        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 621        struct mv_xor_desc_slot *sw_desc, *grp_start;
 622        int slot_cnt;
 623
 624        dev_dbg(mv_chan_to_devp(mv_chan),
 625                "%s dest: %x src %x len: %u flags: %ld\n",
 626                __func__, dest, src, len, flags);
 627        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 628                return NULL;
 629
 630        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 631
 632        spin_lock_bh(&mv_chan->lock);
 633        slot_cnt = mv_chan_memcpy_slot_count(len);
 634        sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
 635        if (sw_desc) {
 636                sw_desc->type = DMA_MEMCPY;
 637                sw_desc->async_tx.flags = flags;
 638                grp_start = sw_desc->group_head;
 639                mv_desc_init(grp_start, flags);
 640                mv_desc_set_byte_count(grp_start, len);
 641                mv_desc_set_dest_addr(sw_desc->group_head, dest);
 642                mv_desc_set_src_addr(grp_start, 0, src);
 643                sw_desc->unmap_src_cnt = 1;
 644                sw_desc->unmap_len = len;
 645        }
 646        spin_unlock_bh(&mv_chan->lock);
 647
 648        dev_dbg(mv_chan_to_devp(mv_chan),
 649                "%s sw_desc %p async_tx %p\n",
 650                __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
 651
 652        return sw_desc ? &sw_desc->async_tx : NULL;
 653}
 654
 655static struct dma_async_tx_descriptor *
 656mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 657                    unsigned int src_cnt, size_t len, unsigned long flags)
 658{
 659        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 660        struct mv_xor_desc_slot *sw_desc, *grp_start;
 661        int slot_cnt;
 662
 663        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 664                return NULL;
 665
 666        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 667
 668        dev_dbg(mv_chan_to_devp(mv_chan),
 669                "%s src_cnt: %d len: %u dest %x flags: %ld\n",
 670                __func__, src_cnt, len, dest, flags);
 671
 672        spin_lock_bh(&mv_chan->lock);
 673        slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
 674        sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
 675        if (sw_desc) {
 676                sw_desc->type = DMA_XOR;
 677                sw_desc->async_tx.flags = flags;
 678                grp_start = sw_desc->group_head;
 679                mv_desc_init(grp_start, flags);
 680                /* the byte count field is the same as in memcpy desc */
 681                mv_desc_set_byte_count(grp_start, len);
 682                mv_desc_set_dest_addr(sw_desc->group_head, dest);
 683                sw_desc->unmap_src_cnt = src_cnt;
 684                sw_desc->unmap_len = len;
 685                while (src_cnt--)
 686                        mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
 687        }
 688        spin_unlock_bh(&mv_chan->lock);
 689        dev_dbg(mv_chan_to_devp(mv_chan),
 690                "%s sw_desc %p async_tx %p\n",
 691                __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
 692        return sw_desc ? &sw_desc->async_tx : NULL;
 693}
 694
 695static void mv_xor_free_chan_resources(struct dma_chan *chan)
 696{
 697        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 698        struct mv_xor_desc_slot *iter, *_iter;
 699        int in_use_descs = 0;
 700
 701        mv_xor_slot_cleanup(mv_chan);
 702
 703        spin_lock_bh(&mv_chan->lock);
 704        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 705                                        chain_node) {
 706                in_use_descs++;
 707                list_del(&iter->chain_node);
 708        }
 709        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 710                                 completed_node) {
 711                in_use_descs++;
 712                list_del(&iter->completed_node);
 713        }
 714        list_for_each_entry_safe_reverse(
 715                iter, _iter, &mv_chan->all_slots, slot_node) {
 716                list_del(&iter->slot_node);
 717                kfree(iter);
 718                mv_chan->slots_allocated--;
 719        }
 720        mv_chan->last_used = NULL;
 721
 722        dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
 723                __func__, mv_chan->slots_allocated);
 724        spin_unlock_bh(&mv_chan->lock);
 725
 726        if (in_use_descs)
 727                dev_err(mv_chan_to_devp(mv_chan),
 728                        "freeing %d in use descriptors!\n", in_use_descs);
 729}
 730
 731/**
 732 * mv_xor_status - poll the status of an XOR transaction
 733 * @chan: XOR channel handle
 734 * @cookie: XOR transaction identifier
 735 * @txstate: XOR transactions state holder (or NULL)
 736 */
 737static enum dma_status mv_xor_status(struct dma_chan *chan,
 738                                          dma_cookie_t cookie,
 739                                          struct dma_tx_state *txstate)
 740{
 741        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 742        enum dma_status ret;
 743
 744        ret = dma_cookie_status(chan, cookie, txstate);
 745        if (ret == DMA_SUCCESS) {
 746                mv_xor_clean_completed_slots(mv_chan);
 747                return ret;
 748        }
 749        mv_xor_slot_cleanup(mv_chan);
 750
 751        return dma_cookie_status(chan, cookie, txstate);
 752}
 753
 754static void mv_dump_xor_regs(struct mv_xor_chan *chan)
 755{
 756        u32 val;
 757
 758        val = __raw_readl(XOR_CONFIG(chan));
 759        dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
 760
 761        val = __raw_readl(XOR_ACTIVATION(chan));
 762        dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
 763
 764        val = __raw_readl(XOR_INTR_CAUSE(chan));
 765        dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
 766
 767        val = __raw_readl(XOR_INTR_MASK(chan));
 768        dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
 769
 770        val = __raw_readl(XOR_ERROR_CAUSE(chan));
 771        dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
 772
 773        val = __raw_readl(XOR_ERROR_ADDR(chan));
 774        dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
 775}
 776
 777static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
 778                                         u32 intr_cause)
 779{
 780        if (intr_cause & (1 << 4)) {
 781             dev_dbg(mv_chan_to_devp(chan),
 782                     "ignore this error\n");
 783             return;
 784        }
 785
 786        dev_err(mv_chan_to_devp(chan),
 787                "error on chan %d. intr cause 0x%08x\n",
 788                chan->idx, intr_cause);
 789
 790        mv_dump_xor_regs(chan);
 791        BUG();
 792}
 793
 794static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
 795{
 796        struct mv_xor_chan *chan = data;
 797        u32 intr_cause = mv_chan_get_intr_cause(chan);
 798
 799        dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
 800
 801        if (mv_is_err_intr(intr_cause))
 802                mv_xor_err_interrupt_handler(chan, intr_cause);
 803
 804        tasklet_schedule(&chan->irq_tasklet);
 805
 806        mv_xor_device_clear_eoc_cause(chan);
 807
 808        return IRQ_HANDLED;
 809}
 810
 811static void mv_xor_issue_pending(struct dma_chan *chan)
 812{
 813        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 814
 815        if (mv_chan->pending >= MV_XOR_THRESHOLD) {
 816                mv_chan->pending = 0;
 817                mv_chan_activate(mv_chan);
 818        }
 819}
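/*
 * The channel is only kicked once at least MV_XOR_THRESHOLD descriptors
 * (see mv_xor.h) are pending; below that, submitted descriptors simply
 * accumulate on the software chain and are picked up when the engine is
 * activated here or restarted from the cleanup path.
 */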
 820
 821/*
 822 * Perform a transaction to verify the HW works.
 823 */
 824#define MV_XOR_TEST_SIZE 2000
 825
 826static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 827{
 828        int i;
 829        void *src, *dest;
 830        dma_addr_t src_dma, dest_dma;
 831        struct dma_chan *dma_chan;
 832        dma_cookie_t cookie;
 833        struct dma_async_tx_descriptor *tx;
 834        int err = 0;
 835
 836        src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
 837        if (!src)
 838                return -ENOMEM;
 839
 840        dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
 841        if (!dest) {
 842                kfree(src);
 843                return -ENOMEM;
 844        }
 845
 846        /* Fill in src buffer */
 847        for (i = 0; i < MV_XOR_TEST_SIZE; i++)
 848                ((u8 *) src)[i] = (u8)i;
 849
 850        dma_chan = &mv_chan->dmachan;
 851        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 852                err = -ENODEV;
 853                goto out;
 854        }
 855
 856        dest_dma = dma_map_single(dma_chan->device->dev, dest,
 857                                  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
 858
 859        src_dma = dma_map_single(dma_chan->device->dev, src,
 860                                 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
 861
 862        tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 863                                    MV_XOR_TEST_SIZE, 0);
 864        cookie = mv_xor_tx_submit(tx);
 865        mv_xor_issue_pending(dma_chan);
 866        async_tx_ack(tx);
 867        msleep(1);
 868
 869        if (mv_xor_status(dma_chan, cookie, NULL) !=
 870            DMA_SUCCESS) {
 871                dev_err(dma_chan->device->dev,
 872                        "Self-test copy timed out, disabling\n");
 873                err = -ENODEV;
 874                goto free_resources;
 875        }
 876
 877        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 878                                MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
 879        if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
 880                dev_err(dma_chan->device->dev,
 881                        "Self-test copy failed compare, disabling\n");
 882                err = -ENODEV;
 883                goto free_resources;
 884        }
 885
 886free_resources:
 887        mv_xor_free_chan_resources(dma_chan);
 888out:
 889        kfree(src);
 890        kfree(dest);
 891        return err;
 892}
 893
 894#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
 895static int
 896mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 897{
 898        int i, src_idx;
 899        struct page *dest;
 900        struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
 901        dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 902        dma_addr_t dest_dma;
 903        struct dma_async_tx_descriptor *tx;
 904        struct dma_chan *dma_chan;
 905        dma_cookie_t cookie;
 906        u8 cmp_byte = 0;
 907        u32 cmp_word;
 908        int err = 0;
 909
 910        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
 911                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 912                if (!xor_srcs[src_idx]) {
 913                        while (src_idx--)
 914                                __free_page(xor_srcs[src_idx]);
 915                        return -ENOMEM;
 916                }
 917        }
 918
 919        dest = alloc_page(GFP_KERNEL);
 920        if (!dest) {
 921                while (src_idx--)
 922                        __free_page(xor_srcs[src_idx]);
 923                return -ENOMEM;
 924        }
 925
 926        /* Fill in src buffers */
 927        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
 928                u8 *ptr = page_address(xor_srcs[src_idx]);
 929                for (i = 0; i < PAGE_SIZE; i++)
 930                        ptr[i] = (1 << src_idx);
 931        }
 932
 933        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
 934                cmp_byte ^= (u8) (1 << src_idx);
 935
 936        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
 937                (cmp_byte << 8) | cmp_byte;
 938
 939        memset(page_address(dest), 0, PAGE_SIZE);
 940
 941        dma_chan = &mv_chan->dmachan;
 942        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 943                err = -ENODEV;
 944                goto out;
 945        }
 946
 947        /* test xor */
 948        dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
 949                                DMA_FROM_DEVICE);
 950
 951        for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
 952                dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 953                                           0, PAGE_SIZE, DMA_TO_DEVICE);
 954
 955        tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 956                                 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
 957
 958        cookie = mv_xor_tx_submit(tx);
 959        mv_xor_issue_pending(dma_chan);
 960        async_tx_ack(tx);
 961        msleep(8);
 962
 963        if (mv_xor_status(dma_chan, cookie, NULL) !=
 964            DMA_SUCCESS) {
 965                dev_err(dma_chan->device->dev,
 966                        "Self-test xor timed out, disabling\n");
 967                err = -ENODEV;
 968                goto free_resources;
 969        }
 970
 971        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 972                                PAGE_SIZE, DMA_FROM_DEVICE);
 973        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 974                u32 *ptr = page_address(dest);
 975                if (ptr[i] != cmp_word) {
 976                        dev_err(dma_chan->device->dev,
 977                                "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
 978                                i, ptr[i], cmp_word);
 979                        err = -ENODEV;
 980                        goto free_resources;
 981                }
 982        }
 983
 984free_resources:
 985        mv_xor_free_chan_resources(dma_chan);
 986out:
 987        src_idx = MV_XOR_NUM_SRC_TEST;
 988        while (src_idx--)
 989                __free_page(xor_srcs[src_idx]);
 990        __free_page(dest);
 991        return err;
 992}
 993
 994/* This driver does not implement any of the optional DMA operations. */
 995static int
 996mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 997               unsigned long arg)
 998{
 999        return -ENOSYS;
1000}
1001
1002static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1003{
1004        struct dma_chan *chan, *_chan;
1005        struct device *dev = mv_chan->dmadev.dev;
1006
1007        dma_async_device_unregister(&mv_chan->dmadev);
1008
1009        dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1010                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1011
1012        list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1013                                 device_node) {
1014                list_del(&chan->device_node);
1015        }
1016
1017        free_irq(mv_chan->irq, mv_chan);
1018
1019        return 0;
1020}
1021
1022static struct mv_xor_chan *
1023mv_xor_channel_add(struct mv_xor_device *xordev,
1024                   struct platform_device *pdev,
1025                   int idx, dma_cap_mask_t cap_mask, int irq)
1026{
1027        int ret = 0;
1028        struct mv_xor_chan *mv_chan;
1029        struct dma_device *dma_dev;
1030
1031        mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1032        if (!mv_chan) {
1033                ret = -ENOMEM;
1034                goto err_free_dma;
1035        }
1036
1037        mv_chan->idx = idx;
1038        mv_chan->irq = irq;
1039
1040        dma_dev = &mv_chan->dmadev;
1041
1042        /* allocate coherent memory for hardware descriptors
1043         * note: writecombine gives slightly better performance, but
1044         * requires that we explicitly flush the writes
1045         */
1046        mv_chan->dma_desc_pool_virt =
1047          dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1048                                 &mv_chan->dma_desc_pool, GFP_KERNEL);
1049        if (!mv_chan->dma_desc_pool_virt)
1050                return ERR_PTR(-ENOMEM);
1051
1052        /* discover transaction capabilities from the platform data */
1053        dma_dev->cap_mask = cap_mask;
1054
1055        INIT_LIST_HEAD(&dma_dev->channels);
1056
1057        /* set base routines */
1058        dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1059        dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1060        dma_dev->device_tx_status = mv_xor_status;
1061        dma_dev->device_issue_pending = mv_xor_issue_pending;
1062        dma_dev->device_control = mv_xor_control;
1063        dma_dev->dev = &pdev->dev;
1064
1065        /* set prep routines based on capability */
1066        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1067                dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1068        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1069                dma_dev->max_xor = 8;
1070                dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1071        }
1072
1073        mv_chan->mmr_base = xordev->xor_base;
1074        if (!mv_chan->mmr_base) {
1075                ret = -ENOMEM;
1076                goto err_free_dma;
1077        }
1078        tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1079                     mv_chan);
1080
1081        /* clear errors before enabling interrupts */
1082        mv_xor_device_clear_err_status(mv_chan);
1083
1084        ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1085                          0, dev_name(&pdev->dev), mv_chan);
1086        if (ret)
1087                goto err_free_dma;
1088
1089        mv_chan_unmask_interrupts(mv_chan);
1090
1091        mv_set_mode(mv_chan, DMA_MEMCPY);
1092
1093        spin_lock_init(&mv_chan->lock);
1094        INIT_LIST_HEAD(&mv_chan->chain);
1095        INIT_LIST_HEAD(&mv_chan->completed_slots);
1096        INIT_LIST_HEAD(&mv_chan->all_slots);
1097        mv_chan->dmachan.device = dma_dev;
1098        dma_cookie_init(&mv_chan->dmachan);
1099
1100        list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1101
1102        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1103                ret = mv_xor_memcpy_self_test(mv_chan);
1104                dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1105                if (ret)
1106                        goto err_free_irq;
1107        }
1108
1109        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1110                ret = mv_xor_xor_self_test(mv_chan);
1111                dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1112                if (ret)
1113                        goto err_free_irq;
1114        }
1115
1116        dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
1117                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1118                 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1119                 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1120
1121        dma_async_device_register(dma_dev);
1122        return mv_chan;
1123
1124err_free_irq:
1125        free_irq(mv_chan->irq, mv_chan);
1126 err_free_dma:
1127        dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1128                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1129        return ERR_PTR(ret);
1130}
1131
1132static void
1133mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1134                         const struct mbus_dram_target_info *dram)
1135{
1136        void __iomem *base = xordev->xor_base;
1137        u32 win_enable = 0;
1138        int i;
1139
1140        for (i = 0; i < 8; i++) {
1141                writel(0, base + WINDOW_BASE(i));
1142                writel(0, base + WINDOW_SIZE(i));
1143                if (i < 4)
1144                        writel(0, base + WINDOW_REMAP_HIGH(i));
1145        }
1146
1147        for (i = 0; i < dram->num_cs; i++) {
1148                const struct mbus_dram_window *cs = dram->cs + i;
1149
1150                writel((cs->base & 0xffff0000) |
1151                       (cs->mbus_attr << 8) |
1152                       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1153                writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1154
1155                win_enable |= (1 << i);
1156                win_enable |= 3 << (16 + (2 * i));
1157        }
1158
1159        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1160        writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1161        writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1162        writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1163}
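/*
 * Every DRAM chip-select advertised by the MBus layer gets its own address
 * decoding window (target id, attributes, base and size) so the XOR engine
 * can reach that memory; unused windows are cleared first, and the same
 * enable/access mask is then programmed into the window control registers
 * of both channels of the unit.
 */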
1164
1165static int mv_xor_probe(struct platform_device *pdev)
1166{
1167        const struct mbus_dram_target_info *dram;
1168        struct mv_xor_device *xordev;
1169        struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
1170        struct resource *res;
1171        int i, ret;
1172
1173        dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1174
1175        xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1176        if (!xordev)
1177                return -ENOMEM;
1178
1179        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1180        if (!res)
1181                return -ENODEV;
1182
1183        xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1184                                        resource_size(res));
1185        if (!xordev->xor_base)
1186                return -EBUSY;
1187
1188        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1189        if (!res)
1190                return -ENODEV;
1191
1192        xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1193                                             resource_size(res));
1194        if (!xordev->xor_high_base)
1195                return -EBUSY;
1196
1197        platform_set_drvdata(pdev, xordev);
1198
1199        /*
1200         * (Re-)program MBUS remapping windows if we are asked to.
1201         */
1202        dram = mv_mbus_dram_info();
1203        if (dram)
1204                mv_xor_conf_mbus_windows(xordev, dram);
1205
1206        /* Not all platforms can gate the clock, so it is not
1207         * an error if the clock does not exist.
1208         */
1209        xordev->clk = clk_get(&pdev->dev, NULL);
1210        if (!IS_ERR(xordev->clk))
1211                clk_prepare_enable(xordev->clk);
1212
1213        if (pdev->dev.of_node) {
1214                struct device_node *np;
1215                int i = 0;
1216
1217                for_each_child_of_node(pdev->dev.of_node, np) {
1218                        dma_cap_mask_t cap_mask;
1219                        int irq;
1220
1221                        dma_cap_zero(cap_mask);
1222                        if (of_property_read_bool(np, "dmacap,memcpy"))
1223                                dma_cap_set(DMA_MEMCPY, cap_mask);
1224                        if (of_property_read_bool(np, "dmacap,xor"))
1225                                dma_cap_set(DMA_XOR, cap_mask);
1226                        if (of_property_read_bool(np, "dmacap,interrupt"))
1227                                dma_cap_set(DMA_INTERRUPT, cap_mask);
1228
1229                        irq = irq_of_parse_and_map(np, 0);
1230                        if (!irq) {
1231                                ret = -ENODEV;
1232                                goto err_channel_add;
1233                        }
1234
1235                        xordev->channels[i] =
1236                                mv_xor_channel_add(xordev, pdev, i,
1237                                                   cap_mask, irq);
1238                        if (IS_ERR(xordev->channels[i])) {
1239                                ret = PTR_ERR(xordev->channels[i]);
1240                                xordev->channels[i] = NULL;
1241                                irq_dispose_mapping(irq);
1242                                goto err_channel_add;
1243                        }
1244
1245                        i++;
1246                }
1247        } else if (pdata && pdata->channels) {
1248                for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1249                        struct mv_xor_channel_data *cd;
1250                        int irq;
1251
1252                        cd = &pdata->channels[i];
1253                        if (!cd) {
1254                                ret = -ENODEV;
1255                                goto err_channel_add;
1256                        }
1257
1258                        irq = platform_get_irq(pdev, i);
1259                        if (irq < 0) {
1260                                ret = irq;
1261                                goto err_channel_add;
1262                        }
1263
1264                        xordev->channels[i] =
1265                                mv_xor_channel_add(xordev, pdev, i,
1266                                                   cd->cap_mask, irq);
1267                        if (IS_ERR(xordev->channels[i])) {
1268                                ret = PTR_ERR(xordev->channels[i]);
1269                                goto err_channel_add;
1270                        }
1271                }
1272        }
1273
1274        return 0;
1275
1276err_channel_add:
1277        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1278                if (xordev->channels[i]) {
1279                        mv_xor_channel_remove(xordev->channels[i]);
1280                        if (pdev->dev.of_node)
1281                                irq_dispose_mapping(xordev->channels[i]->irq);
1282                }
1283
1284        if (!IS_ERR(xordev->clk)) {
1285                clk_disable_unprepare(xordev->clk);
1286                clk_put(xordev->clk);
1287        }
1288
1289        return ret;
1290}
1291
1292static int mv_xor_remove(struct platform_device *pdev)
1293{
1294        struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1295        int i;
1296
1297        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1298                if (xordev->channels[i])
1299                        mv_xor_channel_remove(xordev->channels[i]);
1300        }
1301
1302        if (!IS_ERR(xordev->clk)) {
1303                clk_disable_unprepare(xordev->clk);
1304                clk_put(xordev->clk);
1305        }
1306
1307        return 0;
1308}
1309
1310#ifdef CONFIG_OF
1311static struct of_device_id mv_xor_dt_ids[] = {
1312       { .compatible = "marvell,orion-xor", },
1313       {},
1314};
1315MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1316#endif
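/*
 * A device tree node matched by the table above would look roughly like
 * the sketch below (addresses and interrupt numbers are illustrative; one
 * child node per channel, with the capabilities expressed through the
 * dmacap,* properties parsed in mv_xor_probe()):
 *
 *	xor@d0060900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0xd0060900 0x100
 *		       0xd0060b00 0x100>;
 *		status = "okay";
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */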
1317
1318static struct platform_driver mv_xor_driver = {
1319        .probe          = mv_xor_probe,
1320        .remove         = mv_xor_remove,
1321        .driver         = {
1322                .owner          = THIS_MODULE,
1323                .name           = MV_XOR_NAME,
1324                .of_match_table = of_match_ptr(mv_xor_dt_ids),
1325        },
1326};
1327
1328
1329static int __init mv_xor_init(void)
1330{
1331        return platform_driver_register(&mv_xor_driver);
1332}
1333module_init(mv_xor_init);
1334
1335/* it's currently unsafe to unload this module */
1336#if 0
1337static void __exit mv_xor_exit(void)
1338{
1339        platform_driver_unregister(&mv_xor_driver);
1340        return;
1341}
1342
1343module_exit(mv_xor_exit);
1344#endif
1345
1346MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1347MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1348MODULE_LICENSE("GPL");
1349