/* linux/drivers/vme/vme.c */
   1/*
   2 * VME Bridge Framework
   3 *
   4 * Author: Martyn Welch <martyn.welch@ge.com>
   5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   6 *
   7 * Based on work by Tom Armistead and Ajit Prem
   8 * Copyright 2004 Motorola Inc.
   9 *
  10 * This program is free software; you can redistribute  it and/or modify it
  11 * under  the terms of  the GNU General  Public License as published by the
  12 * Free Software Foundation;  either version 2 of the  License, or (at your
  13 * option) any later version.
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/moduleparam.h>
  18#include <linux/mm.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/pci.h>
  23#include <linux/poll.h>
  24#include <linux/highmem.h>
  25#include <linux/interrupt.h>
  26#include <linux/pagemap.h>
  27#include <linux/device.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/syscalls.h>
  30#include <linux/mutex.h>
  31#include <linux/spinlock.h>
  32#include <linux/slab.h>
  33#include <linux/vme.h>
  34
  35#include "vme_bridge.h"
  36
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;	/* one bit per allocated bus number */
static LIST_HEAD(vme_bus_list);		/* all registered struct vme_bridge */
static DEFINE_MUTEX(vme_buses_lock);	/* guards both of the above */

/* Forward declarations: the module init/exit hooks are defined later. */
static void __exit vme_exit(void);
static int __init vme_init(void);
  44
  45static struct vme_dev *dev_to_vme_dev(struct device *dev)
  46{
  47        return container_of(dev, struct vme_dev, dev);
  48}
  49
  50/*
  51 * Find the bridge that the resource is associated with.
  52 */
  53static struct vme_bridge *find_bridge(struct vme_resource *resource)
  54{
  55        /* Get list to search */
  56        switch (resource->type) {
  57        case VME_MASTER:
  58                return list_entry(resource->entry, struct vme_master_resource,
  59                        list)->parent;
  60                break;
  61        case VME_SLAVE:
  62                return list_entry(resource->entry, struct vme_slave_resource,
  63                        list)->parent;
  64                break;
  65        case VME_DMA:
  66                return list_entry(resource->entry, struct vme_dma_resource,
  67                        list)->parent;
  68                break;
  69        case VME_LM:
  70                return list_entry(resource->entry, struct vme_lm_resource,
  71                        list)->parent;
  72                break;
  73        default:
  74                printk(KERN_ERR "Unknown resource type\n");
  75                return NULL;
  76                break;
  77        }
  78}
  79
  80/*
  81 * Allocate a contiguous block of memory for use by the driver. This is used to
  82 * create the buffers for the slave windows.
  83 */
  84void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  85        dma_addr_t *dma)
  86{
  87        struct vme_bridge *bridge;
  88
  89        if (resource == NULL) {
  90                printk(KERN_ERR "No resource\n");
  91                return NULL;
  92        }
  93
  94        bridge = find_bridge(resource);
  95        if (bridge == NULL) {
  96                printk(KERN_ERR "Can't find bridge\n");
  97                return NULL;
  98        }
  99
 100        if (bridge->parent == NULL) {
 101                printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 102                return NULL;
 103        }
 104
 105        if (bridge->alloc_consistent == NULL) {
 106                printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
 107                       bridge->name);
 108                return NULL;
 109        }
 110
 111        return bridge->alloc_consistent(bridge->parent, size, dma);
 112}
 113EXPORT_SYMBOL(vme_alloc_consistent);
 114
 115/*
 116 * Free previously allocated contiguous block of memory.
 117 */
 118void vme_free_consistent(struct vme_resource *resource, size_t size,
 119        void *vaddr, dma_addr_t dma)
 120{
 121        struct vme_bridge *bridge;
 122
 123        if (resource == NULL) {
 124                printk(KERN_ERR "No resource\n");
 125                return;
 126        }
 127
 128        bridge = find_bridge(resource);
 129        if (bridge == NULL) {
 130                printk(KERN_ERR "Can't find bridge\n");
 131                return;
 132        }
 133
 134        if (bridge->parent == NULL) {
 135                printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 136                return;
 137        }
 138
 139        if (bridge->free_consistent == NULL) {
 140                printk(KERN_ERR "free_consistent not supported by bridge %s\n",
 141                       bridge->name);
 142                return;
 143        }
 144
 145        bridge->free_consistent(bridge->parent, size, vaddr, dma);
 146}
 147EXPORT_SYMBOL(vme_free_consistent);
 148
 149size_t vme_get_size(struct vme_resource *resource)
 150{
 151        int enabled, retval;
 152        unsigned long long base, size;
 153        dma_addr_t buf_base;
 154        u32 aspace, cycle, dwidth;
 155
 156        switch (resource->type) {
 157        case VME_MASTER:
 158                retval = vme_master_get(resource, &enabled, &base, &size,
 159                        &aspace, &cycle, &dwidth);
 160
 161                return size;
 162                break;
 163        case VME_SLAVE:
 164                retval = vme_slave_get(resource, &enabled, &base, &size,
 165                        &buf_base, &aspace, &cycle);
 166
 167                return size;
 168                break;
 169        case VME_DMA:
 170                return 0;
 171                break;
 172        default:
 173                printk(KERN_ERR "Unknown resource type\n");
 174                return 0;
 175                break;
 176        }
 177}
 178EXPORT_SYMBOL(vme_get_size);
 179
 180static int vme_check_window(u32 aspace, unsigned long long vme_base,
 181        unsigned long long size)
 182{
 183        int retval = 0;
 184
 185        switch (aspace) {
 186        case VME_A16:
 187                if (((vme_base + size) > VME_A16_MAX) ||
 188                                (vme_base > VME_A16_MAX))
 189                        retval = -EFAULT;
 190                break;
 191        case VME_A24:
 192                if (((vme_base + size) > VME_A24_MAX) ||
 193                                (vme_base > VME_A24_MAX))
 194                        retval = -EFAULT;
 195                break;
 196        case VME_A32:
 197                if (((vme_base + size) > VME_A32_MAX) ||
 198                                (vme_base > VME_A32_MAX))
 199                        retval = -EFAULT;
 200                break;
 201        case VME_A64:
 202                /*
 203                 * Any value held in an unsigned long long can be used as the
 204                 * base
 205                 */
 206                break;
 207        case VME_CRCSR:
 208                if (((vme_base + size) > VME_CRCSR_MAX) ||
 209                                (vme_base > VME_CRCSR_MAX))
 210                        retval = -EFAULT;
 211                break;
 212        case VME_USER1:
 213        case VME_USER2:
 214        case VME_USER3:
 215        case VME_USER4:
 216                /* User Defined */
 217                break;
 218        default:
 219                printk(KERN_ERR "Invalid address space\n");
 220                retval = -EINVAL;
 221                break;
 222        }
 223
 224        return retval;
 225}
 226
 227/*
 228 * Request a slave image with specific attributes, return some unique
 229 * identifier.
 230 */
 231struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
 232        u32 cycle)
 233{
 234        struct vme_bridge *bridge;
 235        struct list_head *slave_pos = NULL;
 236        struct vme_slave_resource *allocated_image = NULL;
 237        struct vme_slave_resource *slave_image = NULL;
 238        struct vme_resource *resource = NULL;
 239
 240        bridge = vdev->bridge;
 241        if (bridge == NULL) {
 242                printk(KERN_ERR "Can't find VME bus\n");
 243                goto err_bus;
 244        }
 245
 246        /* Loop through slave resources */
 247        list_for_each(slave_pos, &bridge->slave_resources) {
 248                slave_image = list_entry(slave_pos,
 249                        struct vme_slave_resource, list);
 250
 251                if (slave_image == NULL) {
 252                        printk(KERN_ERR "Registered NULL Slave resource\n");
 253                        continue;
 254                }
 255
 256                /* Find an unlocked and compatible image */
 257                mutex_lock(&slave_image->mtx);
 258                if (((slave_image->address_attr & address) == address) &&
 259                        ((slave_image->cycle_attr & cycle) == cycle) &&
 260                        (slave_image->locked == 0)) {
 261
 262                        slave_image->locked = 1;
 263                        mutex_unlock(&slave_image->mtx);
 264                        allocated_image = slave_image;
 265                        break;
 266                }
 267                mutex_unlock(&slave_image->mtx);
 268        }
 269
 270        /* No free image */
 271        if (allocated_image == NULL)
 272                goto err_image;
 273
 274        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
 275        if (resource == NULL) {
 276                printk(KERN_WARNING "Unable to allocate resource structure\n");
 277                goto err_alloc;
 278        }
 279        resource->type = VME_SLAVE;
 280        resource->entry = &allocated_image->list;
 281
 282        return resource;
 283
 284err_alloc:
 285        /* Unlock image */
 286        mutex_lock(&slave_image->mtx);
 287        slave_image->locked = 0;
 288        mutex_unlock(&slave_image->mtx);
 289err_image:
 290err_bus:
 291        return NULL;
 292}
 293EXPORT_SYMBOL(vme_slave_request);
 294
 295int vme_slave_set(struct vme_resource *resource, int enabled,
 296        unsigned long long vme_base, unsigned long long size,
 297        dma_addr_t buf_base, u32 aspace, u32 cycle)
 298{
 299        struct vme_bridge *bridge = find_bridge(resource);
 300        struct vme_slave_resource *image;
 301        int retval;
 302
 303        if (resource->type != VME_SLAVE) {
 304                printk(KERN_ERR "Not a slave resource\n");
 305                return -EINVAL;
 306        }
 307
 308        image = list_entry(resource->entry, struct vme_slave_resource, list);
 309
 310        if (bridge->slave_set == NULL) {
 311                printk(KERN_ERR "Function not supported\n");
 312                return -ENOSYS;
 313        }
 314
 315        if (!(((image->address_attr & aspace) == aspace) &&
 316                ((image->cycle_attr & cycle) == cycle))) {
 317                printk(KERN_ERR "Invalid attributes\n");
 318                return -EINVAL;
 319        }
 320
 321        retval = vme_check_window(aspace, vme_base, size);
 322        if (retval)
 323                return retval;
 324
 325        return bridge->slave_set(image, enabled, vme_base, size, buf_base,
 326                aspace, cycle);
 327}
 328EXPORT_SYMBOL(vme_slave_set);
 329
 330int vme_slave_get(struct vme_resource *resource, int *enabled,
 331        unsigned long long *vme_base, unsigned long long *size,
 332        dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
 333{
 334        struct vme_bridge *bridge = find_bridge(resource);
 335        struct vme_slave_resource *image;
 336
 337        if (resource->type != VME_SLAVE) {
 338                printk(KERN_ERR "Not a slave resource\n");
 339                return -EINVAL;
 340        }
 341
 342        image = list_entry(resource->entry, struct vme_slave_resource, list);
 343
 344        if (bridge->slave_get == NULL) {
 345                printk(KERN_ERR "vme_slave_get not supported\n");
 346                return -EINVAL;
 347        }
 348
 349        return bridge->slave_get(image, enabled, vme_base, size, buf_base,
 350                aspace, cycle);
 351}
 352EXPORT_SYMBOL(vme_slave_get);
 353
/*
 * Release a slave window previously obtained via vme_slave_request():
 * clears the image's locked flag and frees the resource wrapper.
 */
void vme_slave_free(struct vme_resource *resource)
{
        struct vme_slave_resource *slave_image;

        if (resource->type != VME_SLAVE) {
                printk(KERN_ERR "Not a slave resource\n");
                return;
        }

        slave_image = list_entry(resource->entry, struct vme_slave_resource,
                list);
        /* list_entry() is pointer arithmetic; for a valid entry this check
         * is defensive only and should never fire. */
        if (slave_image == NULL) {
                printk(KERN_ERR "Can't find slave resource\n");
                return;
        }

        /* Unlock image; warn (but proceed) on a double free of the image */
        mutex_lock(&slave_image->mtx);
        if (slave_image->locked == 0)
                printk(KERN_ERR "Image is already free\n");

        slave_image->locked = 0;
        mutex_unlock(&slave_image->mtx);

        /* Free up resource memory */
        kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
 382
 383/*
 384 * Request a master image with specific attributes, return some unique
 385 * identifier.
 386 */
 387struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
 388        u32 cycle, u32 dwidth)
 389{
 390        struct vme_bridge *bridge;
 391        struct list_head *master_pos = NULL;
 392        struct vme_master_resource *allocated_image = NULL;
 393        struct vme_master_resource *master_image = NULL;
 394        struct vme_resource *resource = NULL;
 395
 396        bridge = vdev->bridge;
 397        if (bridge == NULL) {
 398                printk(KERN_ERR "Can't find VME bus\n");
 399                goto err_bus;
 400        }
 401
 402        /* Loop through master resources */
 403        list_for_each(master_pos, &bridge->master_resources) {
 404                master_image = list_entry(master_pos,
 405                        struct vme_master_resource, list);
 406
 407                if (master_image == NULL) {
 408                        printk(KERN_WARNING "Registered NULL master resource\n");
 409                        continue;
 410                }
 411
 412                /* Find an unlocked and compatible image */
 413                spin_lock(&master_image->lock);
 414                if (((master_image->address_attr & address) == address) &&
 415                        ((master_image->cycle_attr & cycle) == cycle) &&
 416                        ((master_image->width_attr & dwidth) == dwidth) &&
 417                        (master_image->locked == 0)) {
 418
 419                        master_image->locked = 1;
 420                        spin_unlock(&master_image->lock);
 421                        allocated_image = master_image;
 422                        break;
 423                }
 424                spin_unlock(&master_image->lock);
 425        }
 426
 427        /* Check to see if we found a resource */
 428        if (allocated_image == NULL) {
 429                printk(KERN_ERR "Can't find a suitable resource\n");
 430                goto err_image;
 431        }
 432
 433        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
 434        if (resource == NULL) {
 435                printk(KERN_ERR "Unable to allocate resource structure\n");
 436                goto err_alloc;
 437        }
 438        resource->type = VME_MASTER;
 439        resource->entry = &allocated_image->list;
 440
 441        return resource;
 442
 443err_alloc:
 444        /* Unlock image */
 445        spin_lock(&master_image->lock);
 446        master_image->locked = 0;
 447        spin_unlock(&master_image->lock);
 448err_image:
 449err_bus:
 450        return NULL;
 451}
 452EXPORT_SYMBOL(vme_master_request);
 453
 454int vme_master_set(struct vme_resource *resource, int enabled,
 455        unsigned long long vme_base, unsigned long long size, u32 aspace,
 456        u32 cycle, u32 dwidth)
 457{
 458        struct vme_bridge *bridge = find_bridge(resource);
 459        struct vme_master_resource *image;
 460        int retval;
 461
 462        if (resource->type != VME_MASTER) {
 463                printk(KERN_ERR "Not a master resource\n");
 464                return -EINVAL;
 465        }
 466
 467        image = list_entry(resource->entry, struct vme_master_resource, list);
 468
 469        if (bridge->master_set == NULL) {
 470                printk(KERN_WARNING "vme_master_set not supported\n");
 471                return -EINVAL;
 472        }
 473
 474        if (!(((image->address_attr & aspace) == aspace) &&
 475                ((image->cycle_attr & cycle) == cycle) &&
 476                ((image->width_attr & dwidth) == dwidth))) {
 477                printk(KERN_WARNING "Invalid attributes\n");
 478                return -EINVAL;
 479        }
 480
 481        retval = vme_check_window(aspace, vme_base, size);
 482        if (retval)
 483                return retval;
 484
 485        return bridge->master_set(image, enabled, vme_base, size, aspace,
 486                cycle, dwidth);
 487}
 488EXPORT_SYMBOL(vme_master_set);
 489
 490int vme_master_get(struct vme_resource *resource, int *enabled,
 491        unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
 492        u32 *cycle, u32 *dwidth)
 493{
 494        struct vme_bridge *bridge = find_bridge(resource);
 495        struct vme_master_resource *image;
 496
 497        if (resource->type != VME_MASTER) {
 498                printk(KERN_ERR "Not a master resource\n");
 499                return -EINVAL;
 500        }
 501
 502        image = list_entry(resource->entry, struct vme_master_resource, list);
 503
 504        if (bridge->master_get == NULL) {
 505                printk(KERN_WARNING "vme_master_set not supported\n");
 506                return -EINVAL;
 507        }
 508
 509        return bridge->master_get(image, enabled, vme_base, size, aspace,
 510                cycle, dwidth);
 511}
 512EXPORT_SYMBOL(vme_master_get);
 513
 514/*
 515 * Read data out of VME space into a buffer.
 516 */
 517ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
 518        loff_t offset)
 519{
 520        struct vme_bridge *bridge = find_bridge(resource);
 521        struct vme_master_resource *image;
 522        size_t length;
 523
 524        if (bridge->master_read == NULL) {
 525                printk(KERN_WARNING "Reading from resource not supported\n");
 526                return -EINVAL;
 527        }
 528
 529        if (resource->type != VME_MASTER) {
 530                printk(KERN_ERR "Not a master resource\n");
 531                return -EINVAL;
 532        }
 533
 534        image = list_entry(resource->entry, struct vme_master_resource, list);
 535
 536        length = vme_get_size(resource);
 537
 538        if (offset > length) {
 539                printk(KERN_WARNING "Invalid Offset\n");
 540                return -EFAULT;
 541        }
 542
 543        if ((offset + count) > length)
 544                count = length - offset;
 545
 546        return bridge->master_read(image, buf, count, offset);
 547
 548}
 549EXPORT_SYMBOL(vme_master_read);
 550
 551/*
 552 * Write data out to VME space from a buffer.
 553 */
 554ssize_t vme_master_write(struct vme_resource *resource, void *buf,
 555        size_t count, loff_t offset)
 556{
 557        struct vme_bridge *bridge = find_bridge(resource);
 558        struct vme_master_resource *image;
 559        size_t length;
 560
 561        if (bridge->master_write == NULL) {
 562                printk(KERN_WARNING "Writing to resource not supported\n");
 563                return -EINVAL;
 564        }
 565
 566        if (resource->type != VME_MASTER) {
 567                printk(KERN_ERR "Not a master resource\n");
 568                return -EINVAL;
 569        }
 570
 571        image = list_entry(resource->entry, struct vme_master_resource, list);
 572
 573        length = vme_get_size(resource);
 574
 575        if (offset > length) {
 576                printk(KERN_WARNING "Invalid Offset\n");
 577                return -EFAULT;
 578        }
 579
 580        if ((offset + count) > length)
 581                count = length - offset;
 582
 583        return bridge->master_write(image, buf, count, offset);
 584}
 585EXPORT_SYMBOL(vme_master_write);
 586
/*
 * Perform RMW cycle to provided location.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
        unsigned int compare, unsigned int swap, loff_t offset)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_master_resource *image;

        /* NOTE(review): message below was copied from the write path; the
         * condition actually detects a missing RMW handler. */
        if (bridge->master_rmw == NULL) {
                printk(KERN_WARNING "Writing to resource not supported\n");
                return -EINVAL;
        }

        if (resource->type != VME_MASTER) {
                printk(KERN_ERR "Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);

        /* NOTE(review): the return type is unsigned int, so the -EINVAL
         * values above reach callers as large positive numbers — confirm
         * how callers distinguish errors from RMW results. */
        return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
 611
 612void vme_master_free(struct vme_resource *resource)
 613{
 614        struct vme_master_resource *master_image;
 615
 616        if (resource->type != VME_MASTER) {
 617                printk(KERN_ERR "Not a master resource\n");
 618                return;
 619        }
 620
 621        master_image = list_entry(resource->entry, struct vme_master_resource,
 622                list);
 623        if (master_image == NULL) {
 624                printk(KERN_ERR "Can't find master resource\n");
 625                return;
 626        }
 627
 628        /* Unlock image */
 629        spin_lock(&master_image->lock);
 630        if (master_image->locked == 0)
 631                printk(KERN_ERR "Image is already free\n");
 632
 633        master_image->locked = 0;
 634        spin_unlock(&master_image->lock);
 635
 636        /* Free up resource memory */
 637        kfree(resource);
 638}
 639EXPORT_SYMBOL(vme_master_free);
 640
 641/*
 642 * Request a DMA controller with specific attributes, return some unique
 643 * identifier.
 644 */
 645struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
 646{
 647        struct vme_bridge *bridge;
 648        struct list_head *dma_pos = NULL;
 649        struct vme_dma_resource *allocated_ctrlr = NULL;
 650        struct vme_dma_resource *dma_ctrlr = NULL;
 651        struct vme_resource *resource = NULL;
 652
 653        /* XXX Not checking resource attributes */
 654        printk(KERN_ERR "No VME resource Attribute tests done\n");
 655
 656        bridge = vdev->bridge;
 657        if (bridge == NULL) {
 658                printk(KERN_ERR "Can't find VME bus\n");
 659                goto err_bus;
 660        }
 661
 662        /* Loop through DMA resources */
 663        list_for_each(dma_pos, &bridge->dma_resources) {
 664                dma_ctrlr = list_entry(dma_pos,
 665                        struct vme_dma_resource, list);
 666
 667                if (dma_ctrlr == NULL) {
 668                        printk(KERN_ERR "Registered NULL DMA resource\n");
 669                        continue;
 670                }
 671
 672                /* Find an unlocked and compatible controller */
 673                mutex_lock(&dma_ctrlr->mtx);
 674                if (((dma_ctrlr->route_attr & route) == route) &&
 675                        (dma_ctrlr->locked == 0)) {
 676
 677                        dma_ctrlr->locked = 1;
 678                        mutex_unlock(&dma_ctrlr->mtx);
 679                        allocated_ctrlr = dma_ctrlr;
 680                        break;
 681                }
 682                mutex_unlock(&dma_ctrlr->mtx);
 683        }
 684
 685        /* Check to see if we found a resource */
 686        if (allocated_ctrlr == NULL)
 687                goto err_ctrlr;
 688
 689        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
 690        if (resource == NULL) {
 691                printk(KERN_WARNING "Unable to allocate resource structure\n");
 692                goto err_alloc;
 693        }
 694        resource->type = VME_DMA;
 695        resource->entry = &allocated_ctrlr->list;
 696
 697        return resource;
 698
 699err_alloc:
 700        /* Unlock image */
 701        mutex_lock(&dma_ctrlr->mtx);
 702        dma_ctrlr->locked = 0;
 703        mutex_unlock(&dma_ctrlr->mtx);
 704err_ctrlr:
 705err_bus:
 706        return NULL;
 707}
 708EXPORT_SYMBOL(vme_dma_request);
 709
 710/*
 711 * Start new list
 712 */
 713struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
 714{
 715        struct vme_dma_resource *ctrlr;
 716        struct vme_dma_list *dma_list;
 717
 718        if (resource->type != VME_DMA) {
 719                printk(KERN_ERR "Not a DMA resource\n");
 720                return NULL;
 721        }
 722
 723        ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
 724
 725        dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
 726        if (dma_list == NULL) {
 727                printk(KERN_ERR "Unable to allocate memory for new dma list\n");
 728                return NULL;
 729        }
 730        INIT_LIST_HEAD(&dma_list->entries);
 731        dma_list->parent = ctrlr;
 732        mutex_init(&dma_list->mtx);
 733
 734        return dma_list;
 735}
 736EXPORT_SYMBOL(vme_new_dma_list);
 737
 738/*
 739 * Create "Pattern" type attributes
 740 */
 741struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
 742{
 743        struct vme_dma_attr *attributes;
 744        struct vme_dma_pattern *pattern_attr;
 745
 746        attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
 747        if (attributes == NULL) {
 748                printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 749                goto err_attr;
 750        }
 751
 752        pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
 753        if (pattern_attr == NULL) {
 754                printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
 755                goto err_pat;
 756        }
 757
 758        attributes->type = VME_DMA_PATTERN;
 759        attributes->private = (void *)pattern_attr;
 760
 761        pattern_attr->pattern = pattern;
 762        pattern_attr->type = type;
 763
 764        return attributes;
 765
 766err_pat:
 767        kfree(attributes);
 768err_attr:
 769        return NULL;
 770}
 771EXPORT_SYMBOL(vme_dma_pattern_attribute);
 772
 773/*
 774 * Create "PCI" type attributes
 775 */
 776struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
 777{
 778        struct vme_dma_attr *attributes;
 779        struct vme_dma_pci *pci_attr;
 780
 781        /* XXX Run some sanity checks here */
 782
 783        attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
 784        if (attributes == NULL) {
 785                printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 786                goto err_attr;
 787        }
 788
 789        pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
 790        if (pci_attr == NULL) {
 791                printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
 792                goto err_pci;
 793        }
 794
 795
 796
 797        attributes->type = VME_DMA_PCI;
 798        attributes->private = (void *)pci_attr;
 799
 800        pci_attr->address = address;
 801
 802        return attributes;
 803
 804err_pci:
 805        kfree(attributes);
 806err_attr:
 807        return NULL;
 808}
 809EXPORT_SYMBOL(vme_dma_pci_attribute);
 810
 811/*
 812 * Create "VME" type attributes
 813 */
 814struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
 815        u32 aspace, u32 cycle, u32 dwidth)
 816{
 817        struct vme_dma_attr *attributes;
 818        struct vme_dma_vme *vme_attr;
 819
 820        attributes = kmalloc(
 821                sizeof(struct vme_dma_attr), GFP_KERNEL);
 822        if (attributes == NULL) {
 823                printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 824                goto err_attr;
 825        }
 826
 827        vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
 828        if (vme_attr == NULL) {
 829                printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
 830                goto err_vme;
 831        }
 832
 833        attributes->type = VME_DMA_VME;
 834        attributes->private = (void *)vme_attr;
 835
 836        vme_attr->address = address;
 837        vme_attr->aspace = aspace;
 838        vme_attr->cycle = cycle;
 839        vme_attr->dwidth = dwidth;
 840
 841        return attributes;
 842
 843err_vme:
 844        kfree(attributes);
 845err_attr:
 846        return NULL;
 847}
 848EXPORT_SYMBOL(vme_dma_vme_attribute);
 849
 850/*
 851 * Free attribute
 852 */
 853void vme_dma_free_attribute(struct vme_dma_attr *attributes)
 854{
 855        kfree(attributes->private);
 856        kfree(attributes);
 857}
 858EXPORT_SYMBOL(vme_dma_free_attribute);
 859
/*
 * Append a src->dest transfer of 'count' bytes to a DMA linked list via
 * the bridge driver. Returns -EINVAL if the bridge has no list support or
 * the list is busy, otherwise the bridge driver's result.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
        struct vme_dma_attr *dest, size_t count)
{
        struct vme_bridge *bridge = list->parent->parent;
        int retval;

        if (bridge->dma_list_add == NULL) {
                printk(KERN_WARNING "Link List DMA generation not supported\n");
                return -EINVAL;
        }

        /* trylock rather than lock: a held mutex presumably means the list
         * has been submitted for execution, so the add is refused instead
         * of blocking until it completes. */
        if (!mutex_trylock(&list->mtx)) {
                printk(KERN_ERR "Link List already submitted\n");
                return -EINVAL;
        }

        retval = bridge->dma_list_add(list, src, dest, count);

        mutex_unlock(&list->mtx);

        return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
 883
 884int vme_dma_list_exec(struct vme_dma_list *list)
 885{
 886        struct vme_bridge *bridge = list->parent->parent;
 887        int retval;
 888
 889        if (bridge->dma_list_exec == NULL) {
 890                printk(KERN_ERR "Link List DMA execution not supported\n");
 891                return -EINVAL;
 892        }
 893
 894        mutex_lock(&list->mtx);
 895
 896        retval = bridge->dma_list_exec(list);
 897
 898        mutex_unlock(&list->mtx);
 899
 900        return retval;
 901}
 902EXPORT_SYMBOL(vme_dma_list_exec);
 903
 904int vme_dma_list_free(struct vme_dma_list *list)
 905{
 906        struct vme_bridge *bridge = list->parent->parent;
 907        int retval;
 908
 909        if (bridge->dma_list_empty == NULL) {
 910                printk(KERN_WARNING "Emptying of Link Lists not supported\n");
 911                return -EINVAL;
 912        }
 913
 914        if (!mutex_trylock(&list->mtx)) {
 915                printk(KERN_ERR "Link List in use\n");
 916                return -EINVAL;
 917        }
 918
 919        /*
 920         * Empty out all of the entries from the dma list. We need to go to the
 921         * low level driver as dma entries are driver specific.
 922         */
 923        retval = bridge->dma_list_empty(list);
 924        if (retval) {
 925                printk(KERN_ERR "Unable to empty link-list entries\n");
 926                mutex_unlock(&list->mtx);
 927                return retval;
 928        }
 929        mutex_unlock(&list->mtx);
 930        kfree(list);
 931
 932        return retval;
 933}
 934EXPORT_SYMBOL(vme_dma_list_free);
 935
 936int vme_dma_free(struct vme_resource *resource)
 937{
 938        struct vme_dma_resource *ctrlr;
 939
 940        if (resource->type != VME_DMA) {
 941                printk(KERN_ERR "Not a DMA resource\n");
 942                return -EINVAL;
 943        }
 944
 945        ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
 946
 947        if (!mutex_trylock(&ctrlr->mtx)) {
 948                printk(KERN_ERR "Resource busy, can't free\n");
 949                return -EBUSY;
 950        }
 951
 952        if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
 953                printk(KERN_WARNING "Resource still processing transfers\n");
 954                mutex_unlock(&ctrlr->mtx);
 955                return -EBUSY;
 956        }
 957
 958        ctrlr->locked = 0;
 959
 960        mutex_unlock(&ctrlr->mtx);
 961
 962        kfree(resource);
 963
 964        return 0;
 965}
 966EXPORT_SYMBOL(vme_dma_free);
 967
 968void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
 969{
 970        void (*call)(int, int, void *);
 971        void *priv_data;
 972
 973        call = bridge->irq[level - 1].callback[statid].func;
 974        priv_data = bridge->irq[level - 1].callback[statid].priv_data;
 975
 976        if (call != NULL)
 977                call(level, statid, priv_data);
 978        else
 979                printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
 980                       level, statid);
 981}
 982EXPORT_SYMBOL(vme_irq_handler);
 983
 984int vme_irq_request(struct vme_dev *vdev, int level, int statid,
 985        void (*callback)(int, int, void *),
 986        void *priv_data)
 987{
 988        struct vme_bridge *bridge;
 989
 990        bridge = vdev->bridge;
 991        if (bridge == NULL) {
 992                printk(KERN_ERR "Can't find VME bus\n");
 993                return -EINVAL;
 994        }
 995
 996        if ((level < 1) || (level > 7)) {
 997                printk(KERN_ERR "Invalid interrupt level\n");
 998                return -EINVAL;
 999        }
1000
1001        if (bridge->irq_set == NULL) {
1002                printk(KERN_ERR "Configuring interrupts not supported\n");
1003                return -EINVAL;
1004        }
1005
1006        mutex_lock(&bridge->irq_mtx);
1007
1008        if (bridge->irq[level - 1].callback[statid].func) {
1009                mutex_unlock(&bridge->irq_mtx);
1010                printk(KERN_WARNING "VME Interrupt already taken\n");
1011                return -EBUSY;
1012        }
1013
1014        bridge->irq[level - 1].count++;
1015        bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1016        bridge->irq[level - 1].callback[statid].func = callback;
1017
1018        /* Enable IRQ level */
1019        bridge->irq_set(bridge, level, 1, 1);
1020
1021        mutex_unlock(&bridge->irq_mtx);
1022
1023        return 0;
1024}
1025EXPORT_SYMBOL(vme_irq_request);
1026
1027void vme_irq_free(struct vme_dev *vdev, int level, int statid)
1028{
1029        struct vme_bridge *bridge;
1030
1031        bridge = vdev->bridge;
1032        if (bridge == NULL) {
1033                printk(KERN_ERR "Can't find VME bus\n");
1034                return;
1035        }
1036
1037        if ((level < 1) || (level > 7)) {
1038                printk(KERN_ERR "Invalid interrupt level\n");
1039                return;
1040        }
1041
1042        if (bridge->irq_set == NULL) {
1043                printk(KERN_ERR "Configuring interrupts not supported\n");
1044                return;
1045        }
1046
1047        mutex_lock(&bridge->irq_mtx);
1048
1049        bridge->irq[level - 1].count--;
1050
1051        /* Disable IRQ level if no more interrupts attached at this level*/
1052        if (bridge->irq[level - 1].count == 0)
1053                bridge->irq_set(bridge, level, 0, 1);
1054
1055        bridge->irq[level - 1].callback[statid].func = NULL;
1056        bridge->irq[level - 1].callback[statid].priv_data = NULL;
1057
1058        mutex_unlock(&bridge->irq_mtx);
1059}
1060EXPORT_SYMBOL(vme_irq_free);
1061
1062int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1063{
1064        struct vme_bridge *bridge;
1065
1066        bridge = vdev->bridge;
1067        if (bridge == NULL) {
1068                printk(KERN_ERR "Can't find VME bus\n");
1069                return -EINVAL;
1070        }
1071
1072        if ((level < 1) || (level > 7)) {
1073                printk(KERN_WARNING "Invalid interrupt level\n");
1074                return -EINVAL;
1075        }
1076
1077        if (bridge->irq_generate == NULL) {
1078                printk(KERN_WARNING "Interrupt generation not supported\n");
1079                return -EINVAL;
1080        }
1081
1082        return bridge->irq_generate(bridge, level, statid);
1083}
1084EXPORT_SYMBOL(vme_irq_generate);
1085
1086/*
1087 * Request the location monitor, return resource or NULL
1088 */
1089struct vme_resource *vme_lm_request(struct vme_dev *vdev)
1090{
1091        struct vme_bridge *bridge;
1092        struct list_head *lm_pos = NULL;
1093        struct vme_lm_resource *allocated_lm = NULL;
1094        struct vme_lm_resource *lm = NULL;
1095        struct vme_resource *resource = NULL;
1096
1097        bridge = vdev->bridge;
1098        if (bridge == NULL) {
1099                printk(KERN_ERR "Can't find VME bus\n");
1100                goto err_bus;
1101        }
1102
1103        /* Loop through DMA resources */
1104        list_for_each(lm_pos, &bridge->lm_resources) {
1105                lm = list_entry(lm_pos,
1106                        struct vme_lm_resource, list);
1107
1108                if (lm == NULL) {
1109                        printk(KERN_ERR "Registered NULL Location Monitor resource\n");
1110                        continue;
1111                }
1112
1113                /* Find an unlocked controller */
1114                mutex_lock(&lm->mtx);
1115                if (lm->locked == 0) {
1116                        lm->locked = 1;
1117                        mutex_unlock(&lm->mtx);
1118                        allocated_lm = lm;
1119                        break;
1120                }
1121                mutex_unlock(&lm->mtx);
1122        }
1123
1124        /* Check to see if we found a resource */
1125        if (allocated_lm == NULL)
1126                goto err_lm;
1127
1128        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1129        if (resource == NULL) {
1130                printk(KERN_ERR "Unable to allocate resource structure\n");
1131                goto err_alloc;
1132        }
1133        resource->type = VME_LM;
1134        resource->entry = &allocated_lm->list;
1135
1136        return resource;
1137
1138err_alloc:
1139        /* Unlock image */
1140        mutex_lock(&lm->mtx);
1141        lm->locked = 0;
1142        mutex_unlock(&lm->mtx);
1143err_lm:
1144err_bus:
1145        return NULL;
1146}
1147EXPORT_SYMBOL(vme_lm_request);
1148
1149int vme_lm_count(struct vme_resource *resource)
1150{
1151        struct vme_lm_resource *lm;
1152
1153        if (resource->type != VME_LM) {
1154                printk(KERN_ERR "Not a Location Monitor resource\n");
1155                return -EINVAL;
1156        }
1157
1158        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1159
1160        return lm->monitors;
1161}
1162EXPORT_SYMBOL(vme_lm_count);
1163
1164int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1165        u32 aspace, u32 cycle)
1166{
1167        struct vme_bridge *bridge = find_bridge(resource);
1168        struct vme_lm_resource *lm;
1169
1170        if (resource->type != VME_LM) {
1171                printk(KERN_ERR "Not a Location Monitor resource\n");
1172                return -EINVAL;
1173        }
1174
1175        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1176
1177        if (bridge->lm_set == NULL) {
1178                printk(KERN_ERR "vme_lm_set not supported\n");
1179                return -EINVAL;
1180        }
1181
1182        return bridge->lm_set(lm, lm_base, aspace, cycle);
1183}
1184EXPORT_SYMBOL(vme_lm_set);
1185
1186int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1187        u32 *aspace, u32 *cycle)
1188{
1189        struct vme_bridge *bridge = find_bridge(resource);
1190        struct vme_lm_resource *lm;
1191
1192        if (resource->type != VME_LM) {
1193                printk(KERN_ERR "Not a Location Monitor resource\n");
1194                return -EINVAL;
1195        }
1196
1197        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1198
1199        if (bridge->lm_get == NULL) {
1200                printk(KERN_ERR "vme_lm_get not supported\n");
1201                return -EINVAL;
1202        }
1203
1204        return bridge->lm_get(lm, lm_base, aspace, cycle);
1205}
1206EXPORT_SYMBOL(vme_lm_get);
1207
1208int vme_lm_attach(struct vme_resource *resource, int monitor,
1209        void (*callback)(int))
1210{
1211        struct vme_bridge *bridge = find_bridge(resource);
1212        struct vme_lm_resource *lm;
1213
1214        if (resource->type != VME_LM) {
1215                printk(KERN_ERR "Not a Location Monitor resource\n");
1216                return -EINVAL;
1217        }
1218
1219        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1220
1221        if (bridge->lm_attach == NULL) {
1222                printk(KERN_ERR "vme_lm_attach not supported\n");
1223                return -EINVAL;
1224        }
1225
1226        return bridge->lm_attach(lm, monitor, callback);
1227}
1228EXPORT_SYMBOL(vme_lm_attach);
1229
1230int vme_lm_detach(struct vme_resource *resource, int monitor)
1231{
1232        struct vme_bridge *bridge = find_bridge(resource);
1233        struct vme_lm_resource *lm;
1234
1235        if (resource->type != VME_LM) {
1236                printk(KERN_ERR "Not a Location Monitor resource\n");
1237                return -EINVAL;
1238        }
1239
1240        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1241
1242        if (bridge->lm_detach == NULL) {
1243                printk(KERN_ERR "vme_lm_detach not supported\n");
1244                return -EINVAL;
1245        }
1246
1247        return bridge->lm_detach(lm, monitor);
1248}
1249EXPORT_SYMBOL(vme_lm_detach);
1250
1251void vme_lm_free(struct vme_resource *resource)
1252{
1253        struct vme_lm_resource *lm;
1254
1255        if (resource->type != VME_LM) {
1256                printk(KERN_ERR "Not a Location Monitor resource\n");
1257                return;
1258        }
1259
1260        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1261
1262        mutex_lock(&lm->mtx);
1263
1264        /* XXX
1265         * Check to see that there aren't any callbacks still attached, if
1266         * there are we should probably be detaching them!
1267         */
1268
1269        lm->locked = 0;
1270
1271        mutex_unlock(&lm->mtx);
1272
1273        kfree(resource);
1274}
1275EXPORT_SYMBOL(vme_lm_free);
1276
1277int vme_slot_get(struct vme_dev *vdev)
1278{
1279        struct vme_bridge *bridge;
1280
1281        bridge = vdev->bridge;
1282        if (bridge == NULL) {
1283                printk(KERN_ERR "Can't find VME bus\n");
1284                return -EINVAL;
1285        }
1286
1287        if (bridge->slot_get == NULL) {
1288                printk(KERN_WARNING "vme_slot_get not supported\n");
1289                return -EINVAL;
1290        }
1291
1292        return bridge->slot_get(bridge);
1293}
1294EXPORT_SYMBOL(vme_slot_get);
1295
1296
1297/* - Bridge Registration --------------------------------------------------- */
1298
/*
 * Release callback for VME devices: invoked by the driver core when the
 * last reference to the embedded struct device is dropped; frees the
 * containing struct vme_dev.
 */
static void vme_dev_release(struct device *dev)
{
        kfree(dev_to_vme_dev(dev));
}
1303
1304int vme_register_bridge(struct vme_bridge *bridge)
1305{
1306        int i;
1307        int ret = -1;
1308
1309        mutex_lock(&vme_buses_lock);
1310        for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1311                if ((vme_bus_numbers & (1 << i)) == 0) {
1312                        vme_bus_numbers |= (1 << i);
1313                        bridge->num = i;
1314                        INIT_LIST_HEAD(&bridge->devices);
1315                        list_add_tail(&bridge->bus_list, &vme_bus_list);
1316                        ret = 0;
1317                        break;
1318                }
1319        }
1320        mutex_unlock(&vme_buses_lock);
1321
1322        return ret;
1323}
1324EXPORT_SYMBOL(vme_register_bridge);
1325
/*
 * Remove a bridge from the framework: release its bus number bit,
 * unregister every device still attached to it, and drop it from the
 * global bus list.  The whole teardown runs under vme_buses_lock so it
 * cannot race with driver registration walking vme_bus_list.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
        struct vme_dev *vdev;
        struct vme_dev *tmp;

        mutex_lock(&vme_buses_lock);
        vme_bus_numbers &= ~(1 << bridge->num);
        list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
                /* Unlink from both the driver's and the bridge's lists */
                list_del(&vdev->drv_list);
                list_del(&vdev->bridge_list);
                device_unregister(&vdev->dev);
        }
        list_del(&bridge->bus_list);
        mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
1342
1343/* - Driver Registration --------------------------------------------------- */
1344
1345static int __vme_register_driver_bus(struct vme_driver *drv,
1346        struct vme_bridge *bridge, unsigned int ndevs)
1347{
1348        int err;
1349        unsigned int i;
1350        struct vme_dev *vdev;
1351        struct vme_dev *tmp;
1352
1353        for (i = 0; i < ndevs; i++) {
1354                vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
1355                if (!vdev) {
1356                        err = -ENOMEM;
1357                        goto err_devalloc;
1358                }
1359                vdev->num = i;
1360                vdev->bridge = bridge;
1361                vdev->dev.platform_data = drv;
1362                vdev->dev.release = vme_dev_release;
1363                vdev->dev.parent = bridge->parent;
1364                vdev->dev.bus = &vme_bus_type;
1365                dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
1366                        vdev->num);
1367
1368                err = device_register(&vdev->dev);
1369                if (err)
1370                        goto err_reg;
1371
1372                if (vdev->dev.platform_data) {
1373                        list_add_tail(&vdev->drv_list, &drv->devices);
1374                        list_add_tail(&vdev->bridge_list, &bridge->devices);
1375                } else
1376                        device_unregister(&vdev->dev);
1377        }
1378        return 0;
1379
1380err_reg:
1381        put_device(&vdev->dev);
1382        kfree(vdev);
1383err_devalloc:
1384        list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
1385                list_del(&vdev->drv_list);
1386                list_del(&vdev->bridge_list);
1387                device_unregister(&vdev->dev);
1388        }
1389        return err;
1390}
1391
/*
 * Register ndevs devices for drv on every bridge currently on
 * vme_bus_list.  Stops at the first failing bridge and returns its
 * error; devices created so far are cleaned up by
 * __vme_register_driver_bus() itself.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
        struct vme_bridge *bridge;
        int err = 0;

        mutex_lock(&vme_buses_lock);
        list_for_each_entry(bridge, &vme_bus_list, bus_list) {
                /*
                 * This cannot cause trouble as we already have vme_buses_lock
                 * and if the bridge is removed, it will have to go through
                 * vme_unregister_bridge() to do it (which calls remove() on
                 * the bridge which in turn tries to acquire vme_buses_lock and
                 * will have to wait).
                 */
                err = __vme_register_driver_bus(drv, bridge, ndevs);
                if (err)
                        break;
        }
        mutex_unlock(&vme_buses_lock);
        return err;
}
1413
1414int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1415{
1416        int err;
1417
1418        drv->driver.name = drv->name;
1419        drv->driver.bus = &vme_bus_type;
1420        INIT_LIST_HEAD(&drv->devices);
1421
1422        err = driver_register(&drv->driver);
1423        if (err)
1424                return err;
1425
1426        err = __vme_register_driver(drv, ndevs);
1427        if (err)
1428                driver_unregister(&drv->driver);
1429
1430        return err;
1431}
1432EXPORT_SYMBOL(vme_register_driver);
1433
1434void vme_unregister_driver(struct vme_driver *drv)
1435{
1436        struct vme_dev *dev, *dev_tmp;
1437
1438        mutex_lock(&vme_buses_lock);
1439        list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1440                list_del(&dev->drv_list);
1441                list_del(&dev->bridge_list);
1442                device_unregister(&dev->dev);
1443        }
1444        mutex_unlock(&vme_buses_lock);
1445
1446        driver_unregister(&drv->driver);
1447}
1448EXPORT_SYMBOL(vme_unregister_driver);
1449
1450/* - Bus Registration ------------------------------------------------------ */
1451
/*
 * Bus match callback.  A vme_dev belongs to the driver whose address
 * was stored in dev->platform_data by __vme_register_driver_bus().  If
 * that driver's match() hook rejects the device, platform_data is
 * cleared as a signal back to __vme_register_driver_bus(), which then
 * unregisters the device after device_register() returns.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
        struct vme_driver *vme_drv;

        vme_drv = container_of(drv, struct vme_driver, driver);

        if (dev->platform_data == vme_drv) {
                struct vme_dev *vdev = dev_to_vme_dev(dev);

                if (vme_drv->match && vme_drv->match(vdev))
                        return 1;

                /* Rejected: flag the device for removal by its creator */
                dev->platform_data = NULL;
        }
        return 0;
}
1468
1469static int vme_bus_probe(struct device *dev)
1470{
1471        int retval = -ENODEV;
1472        struct vme_driver *driver;
1473        struct vme_dev *vdev = dev_to_vme_dev(dev);
1474
1475        driver = dev->platform_data;
1476
1477        if (driver->probe != NULL)
1478                retval = driver->probe(vdev);
1479
1480        return retval;
1481}
1482
1483static int vme_bus_remove(struct device *dev)
1484{
1485        int retval = -ENODEV;
1486        struct vme_driver *driver;
1487        struct vme_dev *vdev = dev_to_vme_dev(dev);
1488
1489        driver = dev->platform_data;
1490
1491        if (driver->remove != NULL)
1492                retval = driver->remove(vdev);
1493
1494        return retval;
1495}
1496
/* The VME bus type shared by all VME bridge and device drivers */
struct bus_type vme_bus_type = {
        .name = "vme",
        .match = vme_bus_match,
        .probe = vme_bus_probe,
        .remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
1504
/* Register the VME bus type with the driver core at module load */
static int __init vme_init(void)
{
        return bus_register(&vme_bus_type);
}
1509
/* Unregister the VME bus type at module unload */
static void __exit vme_exit(void)
{
        bus_unregister(&vme_bus_type);
}
1514
1515MODULE_DESCRIPTION("VME bridge driver framework");
1516MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1517MODULE_LICENSE("GPL");
1518
1519module_init(vme_init);
1520module_exit(vme_exit);
1521
/* lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995. */