linux/drivers/firewire/fw-cdev.c
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource {
        struct list_head link;
        void (*release)(struct client *client, struct client_resource *r);
        u32 handle;
};
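
/*
 * Every object a client allocates through this interface (outstanding
 * transactions, inbound requests, address ranges, config ROM
 * descriptors) embeds a client_resource.  The handle is what userspace
 * names in later ioctls; release() runs either on explicit
 * deallocation or from fw_device_op_release() when the file is closed
 * with resources still live.
 */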

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct response {
        struct event event;
        struct fw_transaction transaction;
        struct client *client;
        struct client_resource resource;
        struct fw_cdev_event_response response;
};

struct iso_interrupt {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
        u32 version;
        struct fw_device *device;
        spinlock_t lock;
        u32 resource_handle;
        struct list_head resource_list;
        struct list_head event_list;
        wait_queue_head_t wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head link;
};

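/*
 * The cdev ABI passes userspace pointers around as __u64 so that the
 * structs in <linux/firewire-cdev.h> have the same layout for 32-bit
 * and 64-bit userland.  These helpers do the round trip; going through
 * unsigned long truncates or zero-extends to the native pointer size.
 */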
static inline void __user *
u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;
        unsigned long flags;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        INIT_LIST_HEAD(&client->event_list);
        INIT_LIST_HEAD(&client->resource_list);
        spin_lock_init(&client->lock);
        init_waitqueue_head(&client->wait);

        file->private_data = client;

        spin_lock_irqsave(&device->card->lock, flags);
        list_add_tail(&client->link, &device->client_list);
        spin_unlock_irqrestore(&device->card->lock, flags);

        return 0;
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}

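/*
 * Hand the oldest queued event to userspace.  Blocks until an event
 * arrives or the device goes away.  At most one event is consumed per
 * read(); its two data chunks are copied back to back, truncated to
 * the caller's buffer size, and anything that doesn't fit is dropped.
 */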
static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
        unsigned long flags;
        struct event *event;
        size_t size, total;
        int i, retval;

        retval = wait_event_interruptible(client->wait,
                                          !list_empty(&client->event_list) ||
                                          fw_device_is_shutdown(client->device));
        if (retval < 0)
                return retval;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irqsave(&client->lock, flags);
        event = container_of(client->event_list.next, struct event, link);
        list_del(&event->link);
        spin_unlock_irqrestore(&client->lock, flags);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        retval = -EFAULT;
                        goto out;
                }
                total += size;
        }
        retval = total;

 out:
        kfree(event);

        return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
                  char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

/* caller must hold card->lock so that node pointers can be dereferenced here */
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                     struct client *client)
{
        struct fw_card *card = client->device->card;

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;
}

static void
for_each_client(struct fw_device *device,
                void (*callback)(struct client *client))
{
        struct fw_card *card = device->card;
        struct client *c;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(c, &device->client_list, link)
                callback(c);

        spin_unlock_irqrestore(&card->lock, flags);
}

static void
queue_bus_reset_event(struct client *client)
{
        struct bus_reset *bus_reset;

        bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
        if (bus_reset == NULL) {
                fw_notify("Out of memory when allocating bus reset event\n");
                return;
        }

        fill_bus_reset_event(&bus_reset->reset, client);

        queue_event(client, &bus_reset->event,
                    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
        struct fw_cdev_get_info *get_info = buffer;
        struct fw_cdev_event_bus_reset bus_reset;
        struct fw_card *card = client->device->card;
        unsigned long ret = 0;

        client->version = get_info->version;
        get_info->version = FW_CDEV_VERSION;

        down_read(&fw_device_rwsem);

        if (get_info->rom != 0) {
                void __user *uptr = u64_to_uptr(get_info->rom);
                size_t want = get_info->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(uptr, client->device->config_rom,
                                   min(want, have));
        }
        get_info->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        client->bus_reset_closure = get_info->bus_reset_closure;
        if (get_info->bus_reset != 0) {
                void __user *uptr = u64_to_uptr(get_info->bus_reset);
                unsigned long flags;

                spin_lock_irqsave(&card->lock, flags);
                fill_bus_reset_event(&bus_reset, client);
                spin_unlock_irqrestore(&card->lock, flags);

                if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        get_info->card = card->index;

        return 0;
}

static void
add_client_resource(struct client *client, struct client_resource *resource)
{
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        list_add_tail(&resource->link, &client->resource_list);
        resource->handle = client->resource_handle++;
        spin_unlock_irqrestore(&client->lock, flags);
}

static int
release_client_resource(struct client *client, u32 handle,
                        struct client_resource **resource)
{
        struct client_resource *r;
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        list_for_each_entry(r, &client->resource_list, link) {
                if (r->handle == handle) {
                        list_del(&r->link);
                        break;
                }
        }
        spin_unlock_irqrestore(&client->lock, flags);

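        /*
         * If the loop above ran to completion, r is not a real
         * resource: &r->link then equals the list head itself, which
         * is how list_for_each_entry() signals "not found".
         */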
        if (&r->link == &client->resource_list)
                return -EINVAL;

        if (resource)
                *resource = r;
        else
                r->release(client, r);

        return 0;
}

static void
release_transaction(struct client *client, struct client_resource *resource)
{
        struct response *response =
                container_of(resource, struct response, resource);

        fw_cancel_transaction(client->device->card, &response->transaction);
}

static void
complete_transaction(struct fw_card *card, int rcode,
                     void *payload, size_t length, void *data)
{
        struct response *response = data;
        struct client *client = response->client;
        unsigned long flags;
        struct fw_cdev_event_response *r = &response->response;

        if (length < r->length)
                r->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(r->data, payload, r->length);

        spin_lock_irqsave(&client->lock, flags);
        list_del(&response->resource.link);
        spin_unlock_irqrestore(&client->lock, flags);

        r->type   = FW_CDEV_EVENT_RESPONSE;
        r->rcode  = rcode;

        /*
         * In the case that sizeof(*r) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
                queue_event(client, &response->event, r, sizeof(*r),
                            r->data, r->length);
        else
                queue_event(client, &response->event, r, sizeof(*r) + r->length,
                            NULL, 0);
}

static int ioctl_send_request(struct client *client, void *buffer)
{
        struct fw_device *device = client->device;
        struct fw_cdev_send_request *request = buffer;
        struct response *response;

        /* What is the biggest size we'll accept, really? */
        if (request->length > 4096)
                return -EINVAL;

        response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
        if (response == NULL)
                return -ENOMEM;

        response->client = client;
        response->response.length = request->length;
        response->response.closure = request->closure;

        if (request->data &&
            copy_from_user(response->response.data,
                           u64_to_uptr(request->data), request->length)) {
                kfree(response);
                return -EFAULT;
        }

        response->resource.release = release_transaction;
        add_client_resource(client, &response->resource);

        fw_send_request(device->card, &response->transaction,
                        request->tcode & 0x1f,
                        device->node->node_id,
                        request->generation,
                        device->max_speed,
                        request->offset,
                        response->response.data, request->length,
                        complete_transaction, response);

        if (request->data)
                return sizeof(*request) + request->length;
        else
                return sizeof(*request);
}

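/*
 * Inbound request handling: ioctl_allocate() registers a range of
 * 1394 address space with an address_handler; handle_request() turns
 * each incoming request hitting that range into an
 * FW_CDEV_EVENT_REQUEST event, and the client completes it later with
 * ioctl_send_response().  If the client never answers, the resource
 * release path responds with RCODE_CONFLICT_ERROR on its behalf.
 */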
struct address_handler {
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
        struct client_resource resource;
};

struct request {
        struct fw_request *request;
        void *data;
        size_t length;
        struct client_resource resource;
};

struct request_event {
        struct event event;
        struct fw_cdev_event_request request;
};

static void
release_request(struct client *client, struct client_resource *resource)
{
        struct request *request =
                container_of(resource, struct request, resource);

        fw_send_response(client->device->card, request->request,
                         RCODE_CONFLICT_ERROR);
        kfree(request);
}

static void
handle_request(struct fw_card *card, struct fw_request *r,
               int tcode, int destination, int source,
               int generation, int speed,
               unsigned long long offset,
               void *payload, size_t length, void *callback_data)
{
        struct address_handler *handler = callback_data;
        struct request *request;
        struct request_event *e;
        struct client *client = handler->client;

        request = kmalloc(sizeof(*request), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (request == NULL || e == NULL) {
                kfree(request);
                kfree(e);
                fw_send_response(card, r, RCODE_CONFLICT_ERROR);
                return;
        }

        request->request = r;
        request->data    = payload;
        request->length  = length;

        request->resource.release = release_request;
        add_client_resource(client, &request->resource);

        e->request.type    = FW_CDEV_EVENT_REQUEST;
        e->request.tcode   = tcode;
        e->request.offset  = offset;
        e->request.length  = length;
        e->request.handle  = request->resource.handle;
        e->request.closure = handler->closure;

        queue_event(client, &e->event,
                    &e->request, sizeof(e->request), payload, length);
}

static void
release_address_handler(struct client *client,
                        struct client_resource *resource)
{
        struct address_handler *handler =
                container_of(resource, struct address_handler, resource);

        fw_core_remove_address_handler(&handler->handler);
        kfree(handler);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
        struct fw_cdev_allocate *request = buffer;
        struct address_handler *handler;
        struct fw_address_region region;

        handler = kmalloc(sizeof(*handler), GFP_KERNEL);
        if (handler == NULL)
                return -ENOMEM;

        region.start = request->offset;
        region.end = request->offset + request->length;
        handler->handler.length = request->length;
        handler->handler.address_callback = handle_request;
        handler->handler.callback_data = handler;
        handler->closure = request->closure;
        handler->client = client;

        if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
                kfree(handler);
                return -EBUSY;
        }

        handler->resource.release = release_address_handler;
        add_client_resource(client, &handler->resource);
        request->handle = handler->resource.handle;

        return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
        struct fw_cdev_deallocate *request = buffer;

        return release_client_resource(client, request->handle, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
        struct fw_cdev_send_response *request = buffer;
        struct client_resource *resource;
        struct request *r;

        if (release_client_resource(client, request->handle, &resource) < 0)
                return -EINVAL;
        r = container_of(resource, struct request, resource);
        if (request->length < r->length)
                r->length = request->length;
        if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
                return -EFAULT;

        fw_send_response(client->device->card, r->request, request->rcode);
        kfree(r);

        return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
        struct fw_cdev_initiate_bus_reset *request = buffer;
        int short_reset;

        short_reset = (request->type == FW_CDEV_SHORT_RESET);

        return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
        struct fw_descriptor d;
        struct client_resource resource;
        u32 data[0];
};
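
/*
 * The descriptor contents live in the trailing data[] array, which
 * ioctl_add_descriptor() allocates together with the struct in a
 * single allocation.
 */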

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor *descriptor =
                container_of(resource, struct descriptor, resource);

        fw_core_remove_descriptor(&descriptor->d);
        kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_add_descriptor *request = buffer;
        struct descriptor *descriptor;
        int retval;

        if (request->length > 256)
                return -EINVAL;

        descriptor =
                kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
        if (descriptor == NULL)
                return -ENOMEM;

        if (copy_from_user(descriptor->data,
                           u64_to_uptr(request->data), request->length * 4)) {
                kfree(descriptor);
                return -EFAULT;
        }

        descriptor->d.length = request->length;
        descriptor->d.immediate = request->immediate;
        descriptor->d.key = request->key;
        descriptor->d.data = descriptor->data;

        retval = fw_core_add_descriptor(&descriptor->d);
        if (retval < 0) {
                kfree(descriptor);
                return retval;
        }

        descriptor->resource.release = release_descriptor;
        add_client_resource(client, &descriptor->resource);
        request->handle = descriptor->resource.handle;

        return 0;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_remove_descriptor *request = buffer;

        return release_client_resource(client, request->handle, NULL);
}

static void
iso_callback(struct fw_iso_context *context, u32 cycle,
             size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt *irq;

        irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
        if (irq == NULL)
                return;

        irq->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
        irq->interrupt.closure   = client->iso_closure;
        irq->interrupt.cycle     = cycle;
        irq->interrupt.header_length = header_length;
        memcpy(irq->interrupt.header, header, header_length);
        queue_event(client, &irq->event, &irq->interrupt,
                    sizeof(irq->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
        struct fw_cdev_create_iso_context *request = buffer;
        struct fw_iso_context *context;

        /* We only support one context at this time. */
        if (client->iso_context != NULL)
                return -EBUSY;

        if (request->channel > 63)
                return -EINVAL;

        switch (request->type) {
        case FW_ISO_CONTEXT_RECEIVE:
                if (request->header_size < 4 || (request->header_size & 3))
                        return -EINVAL;

                break;

        case FW_ISO_CONTEXT_TRANSMIT:
                if (request->speed > SCODE_3200)
                        return -EINVAL;

                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card,
                                        request->type,
                                        request->channel,
                                        request->speed,
                                        request->header_size,
                                        iso_callback, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        client->iso_closure = request->closure;
        client->iso_context = context;

        /* We only support one context at this time. */
        request->handle = 0;

        return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)   ((v) & 0xffff)
#define GET_INTERRUPT(v)        (((v) >> 16) & 0x01)
#define GET_SKIP(v)             (((v) >> 17) & 0x01)
#define GET_TAG(v)              (((v) >> 18) & 0x03)
#define GET_SY(v)               (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)    (((v) >> 24) & 0xff)
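
/*
 * The control word is packed by userspace with the matching
 * FW_CDEV_ISO_* macros from <linux/firewire-cdev.h>:
 *
 *      bits  0..15  payload_length
 *      bit      16  interrupt
 *      bit      17  skip
 *      bits 18..19  tag
 *      bits 20..23  sy
 *      bits 24..31  header_length
 */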

static int ioctl_queue_iso(struct client *client, void *buffer)
{
        struct fw_cdev_queue_iso *request = buffer;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, header_length;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || request->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we set up the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the request->data pointer is ignored.
         */

        payload = (unsigned long)request->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (request->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

        if (!access_ok(VERIFY_READ, p, request->size))
                return -EFAULT;

        end = (void __user *)p + request->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
                        header_length = u.packet.header_length;
                } else {
                        /*
                         * We require that header_length is a multiple of
                         * the fixed header size, ctx->header_size.
                         */
                        if (ctx->header_size == 0) {
                                if (u.packet.header_length > 0)
                                        return -EINVAL;
                        } else if (u.packet.header_length % ctx->header_size != 0) {
                                return -EINVAL;
                        }
                        header_length = 0;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, header_length))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        request->size    -= uptr_to_u64(p) - request->packets;
        request->packets  = uptr_to_u64(p);
        request->data     = client->vm_start + payload;

        return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
        struct fw_cdev_start_iso *request = buffer;

        if (client->iso_context == NULL || request->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
                if (request->tags == 0 || request->tags > 15)
                        return -EINVAL;

                if (request->sync > 15)
                        return -EINVAL;
        }

        return fw_iso_context_start(client->iso_context, request->cycle,
                                    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
        struct fw_cdev_stop_iso *request = buffer;

        if (client->iso_context == NULL || request->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
        struct fw_cdev_get_cycle_timer *request = buffer;
        struct fw_card *card = client->device->card;
        unsigned long long bus_time;
        struct timeval tv;
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);

        bus_time = card->driver->get_bus_time(card);
        do_gettimeofday(&tv);

        local_irq_restore(flags);
        preempt_enable();

        request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
        request->cycle_timer = bus_time & 0xffffffff;
        return 0;
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
        ioctl_get_info,
        ioctl_send_request,
        ioctl_allocate,
        ioctl_deallocate,
        ioctl_send_response,
        ioctl_initiate_bus_reset,
        ioctl_add_descriptor,
        ioctl_remove_descriptor,
        ioctl_create_iso_context,
        ioctl_queue_iso,
        ioctl_start_iso,
        ioctl_stop_iso,
        ioctl_get_cycle_timer,
};
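
/*
 * dispatch_ioctl() indexes this table with _IOC_NR(cmd), so the order
 * of entries must match the numbering of the FW_CDEV_IOC_* commands
 * in <linux/firewire-cdev.h>.
 */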

static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
        char buffer[256];
        int retval;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
                return -EINVAL;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
        if (retval < 0)
                return retval;

        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        return retval;
}
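
/*
 * A minimal sketch of how a userspace client might drive this
 * interface, assuming only the FW_CDEV_IOC_* numbers and structs from
 * <linux/firewire-cdev.h> and a device node such as /dev/fw0 (the
 * node name depends on udev configuration):
 *
 *      int fd = open("/dev/fw0", O_RDWR);
 *      struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *
 *      if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
 *              err(1, "FW_CDEV_IOC_GET_INFO");
 *
 *      union fw_cdev_event event;
 *      read(fd, &event, sizeof(event));   // blocks for the next event
 *      switch (event.common.type) {
 *      case FW_CDEV_EVENT_BUS_RESET:
 *              ...
 *      }
 *
 * Real clients read into a larger buffer, since response data and iso
 * headers follow the fixed-size event structs.
 */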

static long
fw_device_op_ioctl(struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, retval;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

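        /*
         * A writable mapping means the client will fill the buffer,
         * i.e. isochronous transmit (memory to device); a read-only
         * mapping implies receive (device to memory).
         */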
        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        retval = fw_iso_buffer_init(&client->buffer, client->device->card,
                                    page_count, direction);
        if (retval < 0)
                return retval;

        retval = fw_iso_buffer_map(&client->buffer, vma);
        if (retval < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return retval;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *e, *next_e;
        struct client_resource *r, *next_r;
        unsigned long flags;

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        list_for_each_entry_safe(r, next_r, &client->resource_list, link)
                r->release(client, r);

        /*
         * FIXME: We should wait for the async tasklets to stop
         * running before freeing the memory.
         */

        list_for_each_entry_safe(e, next_e, &client->event_list, link)
                kfree(e);

        spin_lock_irqsave(&client->device->card->lock, flags);
        list_del(&client->link);
        spin_unlock_irqrestore(&client->device->card->lock, flags);

        fw_device_put(client->device);
        kfree(client);

        return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .poll           = fw_device_op_poll,
        .release        = fw_device_op_release,
        .mmap           = fw_device_op_mmap,

#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};