linux/drivers/iio/industrialio-event.c
<<
>>
Prefs
   1/* Industrial I/O event handling
   2 *
   3 * Copyright (c) 2008 Jonathan Cameron
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published by
   7 * the Free Software Foundation.
   8 *
   9 * Based on elements of hwmon and input subsystems.
  10 */
  11
  12#include <linux/anon_inodes.h>
  13#include <linux/device.h>
  14#include <linux/fs.h>
  15#include <linux/kernel.h>
  16#include <linux/kfifo.h>
  17#include <linux/module.h>
  18#include <linux/poll.h>
  19#include <linux/sched.h>
  20#include <linux/slab.h>
  21#include <linux/uaccess.h>
  22#include <linux/wait.h>
  23#include <linux/iio/iio.h>
  24#include "iio_core.h"
  25#include <linux/iio/sysfs.h>
  26#include <linux/iio/events.h>
  27
  28/**
  29 * struct iio_event_interface - chrdev interface for an event line
  30 * @wait:               wait queue to allow blocking reads of events
  31 * @det_events:         list of detected events
  32 * @dev_attr_list:      list of event interface sysfs attribute
  33 * @flags:              file operations related flags including busy flag.
  34 * @group:              event interface sysfs attribute group
  35 */
  36struct iio_event_interface {
  37        wait_queue_head_t       wait;
  38        DECLARE_KFIFO(det_events, struct iio_event_data, 16);
  39
  40        struct list_head        dev_attr_list;
  41        unsigned long           flags;
  42        struct attribute_group  group;
  43};
  44
  45int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
  46{
  47        struct iio_event_interface *ev_int = indio_dev->event_interface;
  48        struct iio_event_data ev;
  49        unsigned long flags;
  50        int copied;
  51
  52        /* Does anyone care? */
  53        spin_lock_irqsave(&ev_int->wait.lock, flags);
  54        if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
  55
  56                ev.id = ev_code;
  57                ev.timestamp = timestamp;
  58
  59                copied = kfifo_put(&ev_int->det_events, &ev);
  60                if (copied != 0)
  61                        wake_up_locked_poll(&ev_int->wait, POLLIN);
  62        }
  63        spin_unlock_irqrestore(&ev_int->wait.lock, flags);
  64
  65        return 0;
  66}
  67EXPORT_SYMBOL(iio_push_event);
  68
  69/**
  70 * iio_event_poll() - poll the event queue to find out if it has data
  71 */
  72static unsigned int iio_event_poll(struct file *filep,
  73                             struct poll_table_struct *wait)
  74{
  75        struct iio_event_interface *ev_int = filep->private_data;
  76        unsigned int events = 0;
  77
  78        poll_wait(filep, &ev_int->wait, wait);
  79
  80        spin_lock_irq(&ev_int->wait.lock);
  81        if (!kfifo_is_empty(&ev_int->det_events))
  82                events = POLLIN | POLLRDNORM;
  83        spin_unlock_irq(&ev_int->wait.lock);
  84
  85        return events;
  86}
  87
/*
 * iio_event_chrdev_read() - copy queued events to userspace
 *
 * Blocks (unless O_NONBLOCK) until at least one event is in the FIFO,
 * then copies as many whole events as fit in @count to @buf.
 * Returns the number of bytes copied, -EAGAIN for an empty non-blocking
 * read, -EINVAL for a buffer smaller than one event, or -ERESTARTSYS if
 * the wait was interrupted by a signal.
 *
 * NOTE(review): the whole body runs under ev_int->wait.lock;
 * wait_event_interruptible_locked_irq() drops and retakes that lock
 * internally, so the lock/unlock pairing here is deliberate and
 * order-sensitive.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int copied;
	int ret;

	/* Partial events are never returned. */
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	spin_lock_irq(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible_locked_irq(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	/* kfifo_to_user sets @copied even on short copies. */
	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	spin_unlock_irq(&ev_int->wait.lock);

	return ret ? ret : copied;
}
 121
 122static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 123{
 124        struct iio_event_interface *ev_int = filep->private_data;
 125
 126        spin_lock_irq(&ev_int->wait.lock);
 127        __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 128        /*
 129         * In order to maintain a clean state for reopening,
 130         * clear out any awaiting events. The mask will prevent
 131         * any new __iio_push_event calls running.
 132         */
 133        kfifo_reset_out(&ev_int->det_events);
 134        spin_unlock_irq(&ev_int->wait.lock);
 135
 136        return 0;
 137}
 138
 139static const struct file_operations iio_event_chrdev_fileops = {
 140        .read =  iio_event_chrdev_read,
 141        .poll =  iio_event_poll,
 142        .release = iio_event_chrdev_release,
 143        .owner = THIS_MODULE,
 144        .llseek = noop_llseek,
 145};
 146
 147int iio_event_getfd(struct iio_dev *indio_dev)
 148{
 149        struct iio_event_interface *ev_int = indio_dev->event_interface;
 150        int fd;
 151
 152        if (ev_int == NULL)
 153                return -ENODEV;
 154
 155        spin_lock_irq(&ev_int->wait.lock);
 156        if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
 157                spin_unlock_irq(&ev_int->wait.lock);
 158                return -EBUSY;
 159        }
 160        spin_unlock_irq(&ev_int->wait.lock);
 161        fd = anon_inode_getfd("iio:event",
 162                                &iio_event_chrdev_fileops, ev_int, O_RDONLY);
 163        if (fd < 0) {
 164                spin_lock_irq(&ev_int->wait.lock);
 165                __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 166                spin_unlock_irq(&ev_int->wait.lock);
 167        }
 168        return fd;
 169}
 170
/*
 * sysfs name fragments for each event type; indexed by the
 * IIO_EV_TYPE_* enum values, so entry order must match that enum.
 */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};
 178
/* sysfs name fragments for event direction; indexed by IIO_EV_DIR_*. */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
 184
 185static ssize_t iio_ev_state_store(struct device *dev,
 186                                  struct device_attribute *attr,
 187                                  const char *buf,
 188                                  size_t len)
 189{
 190        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 191        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 192        int ret;
 193        bool val;
 194
 195        ret = strtobool(buf, &val);
 196        if (ret < 0)
 197                return ret;
 198
 199        ret = indio_dev->info->write_event_config(indio_dev,
 200                                                  this_attr->address,
 201                                                  val);
 202        return (ret < 0) ? ret : len;
 203}
 204
 205static ssize_t iio_ev_state_show(struct device *dev,
 206                                 struct device_attribute *attr,
 207                                 char *buf)
 208{
 209        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 210        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 211        int val = indio_dev->info->read_event_config(indio_dev,
 212                                                     this_attr->address);
 213
 214        if (val < 0)
 215                return val;
 216        else
 217                return sprintf(buf, "%d\n", val);
 218}
 219
 220static ssize_t iio_ev_value_show(struct device *dev,
 221                                 struct device_attribute *attr,
 222                                 char *buf)
 223{
 224        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 225        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 226        int val, ret;
 227
 228        ret = indio_dev->info->read_event_value(indio_dev,
 229                                                this_attr->address, &val);
 230        if (ret < 0)
 231                return ret;
 232
 233        return sprintf(buf, "%d\n", val);
 234}
 235
 236static ssize_t iio_ev_value_store(struct device *dev,
 237                                  struct device_attribute *attr,
 238                                  const char *buf,
 239                                  size_t len)
 240{
 241        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 242        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 243        int val;
 244        int ret;
 245
 246        if (!indio_dev->info->write_event_value)
 247                return -EINVAL;
 248
 249        ret = kstrtoint(buf, 10, &val);
 250        if (ret)
 251                return ret;
 252
 253        ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
 254                                                 val);
 255        if (ret < 0)
 256                return ret;
 257
 258        return len;
 259}
 260
/*
 * iio_device_add_event_sysfs() - create per-channel event sysfs attributes
 * @indio_dev:	device being registered
 * @chan:	channel whose event_mask describes which events to expose
 *
 * For every bit set in chan->event_mask two attributes are created:
 * "<type>_<dir>_en" (enable) and "<type>_<dir>_value" (threshold).
 * Bit index i encodes type as i/IIO_EV_DIR_MAX and direction as
 * i%IIO_EV_DIR_MAX.
 *
 * Returns the number of attributes added, or a negative errno.
 */
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		/*
		 * Encode the event into the attribute address. The three
		 * branches deliberately pass the type/direction fields in
		 * different positions: each macro takes its arguments in a
		 * different order (see IIO_EVENT_CODE and friends in
		 * <linux/iio/events.h>).
		 */
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		/* "<...>_en": enable/disable, backed by *_event_config. */
		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		/* __iio_add_chan_devattr copies the name; postfix is ours. */
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		/* "<...>_value": threshold, backed by *_event_value. */
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
 333
 334static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
 335{
 336        struct iio_dev_attr *p, *n;
 337        list_for_each_entry_safe(p, n,
 338                                 &indio_dev->event_interface->
 339                                 dev_attr_list, l) {
 340                kfree(p->dev_attr.attr.name);
 341                kfree(p);
 342        }
 343}
 344
 345static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
 346{
 347        int j, ret, attrcount = 0;
 348
 349        /* Dynically created from the channels array */
 350        for (j = 0; j < indio_dev->num_channels; j++) {
 351                ret = iio_device_add_event_sysfs(indio_dev,
 352                                                 &indio_dev->channels[j]);
 353                if (ret < 0)
 354                        return ret;
 355                attrcount += ret;
 356        }
 357        return attrcount;
 358}
 359
 360static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
 361{
 362        int j;
 363
 364        for (j = 0; j < indio_dev->num_channels; j++)
 365                if (indio_dev->channels[j].event_mask != 0)
 366                        return true;
 367        return false;
 368}
 369
 370static void iio_setup_ev_int(struct iio_event_interface *ev_int)
 371{
 372        INIT_KFIFO(ev_int->det_events);
 373        init_waitqueue_head(&ev_int->wait);
 374}
 375
/* Name of the sysfs group all event attributes live under. */
static const char *iio_event_group_name = "events";
/*
 * iio_device_register_eventset() - set up the "events" sysfs group
 * @indio_dev:	device being registered
 *
 * Allocates the event interface, creates per-channel event attributes,
 * merges them with any static event_attrs the driver supplies, and hangs
 * the combined group off indio_dev->groups. A no-op (returning 0) when
 * the device declares no events at all.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is unwound.
 */
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);
	/* Count the driver's static event attributes (NULL-terminated). */
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/* Add the dynamic per-channel attributes on top. */
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	/* +1 for the NULL terminator sysfs expects. */
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	/* Static attributes first, in their original order... */
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}
 442
 443void iio_device_unregister_eventset(struct iio_dev *indio_dev)
 444{
 445        if (indio_dev->event_interface == NULL)
 446                return;
 447        __iio_remove_event_config_attrs(indio_dev);
 448        kfree(indio_dev->event_interface->group.attrs);
 449        kfree(indio_dev->event_interface);
 450}
 451
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.