linux/drivers/iio/industrialio-buffer.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp: file structure pointer for the char device
 * @buf: destination buffer in userspace
 * @n: maximum number of bytes to read
 * @f_ps: file position offset (unused)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp: file structure pointer for the char device
 * @wait: poll table entry to register the wait queue on
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}

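/**
 * iio_buffer_init() - initialize an iio_buffer's core fields
 * @buffer: buffer structure to be initialized
 *
 * Sets up the demux list and the wait queue that poll() and blocking
 * reads sleep on. Drivers call this on a freshly allocated buffer
 * before registering it with the core.
 */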
void iio_buffer_init(struct iio_buffer *buffer)
{
        INIT_LIST_HEAD(&buffer->demux_list);
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

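/*
 * Formats the scan element type for sysfs, e.g. "le:s14/16>>2" for a
 * little-endian, signed, 14-bit value stored in 16 bits and shifted
 * right by 2. IIO_CPU endianness is resolved to the build's byte order.
 */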
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        ret = test_bit(to_iio_dev_attr(attr)->address,
                       indio_dev->buffer->scan_mask);

        return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        return 0;
}

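/*
 * sysfs store for a scan element's "en" attribute: enables or disables
 * the channel in the buffer's scan mask. Changes are refused with
 * -EBUSY while the buffer is enabled.
 */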
static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret;
        bool state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev)) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool state;

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev)) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
        indio_dev->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

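/**
 * iio_buffer_register() - build the sysfs interface for a device's buffer
 * @indio_dev: the device whose buffer is being registered
 * @channels: array of channel specifications to expose as scan elements
 * @num_channels: number of entries in @channels
 *
 * Creates the scan_elements group (index, type and en attributes per
 * channel), grows masklength to cover the largest scan index, allocates
 * the buffer's scan mask and hooks any driver-supplied buffer attributes
 * into the device's attribute groups.
 */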
int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                /* Create the per-channel scan element attributes */
                for (i = 0; i < num_channels; i++) {
                        if (channels[i].scan_index < 0)
                                continue;

                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                indio_dev->scan_index_timestamp =
                                        channels[i].scan_index;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                                    sizeof(*buffer->scan_mask),
                                                    GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
                                              sizeof(buffer->scan_el_group.attrs[0]),
                                              GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs,
                       buffer->scan_el_attrs->attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

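/**
 * iio_buffer_unregister() - release resources taken by iio_buffer_register()
 * @indio_dev: the device whose buffer is being unregistered
 */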
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

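/**
 * iio_buffer_read_length() - sysfs read of the number of datums the
 *                            buffer can hold
 * @dev: device associated with the buffer
 * @attr: the length attribute
 * @buf: output buffer for the formatted value
 */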
ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

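/**
 * iio_buffer_write_length() - sysfs write of the buffer length in datums
 * @dev: device associated with the buffer
 * @attr: the length attribute
 * @buf: string containing the requested length
 * @len: number of bytes in @buf
 *
 * Resizing is refused with -EBUSY while the buffer is enabled.
 */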
ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        unsigned long val;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = kstrtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev)) {
                ret = -EBUSY;
        } else {
                if (buffer->access->set_length)
                        buffer->access->set_length(buffer, val);
                ret = 0;
        }
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

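/**
 * iio_buffer_store_enable() - sysfs write to enable or disable the buffer
 * @dev: device associated with the buffer
 * @attr: the enable attribute
 * @buf: "0" disables the buffer, anything else enables it
 * @len: number of bytes in @buf
 *
 * Walks the device through the preenable/postenable (or
 * predisable/postdisable) callbacks and switches currentmode between
 * direct and buffered operation.
 */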
ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        mutex_lock(&indio_dev->mlock);
        previous_mode = indio_dev->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = iio_buffer_enabled(indio_dev);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-buffer: buffer already in requested state\n");
                goto done;
        }
        if (requested_state) {
                if (indio_dev->setup_ops->preenable) {
                        ret = indio_dev->setup_ops->preenable(indio_dev);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: buffer preenable failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed\n");
                                goto error_ret;
                        }
                }
                /*
                 * A device may support both of these modes; triggered
                 * mode takes precedence.
                 */
                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
                        if (!indio_dev->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                goto error_ret;
                        }
                        indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
                } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                        indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (indio_dev->setup_ops->postenable) {
                        ret = indio_dev->setup_ops->postenable(indio_dev);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: postenable failed\n");
                                indio_dev->currentmode = previous_mode;
                                if (indio_dev->setup_ops->postdisable)
                                        indio_dev->setup_ops->postdisable(indio_dev);
                                goto error_ret;
                        }
                }
        } else {
                if (indio_dev->setup_ops->predisable) {
                        ret = indio_dev->setup_ops->predisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->setup_ops->postdisable) {
                        ret = indio_dev->setup_ops->postdisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&indio_dev->mlock);
        return len;

error_ret:
        mutex_unlock(&indio_dev->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

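/**
 * iio_buffer_show_enable() - sysfs read of the buffer's enabled state
 * @dev: device associated with the buffer
 * @attr: the enable attribute
 * @buf: output buffer, receives "1" if enabled, "0" otherwise
 */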
ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/*
 * Find the first entry in the zero-terminated array of available masks
 * that the requested mask is a subset of. NULL is used as the error
 * indicator, since an empty match makes no sense.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
                                          unsigned int masklength,
                                          const unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}

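/*
 * Scan elements are packed with natural alignment: each sample is aligned
 * to its own storage size. For example, a 16-bit sample followed by an
 * 8-byte timestamp occupies bytes 0-1, bytes 2-7 are padding, and the
 * timestamp lands at bytes 8-15, giving 16 bytes per scan.
 */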
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                  const unsigned long *mask, bool timestamp)
{
        const struct iio_chan_spec *ch;
        unsigned bytes = 0;
        int length, i;

        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask,
                         indio_dev->masklength) {
                ch = iio_find_channel_from_si(indio_dev, i);
                length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        if (timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                                              indio_dev->scan_index_timestamp);
                length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        return bytes;
}

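/**
 * iio_sw_buffer_preenable() - generic preenable helper for software buffers
 * @indio_dev: the device about to be enabled
 *
 * Computes how many bytes a full scan occupies, picks the matching
 * hardware scan mask (or uses the requested mask directly if the driver
 * does not restrict the available masks) and rebuilds the demux table.
 */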
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = indio_dev->buffer;

        dev_dbg(&indio_dev->dev, "%s\n", __func__);

        /* How much space will the demuxed element take? */
        indio_dev->scan_bytes =
                iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
                                       buffer->scan_timestamp);
        buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);

        /* What scan mask do we actually have? */
        if (indio_dev->available_scan_masks)
                indio_dev->active_scan_mask =
                        iio_scan_mask_match(indio_dev->available_scan_masks,
                                            indio_dev->masklength,
                                            buffer->scan_mask);
        else
                indio_dev->active_scan_mask = buffer->scan_mask;

        if (indio_dev->active_scan_mask == NULL)
                return -EINVAL;

        iio_update_demux(indio_dev);

        if (indio_dev->info->update_scan_mode)
                return indio_dev->info
                        ->update_scan_mode(indio_dev,
                                           indio_dev->active_scan_mask);
        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
        const unsigned long *mask)
{
        return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
        const unsigned long *mask)
{
        if (!indio_dev->setup_ops->validate_scan_mask)
                return true;

        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * The trial mask (current mask plus the new bit) is validated against the
 * driver's constraints before being committed.
 **/
int iio_scan_mask_set(struct iio_dev *indio_dev,
                      struct iio_buffer *buffer, int bit)
{
        const unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);

        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN(1, "Trying to set scanmask prior to registering buffer\n");
                goto err_invalid_mask;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (!iio_validate_scan_mask(indio_dev, trialmask))
                goto err_invalid_mask;

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask)
                        goto err_invalid_mask;
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

        kfree(trialmask);

        return 0;

err_invalid_mask:
        kfree(trialmask);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

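/**
 * iio_scan_mask_query() - query state of a bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be queried
 */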
int iio_scan_mask_query(struct iio_dev *indio_dev,
                        struct iio_buffer *buffer, int bit)
{
        if (bit >= indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;

        return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:       index to copy from
 * @to:         index to copy to
 * @length:     how many bytes to copy
 * @l:          list head used for management
 */
struct iio_demux_table {
        unsigned from;
        unsigned to;
        unsigned length;
        struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
                                unsigned char *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                       datain + t->from, t->length);

        return buffer->demux_bounce;
}

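/**
 * iio_push_to_buffer() - store a sample set in a buffer
 * @buffer: buffer the data is pushed to
 * @data: raw sample set, laid out per the device's active scan mask
 *
 * If the user-requested layout differs from the captured one, the data
 * is first repacked via the demux table built by iio_update_demux().
 */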
int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
        unsigned char *dataout = iio_demux(buffer, data);

        return buffer->access->store_to(buffer, dataout);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
        struct iio_demux_table *p, *q;

        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }
}

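/*
 * Rebuild the list of memcpy operations that repack a scan captured
 * according to active_scan_mask into the layout the user requested via
 * the buffer's scan_mask. If the two masks match, no table is built and
 * iio_demux() passes data through untouched.
 */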
int iio_update_demux(struct iio_dev *indio_dev)
{
        const struct iio_chan_spec *ch;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, in_ind = -1, out_ind, length;
        unsigned in_loc = 0, out_loc = 0;
        struct iio_demux_table *p;

        /* Clear out any old demux */
        iio_buffer_demux_free(buffer);
        kfree(buffer->demux_bounce);
        buffer->demux_bounce = NULL;

        /* First work out which scan mode we will actually have */
        if (bitmap_equal(indio_dev->active_scan_mask,
                         buffer->scan_mask,
                         indio_dev->masklength))
                return 0;

        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
                         buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
                                       in_ind + 1);
                while (in_ind != out_ind) {
                        /* Skip a captured channel the user did not request */
                        ch = iio_find_channel_from_si(indio_dev, in_ind);
                        length = ch->scan_type.storagebits / 8;
                        /* Make sure we are aligned */
                        in_loc += length;
                        if (in_loc % length)
                                in_loc += length - in_loc % length;
                        in_ind = find_next_bit(indio_dev->active_scan_mask,
                                               indio_dev->masklength,
                                               in_ind + 1);
                }
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto error_clear_mux_table;
                }
                ch = iio_find_channel_from_si(indio_dev, in_ind);
                length = ch->scan_type.storagebits / 8;
                if (out_loc % length)
                        out_loc += length - out_loc % length;
                if (in_loc % length)
                        in_loc += length - in_loc % length;
                p->from = in_loc;
                p->to = out_loc;
                p->length = length;
                list_add_tail(&p->l, &buffer->demux_list);
                out_loc += length;
                in_loc += length;
        }
        /* Relies on scan_timestamp being last */
        if (buffer->scan_timestamp) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto error_clear_mux_table;
                }
                ch = iio_find_channel_from_si(indio_dev,
                        indio_dev->scan_index_timestamp);
                length = ch->scan_type.storagebits / 8;
                if (out_loc % length)
                        out_loc += length - out_loc % length;
                if (in_loc % length)
                        in_loc += length - in_loc % length;
                p->from = in_loc;
                p->to = out_loc;
                p->length = length;
                list_add_tail(&p->l, &buffer->demux_list);
                out_loc += length;
                in_loc += length;
        }
        buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
        if (buffer->demux_bounce == NULL) {
                ret = -ENOMEM;
                goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        iio_buffer_demux_free(buffer);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);