linux/drivers/iio/inkern.c
<<
>>
Prefs
   1/* The industrial I/O core in kernel channel mapping
   2 *
   3 * Copyright (c) 2011 Jonathan Cameron
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published by
   7 * the Free Software Foundation.
   8 */
   9#include <linux/err.h>
  10#include <linux/export.h>
  11#include <linux/slab.h>
  12#include <linux/mutex.h>
  13#include <linux/of.h>
  14
  15#include <linux/iio/iio.h>
  16#include "iio_core.h"
  17#include <linux/iio/machine.h>
  18#include <linux/iio/driver.h>
  19#include <linux/iio/consumer.h>
  20
/*
 * Internal bookkeeping for one registered consumer mapping: ties a
 * caller-supplied struct iio_map to the provider iio_dev so lookups can
 * walk the global iio_map_list.
 */
struct iio_map_internal {
	struct iio_dev *indio_dev;	/* provider device for this mapping */
	struct iio_map *map;		/* caller-owned map entry (not copied) */
	struct list_head l;		/* link in iio_map_list */
};
  26
/* Global list of registered consumer mappings, guarded by iio_map_list_lock. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
  29
  30int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
  31{
  32        int i = 0, ret = 0;
  33        struct iio_map_internal *mapi;
  34
  35        if (maps == NULL)
  36                return 0;
  37
  38        mutex_lock(&iio_map_list_lock);
  39        while (maps[i].consumer_dev_name != NULL) {
  40                mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
  41                if (mapi == NULL) {
  42                        ret = -ENOMEM;
  43                        goto error_ret;
  44                }
  45                mapi->map = &maps[i];
  46                mapi->indio_dev = indio_dev;
  47                list_add(&mapi->l, &iio_map_list);
  48                i++;
  49        }
  50error_ret:
  51        mutex_unlock(&iio_map_list_lock);
  52
  53        return ret;
  54}
  55EXPORT_SYMBOL_GPL(iio_map_array_register);
  56
  57
  58/*
  59 * Remove all map entries associated with the given iio device
  60 */
  61int iio_map_array_unregister(struct iio_dev *indio_dev)
  62{
  63        int ret = -ENODEV;
  64        struct iio_map_internal *mapi;
  65        struct list_head *pos, *tmp;
  66
  67        mutex_lock(&iio_map_list_lock);
  68        list_for_each_safe(pos, tmp, &iio_map_list) {
  69                mapi = list_entry(pos, struct iio_map_internal, l);
  70                if (indio_dev == mapi->indio_dev) {
  71                        list_del(&mapi->l);
  72                        kfree(mapi);
  73                        ret = 0;
  74                }
  75        }
  76        mutex_unlock(&iio_map_list_lock);
  77        return ret;
  78}
  79EXPORT_SYMBOL_GPL(iio_map_array_unregister);
  80
  81static const struct iio_chan_spec
  82*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
  83{
  84        int i;
  85        const struct iio_chan_spec *chan = NULL;
  86
  87        for (i = 0; i < indio_dev->num_channels; i++)
  88                if (indio_dev->channels[i].datasheet_name &&
  89                    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
  90                        chan = &indio_dev->channels[i];
  91                        break;
  92                }
  93        return chan;
  94}
  95
  96#ifdef CONFIG_OF
  97
  98static int iio_dev_node_match(struct device *dev, void *data)
  99{
 100        return dev->of_node == data && dev->type == &iio_device_type;
 101}
 102
/*
 * Resolve entry @index of @np's "io-channels" phandle list into @channel.
 *
 * On success the channel keeps the device reference taken by
 * bus_find_device(); the caller is responsible for dropping it with
 * iio_device_put() when done with the channel.
 *
 * Returns 0, -EPROBE_DEFER if the provider device has not registered
 * yet, or a negative errno on parse or lookup failure.
 */
static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	/* bus_find_device() takes a reference on the matched device. */
	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;	/* provider may probe later */

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	/* First phandle argument selects the channel; default to 0. */
	index = iiospec.args_count ? iiospec.args[0] : 0;
	if (index >= indio_dev->num_channels) {
		err = -EINVAL;
		goto err_put;
	}
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	/* Drop the reference taken by bus_find_device() above. */
	iio_device_put(indio_dev);
	return err;
}
 138
 139static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
 140{
 141        struct iio_channel *channel;
 142        int err;
 143
 144        if (index < 0)
 145                return ERR_PTR(-EINVAL);
 146
 147        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
 148        if (channel == NULL)
 149                return ERR_PTR(-ENOMEM);
 150
 151        err = __of_iio_channel_get(channel, np, index);
 152        if (err)
 153                goto err_free_channel;
 154
 155        return channel;
 156
 157err_free_channel:
 158        kfree(channel);
 159        return ERR_PTR(err);
 160}
 161
/*
 * Look up an IIO channel for @np, optionally by @name, walking up the
 * device tree through parents that carry "io-channel-ranges".
 *
 * Returns a channel on success, NULL when @np is NULL on entry, or an
 * ERR_PTR from the last attempted lookup (including -EPROBE_DEFER when
 * the provider has not registered yet).
 */
static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property.  If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan))
			break;
		else if (name && index >= 0) {
			/*
			 * The name matched on this node but resolving the
			 * channel failed: report and propagate rather than
			 * trying the parent.
			 */
			pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
				np->full_name, name ? name : "", index);
			return chan;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has a "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			break;
	}
	return chan;
}
 200
 201static struct iio_channel *of_iio_channel_get_all(struct device *dev)
 202{
 203        struct iio_channel *chans;
 204        int i, mapind, nummaps = 0;
 205        int ret;
 206
 207        do {
 208                ret = of_parse_phandle_with_args(dev->of_node,
 209                                                 "io-channels",
 210                                                 "#io-channel-cells",
 211                                                 nummaps, NULL);
 212                if (ret < 0)
 213                        break;
 214        } while (++nummaps);
 215
 216        if (nummaps == 0)       /* no error, return NULL to search map table */
 217                return NULL;
 218
 219        /* NULL terminated array to save passing size */
 220        chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
 221        if (chans == NULL)
 222                return ERR_PTR(-ENOMEM);
 223
 224        /* Search for OF matches */
 225        for (mapind = 0; mapind < nummaps; mapind++) {
 226                ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
 227                                           mapind);
 228                if (ret)
 229                        goto error_free_chans;
 230        }
 231        return chans;
 232
 233error_free_chans:
 234        for (i = 0; i < mapind; i++)
 235                iio_device_put(chans[i].indio_dev);
 236        kfree(chans);
 237        return ERR_PTR(ret);
 238}
 239
 240#else /* CONFIG_OF */
 241
/* !CONFIG_OF stub: no device tree, so an OF lookup never matches and
 * callers fall through to the board map table. */
static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}
 247
/* !CONFIG_OF stub: returning NULL makes iio_channel_get_all() search the
 * registered map table instead. */
static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}
 252
 253#endif /* CONFIG_OF */
 254
/*
 * Look up a consumer channel in the registered map table by consumer
 * device name and/or consumer channel name (either may be NULL, but not
 * both).  On success the returned channel holds a reference on the
 * provider device; release it with iio_channel_release().
 */
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		/* Pin the provider while we build the channel. */
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	/* Resolve the provider channel by its datasheet label, if given. */
	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	/* Drop the provider reference taken above. */
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
 307
 308struct iio_channel *iio_channel_get(struct device *dev,
 309                                    const char *channel_name)
 310{
 311        const char *name = dev ? dev_name(dev) : NULL;
 312        struct iio_channel *channel;
 313
 314        if (dev) {
 315                channel = of_iio_channel_get_by_name(dev->of_node,
 316                                                     channel_name);
 317                if (channel != NULL)
 318                        return channel;
 319        }
 320        return iio_channel_get_sys(name, channel_name);
 321}
 322EXPORT_SYMBOL_GPL(iio_channel_get);
 323
/*
 * Release a channel obtained from iio_channel_get(): drops the provider
 * device reference and frees the consumer descriptor.
 */
void iio_channel_release(struct iio_channel *channel)
{
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
 330
 331struct iio_channel *iio_channel_get_all(struct device *dev)
 332{
 333        const char *name;
 334        struct iio_channel *chans;
 335        struct iio_map_internal *c = NULL;
 336        int nummaps = 0;
 337        int mapind = 0;
 338        int i, ret;
 339
 340        if (dev == NULL)
 341                return ERR_PTR(-EINVAL);
 342
 343        chans = of_iio_channel_get_all(dev);
 344        if (chans)
 345                return chans;
 346
 347        name = dev_name(dev);
 348
 349        mutex_lock(&iio_map_list_lock);
 350        /* first count the matching maps */
 351        list_for_each_entry(c, &iio_map_list, l)
 352                if (name && strcmp(name, c->map->consumer_dev_name) != 0)
 353                        continue;
 354                else
 355                        nummaps++;
 356
 357        if (nummaps == 0) {
 358                ret = -ENODEV;
 359                goto error_ret;
 360        }
 361
 362        /* NULL terminated array to save passing size */
 363        chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
 364        if (chans == NULL) {
 365                ret = -ENOMEM;
 366                goto error_ret;
 367        }
 368
 369        /* for each map fill in the chans element */
 370        list_for_each_entry(c, &iio_map_list, l) {
 371                if (name && strcmp(name, c->map->consumer_dev_name) != 0)
 372                        continue;
 373                chans[mapind].indio_dev = c->indio_dev;
 374                chans[mapind].data = c->map->consumer_data;
 375                chans[mapind].channel =
 376                        iio_chan_spec_from_name(chans[mapind].indio_dev,
 377                                                c->map->adc_channel_label);
 378                if (chans[mapind].channel == NULL) {
 379                        ret = -EINVAL;
 380                        goto error_free_chans;
 381                }
 382                iio_device_get(chans[mapind].indio_dev);
 383                mapind++;
 384        }
 385        if (mapind == 0) {
 386                ret = -ENODEV;
 387                goto error_free_chans;
 388        }
 389        mutex_unlock(&iio_map_list_lock);
 390
 391        return chans;
 392
 393error_free_chans:
 394        for (i = 0; i < nummaps; i++)
 395                iio_device_put(chans[i].indio_dev);
 396        kfree(chans);
 397error_ret:
 398        mutex_unlock(&iio_map_list_lock);
 399
 400        return ERR_PTR(ret);
 401}
 402EXPORT_SYMBOL_GPL(iio_channel_get_all);
 403
 404void iio_channel_release_all(struct iio_channel *channels)
 405{
 406        struct iio_channel *chan = &channels[0];
 407
 408        while (chan->indio_dev) {
 409                iio_device_put(chan->indio_dev);
 410                chan++;
 411        }
 412        kfree(channels);
 413}
 414EXPORT_SYMBOL_GPL(iio_channel_release_all);
 415
 416static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
 417        enum iio_chan_info_enum info)
 418{
 419        int unused;
 420
 421        if (val2 == NULL)
 422                val2 = &unused;
 423
 424        return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
 425                                                val, val2, info);
 426}
 427
 428int iio_read_channel_raw(struct iio_channel *chan, int *val)
 429{
 430        int ret;
 431
 432        mutex_lock(&chan->indio_dev->info_exist_lock);
 433        if (chan->indio_dev->info == NULL) {
 434                ret = -ENODEV;
 435                goto err_unlock;
 436        }
 437
 438        ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
 439err_unlock:
 440        mutex_unlock(&chan->indio_dev->info_exist_lock);
 441
 442        return ret;
 443}
 444EXPORT_SYMBOL_GPL(iio_read_channel_raw);
 445
/*
 * Convert a raw reading to a processed value by applying the channel's
 * offset and scale; the result is additionally multiplied by @scale
 * (e.g. pass 1000 for milli-units).  Caller holds info_exist_lock.
 */
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	/* Offset is optional: on error, treat it as zero. */
	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	/* scale_type says how (scale_val, scale_val2) encode the factor. */
	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		/*
		 * A negative scale_val2 carries the sign of the composite
		 * scale value (integer part plus micro fraction).
		 */
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		/* Same sign convention as above, nano-unit fraction. */
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		/* Factor is scale_val / scale_val2. */
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		/* Factor is scale_val / 2^scale_val2. */
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
 495
 496int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 497        int *processed, unsigned int scale)
 498{
 499        int ret;
 500
 501        mutex_lock(&chan->indio_dev->info_exist_lock);
 502        if (chan->indio_dev->info == NULL) {
 503                ret = -ENODEV;
 504                goto err_unlock;
 505        }
 506
 507        ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
 508                                                        scale);
 509err_unlock:
 510        mutex_unlock(&chan->indio_dev->info_exist_lock);
 511
 512        return ret;
 513}
 514EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
 515
 516int iio_read_channel_processed(struct iio_channel *chan, int *val)
 517{
 518        int ret;
 519
 520        mutex_lock(&chan->indio_dev->info_exist_lock);
 521        if (chan->indio_dev->info == NULL) {
 522                ret = -ENODEV;
 523                goto err_unlock;
 524        }
 525
 526        if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
 527                ret = iio_channel_read(chan, val, NULL,
 528                                       IIO_CHAN_INFO_PROCESSED);
 529        } else {
 530                ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
 531                if (ret < 0)
 532                        goto err_unlock;
 533                ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
 534        }
 535
 536err_unlock:
 537        mutex_unlock(&chan->indio_dev->info_exist_lock);
 538
 539        return ret;
 540}
 541EXPORT_SYMBOL_GPL(iio_read_channel_processed);
 542
 543int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
 544{
 545        int ret;
 546
 547        mutex_lock(&chan->indio_dev->info_exist_lock);
 548        if (chan->indio_dev->info == NULL) {
 549                ret = -ENODEV;
 550                goto err_unlock;
 551        }
 552
 553        ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
 554err_unlock:
 555        mutex_unlock(&chan->indio_dev->info_exist_lock);
 556
 557        return ret;
 558}
 559EXPORT_SYMBOL_GPL(iio_read_channel_scale);
 560
 561int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
 562{
 563        int ret = 0;
 564        /* Need to verify underlying driver has not gone away */
 565
 566        mutex_lock(&chan->indio_dev->info_exist_lock);
 567        if (chan->indio_dev->info == NULL) {
 568                ret = -ENODEV;
 569                goto err_unlock;
 570        }
 571
 572        *type = chan->channel->type;
 573err_unlock:
 574        mutex_unlock(&chan->indio_dev->info_exist_lock);
 575
 576        return ret;
 577}
 578EXPORT_SYMBOL_GPL(iio_get_channel_type);
 579
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.