linux/drivers/iio/inkern.c
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
        struct iio_dev *indio_dev;
        struct iio_map *map;
        struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
        int i = 0, ret = 0;
        struct iio_map_internal *mapi;

        if (maps == NULL)
                return 0;

        mutex_lock(&iio_map_list_lock);
        while (maps[i].consumer_dev_name != NULL) {
                mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
                if (mapi == NULL) {
                        ret = -ENOMEM;
                        goto error_ret;
                }
                mapi->map = &maps[i];
                mapi->indio_dev = indio_dev;
                list_add(&mapi->l, &iio_map_list);
                i++;
        }
error_ret:
        mutex_unlock(&iio_map_list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
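
/*
 * Example sketch: how a board file or ADC driver might register a map
 * table (all names here are hypothetical).  The table must be
 * NULL-terminated and must stay allocated for as long as it is
 * registered, because the list above stores pointers into it rather
 * than copying the entries.
 */
static struct iio_map example_adc_maps[] = {
        {
                .consumer_dev_name = "example-consumer",
                .consumer_channel = "vcc",
                .adc_channel_label = "channel0",
        },
        { },    /* sentinel: NULL consumer_dev_name ends the array */
};

static int example_adc_register_maps(struct iio_dev *indio_dev)
{
        /* typically called from the ADC driver's probe() */
        return iio_map_array_register(indio_dev, example_adc_maps);
}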

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
        int ret = -ENODEV;
        struct iio_map_internal *mapi;
        struct list_head *pos, *tmp;

        mutex_lock(&iio_map_list_lock);
        list_for_each_safe(pos, tmp, &iio_map_list) {
                mapi = list_entry(pos, struct iio_map_internal, l);
                if (indio_dev == mapi->indio_dev) {
                        list_del(&mapi->l);
                        kfree(mapi);
                        ret = 0;
                }
        }
        mutex_unlock(&iio_map_list_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
        int i;
        const struct iio_chan_spec *chan = NULL;

        for (i = 0; i < indio_dev->num_channels; i++)
                if (indio_dev->channels[i].datasheet_name &&
                    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
                        chan = &indio_dev->channels[i];
                        break;
                }
        return chan;
}

static struct iio_channel *iio_channel_get_sys(const char *name,
                                               const char *channel_name)
{
        struct iio_map_internal *c_i = NULL, *c = NULL;
        struct iio_channel *channel;
        int err;

        if (name == NULL && channel_name == NULL)
                return ERR_PTR(-ENODEV);

        /* first find a matching entry in the channel map */
        mutex_lock(&iio_map_list_lock);
        list_for_each_entry(c_i, &iio_map_list, l) {
                if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
                    (channel_name &&
                     strcmp(channel_name, c_i->map->consumer_channel) != 0))
                        continue;
                c = c_i;
                iio_device_get(c->indio_dev);
                break;
        }
        mutex_unlock(&iio_map_list_lock);
        if (c == NULL)
                return ERR_PTR(-ENODEV);

        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
        if (channel == NULL) {
                err = -ENOMEM;
                goto error_no_mem;
        }

        channel->indio_dev = c->indio_dev;

        if (c->map->adc_channel_label) {
                channel->channel =
                        iio_chan_spec_from_name(channel->indio_dev,
                                                c->map->adc_channel_label);

                if (channel->channel == NULL) {
                        err = -EINVAL;
                        goto error_no_chan;
                }
        }

        return channel;

error_no_chan:
        kfree(channel);
error_no_mem:
        iio_device_put(c->indio_dev);
        return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
                                    const char *channel_name)
{
        const char *name = dev ? dev_name(dev) : NULL;

        return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
        iio_device_put(channel->indio_dev);
        kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
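
/*
 * Example sketch: a consumer driver named in a map looks up its channel
 * by consumer channel name ("vcc" is hypothetical), reads one raw
 * sample and releases the channel again.
 */
static int example_consumer_read_once(struct device *dev, int *val)
{
        struct iio_channel *chan;
        int ret;

        chan = iio_channel_get(dev, "vcc");
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* on success ret is the IIO_VAL_* code from read_raw() */
        ret = iio_read_channel_raw(chan, val);

        iio_channel_release(chan);
        return ret < 0 ? ret : 0;
}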

struct iio_channel *iio_channel_get_all(struct device *dev)
{
        const char *name;
        struct iio_channel *chans;
        struct iio_map_internal *c = NULL;
        int nummaps = 0;
        int mapind = 0;
        int i, ret;

        if (dev == NULL)
                return ERR_PTR(-EINVAL);
        name = dev_name(dev);

        mutex_lock(&iio_map_list_lock);
        /* first count the matching maps */
        list_for_each_entry(c, &iio_map_list, l)
                if (name == NULL ||
                    strcmp(name, c->map->consumer_dev_name) == 0)
                        nummaps++;

        if (nummaps == 0) {
                ret = -ENODEV;
                goto error_ret;
        }

        /* NULL terminated array to save passing size */
        chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
        if (chans == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        /* for each map fill in the chans element */
        list_for_each_entry(c, &iio_map_list, l) {
                if (name && strcmp(name, c->map->consumer_dev_name) != 0)
                        continue;
                chans[mapind].indio_dev = c->indio_dev;
                chans[mapind].data = c->map->consumer_data;
                chans[mapind].channel =
                        iio_chan_spec_from_name(chans[mapind].indio_dev,
                                                c->map->adc_channel_label);
                if (chans[mapind].channel == NULL) {
                        ret = -EINVAL;
                        goto error_free_chans;
                }
                iio_device_get(chans[mapind].indio_dev);
                mapind++;
        }
        if (mapind == 0) {
                ret = -ENODEV;
                goto error_free_chans;
        }
        mutex_unlock(&iio_map_list_lock);

        return chans;

error_free_chans:
        for (i = 0; i < nummaps; i++)
                iio_device_put(chans[i].indio_dev);
        kfree(chans);
error_ret:
        mutex_unlock(&iio_map_list_lock);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
        struct iio_channel *chan = &channels[0];

        while (chan->indio_dev) {
                iio_device_put(chan->indio_dev);
                chan++;
        }
        kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
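
/*
 * Example sketch: consuming every channel mapped to a device.  The
 * array returned by iio_channel_get_all() is NULL-terminated (an entry
 * with indio_dev == NULL marks the end), the same convention that
 * iio_channel_release_all() walks above.
 */
static int example_consumer_read_all(struct device *dev)
{
        struct iio_channel *chans, *chan;
        int val, ret = 0;

        chans = iio_channel_get_all(dev);
        if (IS_ERR(chans))
                return PTR_ERR(chans);

        for (chan = chans; chan->indio_dev; chan++) {
                ret = iio_read_channel_raw(chan, &val);
                if (ret < 0)
                        break;
        }

        iio_channel_release_all(chans);
        return ret < 0 ? ret : 0;
}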

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
        enum iio_chan_info_enum info)
{
        int unused;

        if (val2 == NULL)
                val2 = &unused;

        return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
                                                val, val2, info);
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
        int raw, int *processed, unsigned int scale)
{
        int scale_type, scale_val, scale_val2, offset;
        s64 raw64 = raw;
        int ret;

        /* the offset is optional; apply it only when the driver has one */
        ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
        if (ret >= 0)
                raw64 += offset;

        scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
                                        IIO_CHAN_INFO_SCALE);
        if (scale_type < 0)
                return scale_type;

        switch (scale_type) {
        case IIO_VAL_INT:
                *processed = raw64 * scale_val;
                break;
        case IIO_VAL_INT_PLUS_MICRO:
                if (scale_val2 < 0)
                        *processed = -raw64 * scale_val;
                else
                        *processed = raw64 * scale_val;
                *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                                      1000000LL);
                break;
        case IIO_VAL_INT_PLUS_NANO:
                if (scale_val2 < 0)
                        *processed = -raw64 * scale_val;
                else
                        *processed = raw64 * scale_val;
                *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                                      1000000000LL);
                break;
        case IIO_VAL_FRACTIONAL:
                *processed = div_s64(raw64 * (s64)scale_val * scale,
                                     scale_val2);
                break;
        case IIO_VAL_FRACTIONAL_LOG2:
                *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
        int *processed, unsigned int scale)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
                                                        scale);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
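
/*
 * Worked example: for a hypothetical 12-bit ADC with a 2.5 V reference
 * whose scale reads back as IIO_VAL_FRACTIONAL with scale_val = 2500
 * and scale_val2 = 4096, a raw reading of 2048 converted with
 * scale = 1 yields
 *
 *      processed = 2048 * 2500 / 4096 = 1250   (millivolts)
 *
 * and passing scale = 1000 would return the same reading in microvolts.
 */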

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
                ret = iio_channel_read(chan, val, NULL,
                                       IIO_CHAN_INFO_PROCESSED);
        } else {
                ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
                if (ret < 0)
                        goto err_unlock;
                ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
        }

err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
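
/*
 * Example sketch: converting a raw reading to microvolts by hand,
 * assuming a voltage channel whose scale comes back as
 * IIO_VAL_INT_PLUS_MICRO.  A robust consumer must switch on the
 * IIO_VAL_* type returned by iio_read_channel_scale(), as the
 * conversion helper above does.
 */
static int example_raw_to_microvolts(struct iio_channel *chan, int raw,
                                     s64 *uv)
{
        int scale_int, scale_micro;
        int ret;

        ret = iio_read_channel_scale(chan, &scale_int, &scale_micro);
        if (ret < 0)
                return ret;
        if (ret != IIO_VAL_INT_PLUS_MICRO)
                return -EINVAL;

        /* raw * scale is in millivolts for voltage channels */
        *uv = (s64)raw * scale_int * 1000 +
              div_s64((s64)raw * scale_micro * 1000, 1000000);

        return 0;
}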

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
        int ret = 0;
        /* Need to verify underlying driver has not gone away */

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        *type = chan->channel->type;
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
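
/*
 * Example sketch: a consumer verifying that the channel it was handed
 * is a voltage channel before interpreting the reading.
 */
static int example_expect_voltage(struct iio_channel *chan)
{
        enum iio_chan_type type;
        int ret;

        ret = iio_get_channel_type(chan, &type);
        if (ret < 0)
                return ret;

        return type == IIO_VOLTAGE ? 0 : -EINVAL;
}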