linux/net/caif/caif_dev.c
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
        struct cflayer layer;
        struct list_head list;
        struct net_device *netdev;
        int __percpu *pcpu_refcnt;
        spinlock_t flow_lock;
        struct sk_buff *xoff_skb;
        void (*xoff_skb_dtor)(struct sk_buff *skb);
        bool xoff;
};

struct caif_device_entry_list {
        struct list_head list;
        /* Protects simultaneous deletes in the list */
        struct mutex lock;
};

struct caif_net {
        struct cfcnfg *cfg;
        struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */
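
/*
 * Example: with q_high = 50 and a device tx_queue_len of 1000,
 * transmit() below computes high = (1000 * 50) / 100 = 500, so flow
 * is turned off once the qdisc backlog reaches 500 packets.
 */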

struct cfcnfg *get_cfcnfg(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
        this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
        this_cpu_inc(*e->pcpu_refcnt);
}

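/*
 * Sum the per-CPU reference counts. Callers read this after unlinking
 * the entry, to decide whether the CAIF stack is still in use.
 */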
static int caifd_refcnt_read(struct caif_device_entry *e)
{
        int i, refcnt = 0;
        for_each_possible_cpu(i)
                refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
        return refcnt;
}

/* Allocate a new CAIF device entry. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
        struct caif_device_entry_list *caifdevs;
        struct caif_device_entry *caifd;

        caifdevs = caif_device_list(dev_net(dev));

        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;
        caifd->pcpu_refcnt = alloc_percpu(int);
        if (!caifd->pcpu_refcnt) {
                kfree(caifd);
                return NULL;
        }
        caifd->netdev = dev;
        dev_hold(dev);
        return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
        struct caif_device_entry_list *caifdevs =
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;

        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                if (caifd->netdev == dev)
                        return caifd;
        }
        return NULL;
}

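/*
 * Flow-on callback, installed by transmit() as a temporary skb
 * destructor while flow is off: it runs the original destructor that
 * was saved away, then signals flow-on to the layer above.
 */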
void caif_flow_cb(struct sk_buff *skb)
{
        struct caif_device_entry *caifd;
        void (*dtor)(struct sk_buff *skb) = NULL;
        bool send_xoff;

        WARN_ON(skb->dev == NULL);

        rcu_read_lock();
        caifd = caif_get(skb->dev);

        /* The device may already be unregistered; bail out if so */
        WARN_ON(caifd == NULL);
        if (!caifd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        spin_lock_bh(&caifd->flow_lock);
        send_xoff = caifd->xoff;
        caifd->xoff = false;
        dtor = caifd->xoff_skb_dtor;

        if (WARN_ON(caifd->xoff_skb != skb))
                skb = NULL;

        caifd->xoff_skb = NULL;
        caifd->xoff_skb_dtor = NULL;

        spin_unlock_bh(&caifd->flow_lock);

        if (dtor && skb)
                dtor(skb);

        if (send_xoff)
                caifd->layer.up->
                        ctrlcmd(caifd->layer.up,
                                _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
                                caifd->layer.id);
        caifd_put(caifd);
}

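/*
 * Transmit a CAIF packet on the underlying net device. When the TX
 * queue runs nearly full, flow is turned off and the skb destructor
 * is hijacked so that caif_flow_cb() above can turn flow back on.
 */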
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
        int err, high = 0, qlen = 0;
        struct caif_dev_common *caifdev;
        struct caif_device_entry *caifd =
            container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;
        struct netdev_queue *txq;

        rcu_read_lock_bh();

        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_CAIF);
        caifdev = netdev_priv(caifd->netdev);

        /* Check if we need to handle xoff */
        if (likely(caifd->netdev->tx_queue_len == 0))
                goto noxoff;

        if (unlikely(caifd->xoff))
                goto noxoff;

        if (likely(!netif_queue_stopped(caifd->netdev))) {
                /* If we run with a TX queue, check if the queue is too long */
                txq = netdev_get_tx_queue(skb->dev, 0);
                qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

                if (likely(qlen == 0))
                        goto noxoff;

                high = (caifd->netdev->tx_queue_len * q_high) / 100;
                if (likely(qlen < high))
                        goto noxoff;
        }

        /* Hold lock while accessing xoff */
        spin_lock_bh(&caifd->flow_lock);
        if (caifd->xoff) {
                spin_unlock_bh(&caifd->flow_lock);
                goto noxoff;
        }

        /*
         * Handle flow-off: we do this by temporarily hijacking this
         * skb's destructor function and replacing it with our own
         * flow-on callback. The callback will signal flow-on and call
         * the original destructor.
         */

        pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
                        netif_queue_stopped(caifd->netdev),
                        qlen, high);
        caifd->xoff = true;
        caifd->xoff_skb = skb;
        caifd->xoff_skb_dtor = skb->destructor;
        skb->destructor = caif_flow_cb;
        spin_unlock_bh(&caifd->flow_lock);

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                        _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                        caifd->layer.id);
noxoff:
        rcu_read_unlock_bh();

        err = dev_queue_xmit(skb);
        if (err > 0)
                err = -EIO;

        return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pkttype, struct net_device *orig_dev)
{
        struct cfpkt *pkt;
        struct caif_device_entry *caifd;
        int err;

        pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

        rcu_read_lock();
        caifd = caif_get(dev);

        if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
                        !netif_oper_up(caifd->netdev)) {
                rcu_read_unlock();
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        /* Hold reference to netdevice while using CAIF stack */
        caifd_hold(caifd);
        rcu_read_unlock();

        err = caifd->layer.up->receive(caifd->layer.up, pkt);

        /* For -EILSEQ the packet is not freed, so free it now */
        if (err == -EILSEQ)
                cfpkt_destroy(pkt);

        /* Release reference to stack upwards */
        caifd_put(caifd);

        if (err != 0)
                err = NET_RX_DROP;
        return err;
}

static struct packet_type caif_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_CAIF),
        .func = receive,
};
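
/* Propagate device flow control to the CAIF stack above this device. */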
static void dev_flowctrl(struct net_device *dev, int on)
{
        struct caif_device_entry *caifd;

        rcu_read_lock();

        caifd = caif_get(dev);
        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                 on ?
                                 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
                                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                 caifd->layer.id);
        caifd_put(caifd);
}

void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                        struct cflayer *link_support, int head_room,
                        struct cflayer **layer, int (**rcv_func)(
                                struct sk_buff *, struct net_device *,
                                struct packet_type *, struct net_device *))
{
        struct caif_device_entry *caifd;
        enum cfcnfg_phy_preference pref;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs;

        caifdevs = caif_device_list(dev_net(dev));
        caifd = caif_device_alloc(dev);
        if (!caifd)
                return;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);

        switch (caifdev->link_select) {
        case CAIF_LINK_HIGH_BANDW:
                pref = CFPHYPREF_HIGH_BW;
                break;
        case CAIF_LINK_LOW_LATENCY:
                pref = CFPHYPREF_LOW_LAT;
                break;
        default:
                pref = CFPHYPREF_HIGH_BW;
                break;
        }
        mutex_lock(&caifdevs->lock);
        list_add_rcu(&caifd->list, &caifdevs->list);

        strncpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name) - 1);
        caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
        caifd->layer.transmit = transmit;
        cfcnfg_add_phy_layer(cfg,
                                dev,
                                &caifd->layer,
                                pref,
                                link_support,
                                caifdev->use_fcs,
                                head_room);
        mutex_unlock(&caifdevs->lock);
        if (rcv_func)
                *rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
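
/*
 * Illustrative sketch (not part of this file): a driver for a
 * CAIF-capable net_device would typically enroll it along these
 * lines. "my_link_setup" and its locals are hypothetical names.
 *
 *	static void my_link_setup(struct net_device *dev)
 *	{
 *		struct caif_dev_common *common = netdev_priv(dev);
 *		struct cflayer *layer;
 *
 *		common->link_select = CAIF_LINK_LOW_LATENCY;
 *		common->use_frag = false;
 *		caif_enroll_dev(dev, common, NULL, 0, &layer, NULL);
 *	}
 */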

/* Notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
                              void *arg)
{
        struct net_device *dev = arg;
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
        struct cfcnfg *cfg;
        struct cflayer *layer, *link_support;
        int head_room = 0;
        struct caif_device_entry_list *caifdevs;

        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));

        caifd = caif_get(dev);
        if (caifd == NULL && dev->type != ARPHRD_CAIF)
                return 0;

        switch (what) {
        case NETDEV_REGISTER:
                if (caifd != NULL)
                        break;

                caifdev = netdev_priv(dev);

                link_support = NULL;
                if (caifdev->use_frag) {
                        head_room = 1;
                        link_support = cfserl_create(dev->ifindex,
                                                        caifdev->use_stx);
                        if (!link_support) {
                                pr_warn("Out of memory\n");
                                break;
                        }
                }
                caif_enroll_dev(dev, caifdev, link_support, head_room,
                                &layer, NULL);
                caifdev->flowctrl = dev_flowctrl;
                break;

        case NETDEV_UP:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        rcu_read_unlock();
                        break;
                }

                caifd->xoff = false;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();

                break;

        case NETDEV_DOWN:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                        rcu_read_unlock();
                        return -EINVAL;
                }

                cfcnfg_set_phy_state(cfg, &caifd->layer, false);
                caifd_hold(caifd);
                rcu_read_unlock();

                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);

                spin_lock_bh(&caifd->flow_lock);

                /*
                 * Restore the original destructor in place of our
                 * xoff destructor. We trust that skb->destructor is
                 * always called before the skb reference becomes
                 * invalid, and the hijacked destructor takes the
                 * flow_lock, so manipulating skb->destructor here
                 * is safe.
                 */
                if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
                        caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

                caifd->xoff = false;
                caifd->xoff_skb_dtor = NULL;
                caifd->xoff_skb = NULL;

                spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;

        case NETDEV_UNREGISTER:
                mutex_lock(&caifdevs->lock);

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        mutex_unlock(&caifdevs->lock);
                        break;
                }
                list_del_rcu(&caifd->list);

                /*
                 * NETDEV_UNREGISTER is called repeatedly until all
                 * reference counts for the net-device are released.
                 * If references to caifd are still held, simply ignore
                 * NETDEV_UNREGISTER and wait for the next call.
                 *
                 * If any packets are in flight down the CAIF stack,
                 * cfcnfg_del_phy_layer will return nonzero.
                 * If no packets are in flight, the CAIF stack associated
                 * with the unregistering net-device is freed.
                 */

                if (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

                        pr_info("Wait for device inuse\n");
                        /* Re-enroll the device if the CAIF stack is in use */
                        list_add_rcu(&caifd->list, &caifdevs->list);
                        mutex_unlock(&caifdevs->lock);
                        break;
                }

                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);

                mutex_unlock(&caifdevs->lock);
                break;
        }
        return 0;
}

static struct notifier_block caif_device_notifier = {
        .notifier_call = caif_device_notify,
        .priority = 0,
};

/* Per-namespace CAIF device handling */
static int caif_init_net(struct net *net)
{
        struct caif_net *caifn = net_generic(net, caif_net_id);
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);

        caifn->cfg = cfcnfg_create();
        if (!caifn->cfg)
                return -ENOMEM;

        return 0;
}

static void caif_exit_net(struct net *net)
{
        struct caif_device_entry *caifd, *tmp;
        struct caif_device_entry_list *caifdevs =
            caif_device_list(net);
        struct cfcnfg *cfg = get_cfcnfg(net);

        rtnl_lock();
        mutex_lock(&caifdevs->lock);

        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                int i = 0;
                list_del_rcu(&caifd->list);
                cfcnfg_set_phy_state(cfg, &caifd->layer, false);

                while (i < 10 &&
                        (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

                        pr_info("Wait for device inuse\n");
                        msleep(250);
                        i++;
                }
                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);
        }
        cfcnfg_remove(cfg);

        mutex_unlock(&caifdevs->lock);
        rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
        .init = caif_init_net,
        .exit = caif_exit_net,
        .id   = &caif_net_id,
        .size = sizeof(struct caif_net),
};

/* Initialize the CAIF device list */
static int __init caif_device_init(void)
{
        int result;

        result = register_pernet_subsys(&caif_net_ops);

        if (result)
                return result;

        register_netdevice_notifier(&caif_device_notifier);
        dev_add_pack(&caif_packet_type);

        return result;
}

static void __exit caif_device_exit(void)
{
        unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
}

module_init(caif_device_init);
module_exit(caif_device_exit);