/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <net/wext.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

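/* A device counts as "alive" until unregistration begins: only
 * NETREG_UNINITIALIZED and NETREG_REGISTERED compare <= NETREG_REGISTERED.
 */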
static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctls */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = (*format)(net, buf);
        read_unlock(&dev_base_lock);

        return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *net, char *buf)  \
{                                                                       \
        return sprintf(buf, format_string, net->field);                 \
}                                                                       \
static ssize_t show_##field(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}

/* use same locking and permission rules as SIF* ioctls */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *net = to_net_dev(dev);
        unsigned long new;
        int ret = -EINVAL;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

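        /* rtnl_trylock() + restart_syscall() (rather than rtnl_lock())
         * avoids deadlocking against an unregister path that holds the
         * RTNL while waiting for this sysfs file to go away.
         */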
        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(net)) {
                if ((ret = (*set)(net, new)) == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}

NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);

/* use same locking rules as GIFHWADDR ioctl */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
}

static ssize_t show_broadcast(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *net = to_net_dev(dev);
        if (dev_isalive(net))
                return sysfs_format_mac(buf, net->broadcast, net->addr_len);
        return -EINVAL;
}

static ssize_t show_carrier(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        if (netif_running(netdev)) {
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
        }
        return -EINVAL;
}

static ssize_t show_speed(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
}

static ssize_t show_duplex(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd)) {
                        const char *duplex;
                        switch (cmd.duplex) {
                        case DUPLEX_HALF:
                                duplex = "half";
                                break;
                        case DUPLEX_FULL:
                                duplex = "full";
                                break;
                        default:
                                duplex = "unknown";
                                break;
                        }
                        ret = sprintf(buf, "%s\n", duplex);
                }
        }
        rtnl_unlock();
        return ret;
}

static ssize_t show_dormant(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}

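/* Indexed by the IF_OPER_* values from RFC 2863; keep in sync with the
 * enum in linux/if.h.
 */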
static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};

static ssize_t show_operstate(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}

/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
        return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}

NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
        return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}

NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
        net->tx_queue_len = new_len;
        return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}

static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        size_t count = len;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t show_ifalias(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}

NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
        dev_set_group(net, (int) new_group);
        return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}

static struct device_attribute net_class_attributes[] = {
        __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
        __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
        __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
        __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
        __ATTR(iflink, S_IRUGO, show_iflink, NULL),
        __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
        __ATTR(type, S_IRUGO, show_type, NULL),
        __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
        __ATTR(address, S_IRUGO, show_address, NULL),
        __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
        __ATTR(carrier, S_IRUGO, show_carrier, NULL),
        __ATTR(speed, S_IRUGO, show_speed, NULL),
        __ATTR(duplex, S_IRUGO, show_duplex, NULL),
        __ATTR(dormant, S_IRUGO, show_dormant, NULL),
        __ATTR(operstate, S_IRUGO, show_operstate, NULL),
        __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
        __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
        __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
               store_tx_queue_len),
        __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
        {}
};

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                        offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
        }
        read_unlock(&dev_base_lock);
        return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t show_##name(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_rx_queue *queue,
            struct rx_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_rx_queue *queue,
            struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,             \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};

static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                rcu_read_unlock();
                free_cpumask_var(mask);
                return -EINVAL;
        }
        rcu_read_unlock();

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

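/* Writing a hex CPU bitmap enables RPS for this queue; e.g. (assuming
 * an interface named eth0):
 *
 *      echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * steers receive processing for rx-0 across CPUs 0-3.
 */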
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                      struct rx_queue_attribute *attribute,
                      const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
            GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i)
                map->len = i;
        else {
                kfree(map);
                map = NULL;
        }

        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map) {
                kfree_rcu(old_map, rcu);
                static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);
        return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

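/* The flow table is vmalloc()ed, and vfree() must not run from the
 * softirq context of an RCU callback, so the actual free is bounced
 * to a workqueue.
 */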
static void rps_dev_flow_table_release_work(struct work_struct *work)
{
        struct rps_dev_flow_table *table = container_of(work,
            struct rps_dev_flow_table, free_work);

        vfree(table);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);

        INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
        schedule_work(&table->free_work);
}

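/* rps_flow_cnt is rounded up to the next power of two so the flow hash
 * can be reduced with a simple mask; writing 0 drops the table.
 */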
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                     struct rx_queue_attribute *attr,
                                     const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64-bit arches, must check mask fits in table->mask (u32),
                 * and on 32-bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else
                table = NULL;

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
            show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
        NULL
};

static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_rx_queue *queue = net->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error) {
                kobject_put(kobj);
                return error;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
}
#endif /* CONFIG_RPS */

int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num)
                kobject_put(&net->_rx[i].kobj);

        return error;
#else
        return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,         \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        return sprintf(buf, fmt_ulong, trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
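/* BQL tracks the bytes in flight on a tx queue and adapts a limit so
 * the hardware ring stays busy without queueing so much data that
 * latency suffers.
 */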
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

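/* Accepts a decimal byte count, or the literal "max" as shorthand for
 * DQL_MAX_LIMIT.
 */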
static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
                value = DQL_MAX_LIMIT;
        else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
            bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
        __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
            bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name  = "byte_queue_limits",
        .attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
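/* Recover a tx queue's index with a linear scan of the device's _tx
 * array; queues are few, so the scan is cheap.
 */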
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                if (queue == &dev->_tx[i])
                        break;

        BUG_ON(i >= dev->num_tx_queues);

        return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;
                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                free_cpumask_var(mask);
                return -EINVAL;
        }

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)             \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

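/* Remove this queue's index from every CPU's map, and drop the whole
 * xps_dev_maps structure once no CPU maps to any queue anymore.
 */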
static void xps_queue_release(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        struct xps_map *map;
        unsigned long index;
        int i, pos, nonempty = 0;

        index = get_netdev_queue_index(queue);

        mutex_lock(&xps_map_mutex);
        dev_maps = xmap_dereference(dev->xps_maps);

        if (dev_maps) {
                for_each_possible_cpu(i) {
                        map = xmap_dereference(dev_maps->cpu_map[i]);
                        if (!map)
                                continue;

                        for (pos = 0; pos < map->len; pos++)
                                if (map->queues[pos] == index)
                                        break;

                        if (pos < map->len) {
                                if (map->len > 1)
                                        map->queues[pos] =
                                            map->queues[--map->len];
                                else {
                                        RCU_INIT_POINTER(dev_maps->cpu_map[i],
                                            NULL);
                                        kfree_rcu(map, rcu);
                                        map = NULL;
                                }
                        }
                        if (map)
                                nonempty = 1;
                }

                if (!nonempty) {
                        RCU_INIT_POINTER(dev->xps_maps, NULL);
                        kfree_rcu(dev_maps, rcu);
                }
        }
        mutex_unlock(&xps_map_mutex);
}

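/* Writing a hex CPU bitmap selects which CPUs may transmit on this
 * queue; e.g. (assuming an interface named eth0):
 *
 *      echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * lets CPUs 0-1 pick tx-0 in the transmit path.
 */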
static ssize_t store_xps_map(struct netdev_queue *queue,
                      struct netdev_queue_attribute *attribute,
                      const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        cpumask_var_t mask;
        int err, i, cpu, pos, map_len, alloc_len, need_set;
        unsigned long index;
        struct xps_map *map, *new_map;
        struct xps_dev_maps *dev_maps, *new_dev_maps;
        int nonempty = 0;
        int numa_node_id = -2;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        new_dev_maps = kzalloc(max_t(unsigned int,
            XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
        if (!new_dev_maps) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        mutex_lock(&xps_map_mutex);

        dev_maps = xmap_dereference(dev->xps_maps);

        for_each_possible_cpu(cpu) {
                map = dev_maps ?
                        xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
                new_map = map;
                if (map) {
                        for (pos = 0; pos < map->len; pos++)
                                if (map->queues[pos] == index)
                                        break;
                        map_len = map->len;
                        alloc_len = map->alloc_len;
                } else
                        pos = map_len = alloc_len = 0;

                need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
                if (need_set) {
                        if (numa_node_id == -2)
                                numa_node_id = cpu_to_node(cpu);
                        else if (numa_node_id != cpu_to_node(cpu))
                                numa_node_id = -1;
                }
#endif
                if (need_set && pos >= map_len) {
                        /* Need to add queue to this CPU's map */
                        if (map_len >= alloc_len) {
                                alloc_len = alloc_len ?
                                    2 * alloc_len : XPS_MIN_MAP_ALLOC;
                                new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
                                                       GFP_KERNEL,
                                                       cpu_to_node(cpu));
                                if (!new_map)
                                        goto error;
                                new_map->alloc_len = alloc_len;
                                for (i = 0; i < map_len; i++)
                                        new_map->queues[i] = map->queues[i];
                                new_map->len = map_len;
                        }
                        new_map->queues[new_map->len++] = index;
                } else if (!need_set && pos < map_len) {
                        /* Need to remove queue from this CPU's map */
                        if (map_len > 1)
                                new_map->queues[pos] =
                                    new_map->queues[--new_map->len];
                        else
                                new_map = NULL;
                }
                RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
        }

        /* Clean up old maps */
        for_each_possible_cpu(cpu) {
                map = dev_maps ?
                        xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
                if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
                        kfree_rcu(map, rcu);
                if (new_dev_maps->cpu_map[cpu])
                        nonempty = 1;
        }

        if (nonempty) {
                rcu_assign_pointer(dev->xps_maps, new_dev_maps);
        } else {
                kfree(new_dev_maps);
                RCU_INIT_POINTER(dev->xps_maps, NULL);
        }

        if (dev_maps)
                kfree_rcu(dev_maps, rcu);

        netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
                                            NUMA_NO_NODE);

        mutex_unlock(&xps_map_mutex);

        free_cpumask_var(mask);
        return len;

error:
        mutex_unlock(&xps_map_mutex);

        if (new_dev_maps)
                for_each_possible_cpu(i)
                        kfree(rcu_dereference_protected(
                                new_dev_maps->cpu_map[i],
                                1));
        kfree(new_dev_maps);
        free_cpumask_var(mask);
        return -ENOMEM;
}

static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
#endif
        NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

#ifdef CONFIG_XPS
        xps_queue_release(queue);
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_queue *queue = net->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *net)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        net->queues_kset = kset_create_and_add("queues",
            NULL, &net->dev.kobj);
        if (!net->queues_kset)
                return -ENOMEM;
#endif

#ifdef CONFIG_RPS
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(net, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(net, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(net, txq, 0);
        net_rx_queue_update_kobjects(net, rxq, 0);
        return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        net_rx_queue_update_kobjects(net, real_rx, 0);
        netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(net->queues_kset);
#endif
}

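/* Take a "passive" reference on the current net namespace: it keeps
 * the struct net allocation alive without counting as an active user,
 * which is all the kobject layer needs.
 */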
static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface name to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what rtnetlink uses natively. */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}
#endif

/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when the last reference to the device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        kfree((char *)dev - dev->padded);
}

static const void *net_namespace(struct device *d)
{
        struct net_device *dev;
        dev = container_of(d, struct net_device, dev);
        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
#ifdef CONFIG_SYSFS
        .dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
        .dev_uevent = netdev_uevent,
#endif
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(net);

        device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);
        const struct attribute_group **groups = net->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = net;
        dev->groups = groups;

        dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device-specific group: if the driver filled in
         * slot 0, append the generic groups after it.
         */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (net->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (net->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(net);
        if (error) {
                device_del(dev);
                return error;
        }

        return error;
}

int netdev_class_create_file(struct class_attribute *class_attr)
{
        return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
        class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}