/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <net/wext.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctls */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = (*format)(net, buf);
        read_unlock(&dev_base_lock);

        return ret;
}

/* generate a show function for a simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *net, char *buf)  \
{                                                                       \
        return sprintf(buf, format_string, net->field);                 \
}                                                                       \
static ssize_t show_##field(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}
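
/* For illustration: NETDEVICE_SHOW(mtu, fmt_dec) below expands to
 * roughly the following pair of functions:
 *
 *      static ssize_t format_mtu(const struct net_device *net, char *buf)
 *      {
 *              return sprintf(buf, fmt_dec, net->mtu);
 *      }
 *      static ssize_t show_mtu(struct device *dev,
 *                              struct device_attribute *attr, char *buf)
 *      {
 *              return netdev_show(dev, attr, buf, format_mtu);
 *      }
 */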

/* use same locking and permission rules as SIF* ioctls */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *net = to_net_dev(dev);
        unsigned long new;
        int ret = -EINVAL;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(net)) {
                ret = (*set)(net, new);
                if (ret == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}
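
/* Typical usage from user space (paths are illustrative, assuming an
 * "eth0" device; every writable attribute below funnels through
 * netdev_store()):
 *
 *      # echo 9000 > /sys/class/net/eth0/mtu
 *      # echo 0x1003 > /sys/class/net/eth0/flags
 *
 * kstrtoul() is called with base 0, so decimal, octal ("0...") and
 * hex ("0x...") input are all accepted.
 */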

NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);

/* use same locking rules as the SIOCGIFHWADDR ioctl */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
}

static ssize_t show_broadcast(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *net = to_net_dev(dev);
        if (dev_isalive(net))
                return sysfs_format_mac(buf, net->broadcast, net->addr_len);
        return -EINVAL;
}

static ssize_t show_carrier(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
        return -EINVAL;
}

static ssize_t show_speed(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
}

static ssize_t show_duplex(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, "%s\n",
                                      cmd.duplex ? "full" : "half");
        }
        rtnl_unlock();
        return ret;
}

static ssize_t show_dormant(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}

static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};

static ssize_t show_operstate(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}
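
/* Example read (device name illustrative); a device that is not
 * running always reports "down":
 *
 *      $ cat /sys/class/net/eth0/operstate
 *      up
 */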

/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
        return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}

NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
        return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}

NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
        net->tx_queue_len = new_len;
        return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}

static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        size_t count = len;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t show_ifalias(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}
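
/* Example (illustrative): the alias is free-form text, useful for
 * labelling ports:
 *
 *      # echo "uplink to core switch" > /sys/class/net/eth0/ifalias
 *      $ cat /sys/class/net/eth0/ifalias
 *      uplink to core switch
 */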

NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
        dev_set_group(net, (int) new_group);
        return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}

static struct device_attribute net_class_attributes[] = {
        __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
        __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
        __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
        __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
        __ATTR(iflink, S_IRUGO, show_iflink, NULL),
        __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
        __ATTR(type, S_IRUGO, show_type, NULL),
        __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
        __ATTR(address, S_IRUGO, show_address, NULL),
        __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
        __ATTR(carrier, S_IRUGO, show_carrier, NULL),
        __ATTR(speed, S_IRUGO, show_speed, NULL),
        __ATTR(duplex, S_IRUGO, show_duplex, NULL),
        __ATTR(dormant, S_IRUGO, show_dormant, NULL),
        __ATTR(operstate, S_IRUGO, show_operstate, NULL),
        __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
        __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
        __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
               store_tx_queue_len),
        __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
        {}
};
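
/* These attributes appear directly under /sys/class/net/<iface>/,
 * e.g. /sys/class/net/eth0/address or /sys/class/net/eth0/carrier
 * (device name illustrative).
 */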

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset >= sizeof(struct rtnl_link_stats64) ||
                offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
        }
        read_unlock(&dev_base_lock);
        return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t show_##name(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
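
/* For illustration: NETSTAT_ENTRY(rx_packets) below expands to
 * roughly:
 *
 *      static ssize_t show_rx_packets(struct device *d,
 *                                     struct device_attribute *attr,
 *                                     char *buf)
 *      {
 *              return netstat_show(d, attr, buf,
 *                      offsetof(struct rtnl_link_stats64, rx_packets));
 *      }
 *      static DEVICE_ATTR(rx_packets, S_IRUGO, show_rx_packets, NULL);
 */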

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
};
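
/* The group appears as a "statistics" directory under the device,
 * e.g. (device name and value illustrative):
 *
 *      $ cat /sys/class/net/eth0/statistics/rx_bytes
 *      123456789
 */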

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_rx_queue *queue,
            struct rx_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_rx_queue *queue,
            struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,             \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};

static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                rcu_read_unlock();
                free_cpumask_var(mask);
                return -EINVAL;
        }
        rcu_read_unlock();

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                      struct rx_queue_attribute *attribute,
                      const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
            GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i)
                map->len = i;
        else {
                kfree(map);
                map = NULL;
        }

        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map) {
                kfree_rcu(old_map, rcu);
                static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);
        return len;
}
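
/* Example (device and mask illustrative): steer packets received on
 * queue 0 to CPUs 0-3; the value is a hex cpumask:
 *
 *      # echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *      $ cat /sys/class/net/eth0/queues/rx-0/rps_cpus
 *      f
 *
 * Writing 0 installs a NULL map; the rps_needed static key is
 * incremented for each map installed and decremented when one is
 * replaced or removed.
 */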

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release_work(struct work_struct *work)
{
        struct rps_dev_flow_table *table = container_of(work,
            struct rps_dev_flow_table, free_work);

        vfree(table);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);

        INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
        schedule_work(&table->free_work);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                     struct rx_queue_attribute *attr,
                                     const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1, computed
                 * without overflow: the loop smears the high bits
                 * down, e.g. count = 100 yields mask = 127.
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64-bit arches, must check mask fits in table->mask (u32),
                 * and on 32-bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
                 * doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else
                table = NULL;

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}
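
/* Example (illustrative): size the per-queue flow table used by RFS;
 * the count is rounded up to a power of two internally:
 *
 *      # echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *      $ cat /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *      4096
 *
 * Writing 0 frees the old table via call_rcu() plus a work item,
 * since a vmalloc'ed table must not be vfree'd from RCU callback
 * (softirq) context.
 */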

static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
            show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
        NULL
};

static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_rx_queue *queue = net->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error) {
                kobject_put(kobj);
                return error;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
}
#endif /* CONFIG_RPS */

int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num)
                kobject_put(&net->_rx[i].kobj);

        return error;
#else
        return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,         \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        /* sysfs attributes conventionally end in a newline; reuse
         * fmt_ulong ("%lu\n") like the other ulong attributes. */
        return sprintf(buf, fmt_ulong, trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
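
/* Example read (path illustrative): the number of transmit timeouts
 * the watchdog has recorded on this queue:
 *
 *      $ cat /sys/class/net/eth0/queues/tx-0/tx_timeout
 *      0
 */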

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
                value = DQL_MAX_LIMIT;
        else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
            bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
        __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
            bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name  = "byte_queue_limits",
        .attrs  = dql_attrs,
};
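
/* Example (paths illustrative): cap the bytes in flight on queue 0,
 * or lift the cap again; "max" is accepted by bql_set() as shorthand
 * for DQL_MAX_LIMIT:
 *
 *      # echo 100000 > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 *      # echo max > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 */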
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                if (queue == &dev->_tx[i])
                        break;

        BUG_ON(i >= dev->num_tx_queues);

        return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;
                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                free_cpumask_var(mask);
                return -EINVAL;
        }

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)             \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static void xps_queue_release(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        struct xps_map *map;
        unsigned long index;
        int i, pos, nonempty = 0;

        index = get_netdev_queue_index(queue);

        mutex_lock(&xps_map_mutex);
        dev_maps = xmap_dereference(dev->xps_maps);

        if (dev_maps) {
                for_each_possible_cpu(i) {
                        map = xmap_dereference(dev_maps->cpu_map[i]);
                        if (!map)
                                continue;

                        for (pos = 0; pos < map->len; pos++)
                                if (map->queues[pos] == index)
                                        break;

                        if (pos < map->len) {
                                if (map->len > 1)
                                        map->queues[pos] =
                                            map->queues[--map->len];
                                else {
                                        RCU_INIT_POINTER(dev_maps->cpu_map[i],
                                            NULL);
                                        kfree_rcu(map, rcu);
                                        map = NULL;
                                }
                        }
                        if (map)
                                nonempty = 1;
                }

                if (!nonempty) {
                        RCU_INIT_POINTER(dev->xps_maps, NULL);
                        kfree_rcu(dev_maps, rcu);
                }
        }
        mutex_unlock(&xps_map_mutex);
}

static ssize_t store_xps_map(struct netdev_queue *queue,
                      struct netdev_queue_attribute *attribute,
                      const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        cpumask_var_t mask;
        int err, i, cpu, pos, map_len, alloc_len, need_set;
        unsigned long index;
        struct xps_map *map, *new_map;
        struct xps_dev_maps *dev_maps, *new_dev_maps;
        int nonempty = 0;
        int numa_node_id = -2;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        new_dev_maps = kzalloc(max_t(unsigned int,
            XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
        if (!new_dev_maps) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        mutex_lock(&xps_map_mutex);

        dev_maps = xmap_dereference(dev->xps_maps);

        for_each_possible_cpu(cpu) {
                map = dev_maps ?
                        xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
                new_map = map;
                if (map) {
                        for (pos = 0; pos < map->len; pos++)
                                if (map->queues[pos] == index)
                                        break;
                        map_len = map->len;
                        alloc_len = map->alloc_len;
                } else
                        pos = map_len = alloc_len = 0;

                need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
                if (need_set) {
                        if (numa_node_id == -2)
                                numa_node_id = cpu_to_node(cpu);
                        else if (numa_node_id != cpu_to_node(cpu))
                                numa_node_id = -1;
                }
#endif
                if (need_set && pos >= map_len) {
                        /* Need to add queue to this CPU's map */
                        if (map_len >= alloc_len) {
                                alloc_len = alloc_len ?
                                    2 * alloc_len : XPS_MIN_MAP_ALLOC;
                                new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
                                                       GFP_KERNEL,
                                                       cpu_to_node(cpu));
                                if (!new_map)
                                        goto error;
                                new_map->alloc_len = alloc_len;
                                for (i = 0; i < map_len; i++)
                                        new_map->queues[i] = map->queues[i];
                                new_map->len = map_len;
                        }
                        new_map->queues[new_map->len++] = index;
                } else if (!need_set && pos < map_len) {
                        /* Need to remove queue from this CPU's map */
                        if (map_len > 1)
                                new_map->queues[pos] =
                                    new_map->queues[--new_map->len];
                        else
                                new_map = NULL;
                }
                RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
        }

        /* Cleanup old maps */
        for_each_possible_cpu(cpu) {
                map = dev_maps ?
                        xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
                if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
                        kfree_rcu(map, rcu);
                if (new_dev_maps->cpu_map[cpu])
                        nonempty = 1;
        }

        if (nonempty) {
                rcu_assign_pointer(dev->xps_maps, new_dev_maps);
        } else {
                kfree(new_dev_maps);
                RCU_INIT_POINTER(dev->xps_maps, NULL);
        }

        if (dev_maps)
                kfree_rcu(dev_maps, rcu);

        netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
                                            NUMA_NO_NODE);

        mutex_unlock(&xps_map_mutex);

        free_cpumask_var(mask);
        return len;

error:
        mutex_unlock(&xps_map_mutex);

        if (new_dev_maps)
                for_each_possible_cpu(i)
                        kfree(rcu_dereference_protected(
                                new_dev_maps->cpu_map[i],
                                1));
        kfree(new_dev_maps);
        free_cpumask_var(mask);
        return -ENOMEM;
}
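
/* Example (device and mask illustrative): let only CPUs 4-7 pick TX
 * queue 1; like rps_cpus, the value is a hex cpumask:
 *
 *      # echo f0 > /sys/class/net/eth0/queues/tx-1/xps_cpus
 *
 * Writing 0 drops the queue from every CPU's map.
 */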

static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
#endif
        NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

#ifdef CONFIG_XPS
        xps_queue_release(queue);
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_queue *queue = net->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *net)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        net->queues_kset = kset_create_and_add("queues",
            NULL, &net->dev.kobj);
        if (!net->queues_kset)
                return -ENOMEM;
#endif

#ifdef CONFIG_RPS
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(net, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(net, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(net, txq, 0);
        net_rx_queue_update_kobjects(net, rxq, 0);
        return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        net_rx_queue_update_kobjects(net, real_rx, 0);
        netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(net->queues_kset);
#endif
}

static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what RtNetlink uses natively. */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}
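
/* A resulting uevent would carry, e.g. (values illustrative):
 *
 *      ACTION=add
 *      INTERFACE=eth0
 *      IFINDEX=2
 */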
#endif

/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        kfree((char *)dev - dev->padded);
}

static const void *net_namespace(struct device *d)
{
        struct net_device *dev;
        dev = container_of(d, struct net_device, dev);
        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
#ifdef CONFIG_SYSFS
        .dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
        .dev_uevent = netdev_uevent,
#endif
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(net);

        device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);
        const struct attribute_group **groups = net->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = net;
        dev->groups = groups;

        dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (net->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (net->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(net);
        if (error) {
                device_del(dev);
                return error;
        }

        return error;
}
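
/* After successful registration the device is visible as, e.g.
 * (layout sketch, names illustrative):
 *
 *      /sys/class/net/eth0/
 *          address, mtu, flags, ...     (net_class_attributes)
 *          statistics/                  (netstat_group)
 *          queues/
 *              rx-0/  rps_cpus, rps_flow_cnt
 *              tx-0/  tx_timeout, xps_cpus, byte_queue_limits/
 */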

int netdev_class_create_file(struct class_attribute *class_attr)
{
        return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
        class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}