linux/drivers/base/cpu.c
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include "base.h"

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);

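/*
 * Per-cpu cache of the registered cpu devices, indexed by cpu number;
 * get_cpu_device() hands these pointers back to callers.
 */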
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
                        unsigned int from_nid, unsigned int to_nid)
{
        int cpuid = cpu->dev.id;
        unregister_cpu_under_node(cpuid, from_nid);
        register_cpu_under_node(cpuid, to_nid);
        cpu->node_id = to_nid;
}

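/*
 * The per-cpu "online" attribute is exposed as
 * /sys/devices/system/cpu/cpuN/online.  Writing '0' takes the cpu down
 * via cpu_down(), writing '1' brings it up via cpu_up(), and a matching
 * KOBJ_OFFLINE/KOBJ_ONLINE uevent is emitted on success.  For example:
 *
 *   # echo 0 > /sys/devices/system/cpu/cpu1/online
 *   # echo 1 > /sys/devices/system/cpu/cpu1/online
 */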
static ssize_t show_online(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}

static ssize_t __ref store_online(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = cpu->dev.id;
        int from_nid, to_nid;
        ssize_t ret;

        cpu_hotplug_driver_lock();
        switch (buf[0]) {
        case '0':
                ret = cpu_down(cpuid);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                break;
        case '1':
                from_nid = cpu_to_node(cpuid);
                ret = cpu_up(cpuid);

                /*
                 * When memory is hot-added to a memoryless node and a cpu
                 * on that node is brought online, the cpu's node number
                 * may change internally.
                 */
                to_nid = cpu_to_node(cpuid);
                if (from_nid != to_nid)
                        change_cpu_under_node(cpu, from_nid, to_nid);

                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
                break;
        default:
                ret = -EINVAL;
        }
        cpu_hotplug_driver_unlock();

        if (ret >= 0)
                ret = count;
        return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);

static void __cpuinit register_cpu_control(struct cpu *cpu)
{
        device_create_file(&cpu->dev, &dev_attr_online);
}

void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_remove_file(&cpu->dev, &dev_attr_online);

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
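/*
 * When the architecture provides arch_cpu_probe()/arch_cpu_release(),
 * writes to /sys/devices/system/cpu/probe and .../release are passed
 * straight through so that logical cpus can be added or removed from
 * userspace on platforms that support it.
 */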
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

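/*
 * crash_notes exposes the physical address of this cpu's per-cpu crash
 * note buffer, and crash_notes_size its size (sizeof(note_buf_t));
 * userspace kexec/kdump tooling typically reads these when preparing
 * the ELF headers handed to the crash kernel.
 */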
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * We might be reading another cpu's data, depending on which cpu
         * the reading thread is scheduled on.  But the per-cpu crash_notes
         * memory is allocated once during boot and never changes after
         * that, so this operation is safe and needs no locking.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        ssize_t rc;

        rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
        return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
#endif

/*
 * Print the cpu online, possible, and present masks
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};
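
/*
 * These masks show up as /sys/devices/system/cpu/{online,possible,present},
 * each formatted as a cpu list; e.g. on a fully online quad-core machine
 * all three read "0-3".
 */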

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
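
/*
 * kernel_max is the highest cpu index this kernel image can handle,
 * i.e. NR_CPUS - 1; a CONFIG_NR_CPUS=256 build, for example, reports 255.
 */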

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
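
/*
 * "offline" lists the offline cpus below nr_cpu_ids plus any cpus the
 * platform reported beyond nr_cpu_ids; e.g. with cpu 3 down on a box
 * where nr_cpu_ids == 8 and total_cpus == 16 it reads "3,8-15".
 */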

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us.  Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects say, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this.  However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices.  The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu: the &struct cpu being registered; if cpu->hotpluggable is set,
 *       an "online" control file is created in sysfs for this CPU.
 * @num: CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
        error = device_register(&cpu->dev);
        if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
        if (!error)
                error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
        if (!error)
                error = device_create_file(&cpu->dev,
                                           &dev_attr_crash_notes_size);
#endif
        return error;
}

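/*
 * get_cpu_device() returns the device set up by register_cpu() for a
 * possible cpu, or NULL if none has been registered yet; per-cpu
 * drivers (cpuidle, for instance) typically use it as the parent for
 * their own devices.
 */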
struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif

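/*
 * Attributes attached to the subsystem root device, i.e. the files that
 * appear directly under /sys/devices/system/cpu rather than in the
 * per-cpu cpuN directories; the group is handed to
 * subsys_system_register() below.
 */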
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

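/*
 * cpu_dev_init() is called early in boot (from driver_init()) so that
 * /sys/devices/system/cpu exists before the architecture registers its
 * cpu devices.
 */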
void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
}