linux/include/linux/memory_hotplug.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types for controlling the zone type of onlined and offlined memory */
enum {
        /* Offline the memory. */
        MMOP_OFFLINE = 0,
        /* Online the memory. Zone depends, see default_zone_for_pfn(). */
        MMOP_ONLINE,
        /* Online the memory to ZONE_NORMAL. */
        MMOP_ONLINE_KERNEL,
        /* Online the memory to ZONE_MOVABLE. */
        MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE                ((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE      ((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY   ((__force mhp_t)BIT(1))
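
/*
 * Illustrative sketch (not part of this interface): a hypothetical driver
 * hot-adds a previously reserved resource, allows it to be merged with
 * neighbouring System RAM and asks for the memmap to be placed on the
 * added range. Because of MHP_MERGE_RESOURCE, the resource pointer must
 * not be touched again after a successful call:
 *
 *      rc = add_memory_resource(nid, res,
 *                               MHP_MERGE_RESOURCE | MHP_MEMMAP_ON_MEMORY);
 *      res = NULL;
 */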

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *      (required)
 */
struct mhp_params {
        struct vmem_altmap *altmap;
        pgprot_t pgprot;
};
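
/*
 * Sketch (assumption, not mandated by this header): a typical caller of
 * arch_add_memory() supplies the kernel page protection and no altmap:
 *
 *      struct mhp_params params = {
 *              .altmap = NULL,
 *              .pgprot = PAGE_KERNEL,
 *      };
 *
 *      rc = arch_add_memory(nid, start, size, &params);
 */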

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
        return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
        return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
        write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
        write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
        seqlock_init(&zone->span_seqlock);
}
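
/*
 * Reader-side sketch (assumption, mirroring how zone span readers such as
 * page_outside_zone_boundaries() use this seqlock): sample the zone span
 * and retry if a concurrent resize raced with the read:
 *
 *      unsigned int seq;
 *      bool in_zone;
 *
 *      do {
 *              seq = zone_span_seqbegin(zone);
 *              in_zone = pfn >= zone->zone_start_pfn &&
 *                        pfn < zone->zone_start_pfn + zone->spanned_pages;
 *      } while (zone_span_seqretry(zone, seq));
 */
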
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void adjust_present_page_count(struct zone *zone, long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
                                     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
                        struct zone *zone);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
                                         unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
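
/*
 * Sketch of a driver-provided online callback (hypothetical driver; the
 * pattern is the one used by ballooning-style drivers that want to decide
 * per page range whether freshly onlined memory is handed to the buddy):
 *
 *      static void example_online_page(struct page *page, unsigned int order)
 *      {
 *              ... either keep the range for the driver, or ...
 *              generic_online_page(page, order);
 *      }
 *
 *      rc = set_online_page_callback(&example_online_page);
 *      ...
 *      restore_online_page_callback(&example_online_page);
 */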

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
                           struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* Set if the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
        return movable_node_enabled;
}

extern void arch_remove_memory(int nid, u64 start, u64 size,
                               struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
                           struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
                       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
                unsigned long nr_pages, struct mhp_params *params)
{
        return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
              struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is only defined for the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
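
/*
 * Sketch of the node hot-add allocation pattern built on these helpers
 * (assumption; the core node hot-add path does roughly this, error
 * handling omitted):
 *
 *      pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *      if (!pgdat)
 *              return NULL;
 *      arch_refresh_nodedata(nid, pgdat);
 *
 * On a later failure, arch_free_nodedata(pgdat) undoes the allocation.
 */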

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)        generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)       generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If CONFIG_HAVE_ARCH_NODEDATA_EXTENSION is not set, this macro is used
 * to allocate the pgdat.
 * XXX: kmalloc_node() cannot be used to allocate from the new node's memory
 *      at this point, because the pgdat for the new node has not been
 *      allocated/initialized yet. Using the new node's own memory would
 *      need more consideration.
 */
#define generic_alloc_nodedata(nid)                             \
({                                                              \
        kzalloc(sizeof(pg_data_t), GFP_KERNEL);                 \
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove, it has to be replaced.
 */
#define generic_free_nodedata(pgdat)    kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
        node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
        BUG();
        return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)                 \
({                                              \
        struct page *___page = NULL;            \
        if (pfn_valid(pfn))                     \
                ___page = pfn_to_page(pfn);     \
        ___page;                                \
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
        return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
        return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
        return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
        return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
        spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
        spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
        spin_lock_init(&pgdat->node_size_lock);
}
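
/*
 * Usage sketch (assumption): the node span is only updated while holding
 * node_size_lock with interrupts disabled:
 *
 *      unsigned long flags;
 *
 *      pgdat_resize_lock(pgdat, &flags);
 *      pgdat->node_start_pfn = start_pfn;
 *      pgdat->node_spanned_pages = nr_pages;
 *      pgdat_resize_unlock(pgdat, &flags);
 */
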
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
extern int offline_and_remove_memory(int nid, u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
        return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
                               mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
                                     const char *resource_name,
                                     mhp_t mhp_flags);
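
/*
 * Usage sketch (hedged, in the style of drivers that expose device memory
 * as driver-managed System RAM; the resource name below is made up):
 *
 *      rc = add_memory_driver_managed(nid, start, size,
 *                                     "System RAM (example)", MHP_NONE);
 */
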
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                                   unsigned long nr_pages,
                                   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
                                       unsigned long start_pfn,
                                       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
                unsigned long pfn, unsigned long nr_pages,
                unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
                unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
                                      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */