linux/mm/highmem.c
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes of physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>

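/*
 * Per-CPU nesting depth of active kmap_atomic() mappings; kmap_atomic()
 * uses it to pick the fixmap slot for each new atomic kmap.
 */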
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * A pkmap_count entry is not a pure "count":
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
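/*
 * Example lifecycle of an entry: map_new_virtual() installs a mapping
 * with a count of 1 and the kmap_high() call that triggered it raises
 * the count to 2 (one user); each kunmap_high() drops it again.  At 1
 * the page is unused but still mapped, and flush_all_zero_pkmaps()
 * eventually clears the PTE, flushes the TLB and resets the count to 0.
 */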
#ifdef CONFIG_HIGHMEM

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

unsigned int nr_free_highpages(void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat) {
		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
		if (zone_movable_is_highmem())
			pages += zone_page_state(
					&pgdat->node_zones[ZONE_MOVABLE],
					NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the IRQ disabling out of the locking in that case to avoid a
 * potentially useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
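
/*
 * When ARCH_NEEDS_KMAP_HIGH_GET is defined, kmap_high_get() may run in
 * any context, including interrupts (see below), so every kmap_lock
 * holder must disable IRQs: otherwise an interrupt taking the same
 * lock on this CPU could deadlock.
 */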

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, (unsigned long)page_address(page),
			  &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

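/*
 * Find an unused pkmap entry for @page, install the mapping and return
 * its virtual address.  When the search index wraps around, entries
 * that are mapped but unused (count == 1) are reclaimed with
 * flush_all_zero_pkmaps(); if all entries are in use, sleep until
 * another task calls kunmap_high().  Called with kmap_lock held.
 */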
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep until somebody else unmaps one of their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &pkmap_page_table[last_pkmap_nr], mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_high);
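
/*
 * Typical highmem mapping pattern (an illustrative sketch, not part of
 * this file).  kmap() and kunmap() are the arch wrappers that end up in
 * kmap_high()/kunmap_high() for highmem pages:
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *	void *vaddr = kmap(page);	// may sleep, never call from IRQ
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *	__free_page(page);
 */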

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
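
/*
 * Sketch of the pinning pattern above (illustrative, not part of this
 * file): only touch an already-existing mapping, and drop the extra
 * reference that kmap_high_get() took.
 *
 *	void *vaddr = kmap_high_get(page);
 *	if (vaddr) {
 *		// ... use the existing mapping, even from IRQ context ...
 *		kunmap_high(page);
 *	}
 */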
#endif	/* ARCH_NEEDS_KMAP_HIGH_GET */

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);
#endif	/* CONFIG_HIGHMEM */

#if defined(HASHED_PAGE_VIRTUAL)

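/* 1 << PA_HASH_ORDER == 128 buckets in the page->virtual hash table below. */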
#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;	/* freelist */
static spinlock_t pool_lock;			/* protects page_address_pool */

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		BUG_ON(list_empty(&page_address_pool));

		spin_lock_irqsave(&pool_lock, flags);
		pam = list_entry(page_address_pool.next,
				struct page_address_map, list);
		list_del(&pam->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				spin_lock_irqsave(&pool_lock, flags);
				list_add_tail(&pam->list, &page_address_pool);
				spin_unlock_irqrestore(&pool_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

static struct page_address_map page_address_maps[LAST_PKMAP];

void __init page_address_init(void)
{
	int i;

	INIT_LIST_HEAD(&page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
		list_add(&page_address_maps[i].list, &page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
	spin_lock_init(&pool_lock);
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */