linux/arch/s390/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2020
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)   (((x) & (a)) >> ilog2(a) << ilog2(b))
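/*
 * For example, move_set_bit(x, 0x04, 0x40) returns 0x40 if bit 2 is set
 * in x, and 0 otherwise: the selected bit is shifted right by
 * ilog2(0x04) = 2 and then left by ilog2(0x40) = 6.
 */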

static inline unsigned long __pte_to_rste(pte_t pte)
{
        unsigned long rste;

        /*
         * Convert encoding               pte bits      pmd / pud bits
         *                              lIR.uswrdy.p    dy..R...I...wr
         * empty                        010.000000.0 -> 00..0...1...00
         * prot-none, clean, old        111.000000.1 -> 00..1...1...00
         * prot-none, clean, young      111.000001.1 -> 01..1...1...00
         * prot-none, dirty, old        111.000010.1 -> 10..1...1...00
         * prot-none, dirty, young      111.000011.1 -> 11..1...1...00
         * read-only, clean, old        111.000100.1 -> 00..1...1...01
         * read-only, clean, young      101.000101.1 -> 01..1...0...01
         * read-only, dirty, old        111.000110.1 -> 10..1...1...01
         * read-only, dirty, young      101.000111.1 -> 11..1...0...01
         * read-write, clean, old       111.001100.1 -> 00..1...1...11
         * read-write, clean, young     101.001101.1 -> 01..1...0...11
         * read-write, dirty, old       110.001110.1 -> 10..0...1...11
         * read-write, dirty, young     100.001111.1 -> 11..0...0...11
         * HW-bits: R read-only, I invalid
         * SW-bits: p present, y young, d dirty, r read, w write, s special,
         *          u unused, l large
         */
        if (pte_present(pte)) {
                rste = pte_val(pte) & PAGE_MASK;
                rste |= move_set_bit(pte_val(pte), _PAGE_READ,
                                     _SEGMENT_ENTRY_READ);
                rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
                                     _SEGMENT_ENTRY_WRITE);
                rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
                                     _SEGMENT_ENTRY_INVALID);
                rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
                                     _SEGMENT_ENTRY_PROTECT);
                rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
                                     _SEGMENT_ENTRY_DIRTY);
                rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
                                     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
                rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
                                     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
                rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
                                     _SEGMENT_ENTRY_NOEXEC);
        } else
                rste = _SEGMENT_ENTRY_EMPTY;
        return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
        int present;
        pte_t pte;

        if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                present = pud_present(__pud(rste));
        else
                present = pmd_present(__pmd(rste));

        /*
         * Convert encoding             pmd / pud bits      pte bits
         *                              dy..R...I...wr    lIR.uswrdy.p
         * empty                        00..0...1...00 -> 010.000000.0
         * prot-none, clean, old        00..1...1...00 -> 111.000000.1
         * prot-none, clean, young      01..1...1...00 -> 111.000001.1
         * prot-none, dirty, old        10..1...1...00 -> 111.000010.1
         * prot-none, dirty, young      11..1...1...00 -> 111.000011.1
         * read-only, clean, old        00..1...1...01 -> 111.000100.1
         * read-only, clean, young      01..1...0...01 -> 101.000101.1
         * read-only, dirty, old        10..1...1...01 -> 111.000110.1
         * read-only, dirty, young      11..1...0...01 -> 101.000111.1
         * read-write, clean, old       00..1...1...11 -> 111.001100.1
         * read-write, clean, young     01..1...0...11 -> 101.001101.1
         * read-write, dirty, old       10..0...1...11 -> 110.001110.1
         * read-write, dirty, young     11..0...0...11 -> 100.001111.1
         * HW-bits: R read-only, I invalid
         * SW-bits: p present, y young, d dirty, r read, w write, s special,
         *          u unused, l large
         */
        if (present) {
                pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
                pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
                                             _PAGE_READ);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
                                             _PAGE_WRITE);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
                                             _PAGE_INVALID);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
                                             _PAGE_PROTECT);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
                                             _PAGE_DIRTY);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
                                             _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
                                             _PAGE_SOFT_DIRTY);
#endif
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
                                             _PAGE_NOEXEC);
        } else
                pte_val(pte) = _PAGE_INVALID;
        return pte;
}

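/*
 * Initialize the storage keys of a huge page on first use, but only
 * if the mm makes use of storage keys and the new entry is valid. The
 * PG_arch_1 flag on the head page tracks whether the keys of this
 * page have already been initialized.
 */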
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
        struct page *page;
        unsigned long size, paddr;

        if (!mm_uses_skeys(mm) ||
            rste & _SEGMENT_ENTRY_INVALID)
                return;

        if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
                page = pud_page(__pud(rste));
                size = PUD_SIZE;
                paddr = rste & PUD_MASK;
        } else {
                page = pmd_page(__pmd(rste));
                size = PMD_SIZE;
                paddr = rste & PMD_MASK;
        }

        if (!test_and_set_bit(PG_arch_1, &page->flags))
                __storage_key_init_range(paddr, paddr + size - 1);
}

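/*
 * Set a huge pte: convert the pte to the segment / region-third table
 * entry format, drop the no-exec bit on machines without NX support,
 * add the correct table type and large-page bit, and initialize the
 * storage keys if necessary.
 */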
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        unsigned long rste;

        rste = __pte_to_rste(pte);
        if (!MACHINE_HAS_NX)
                rste &= ~_SEGMENT_ENTRY_NOEXEC;

        /* Set correct table type for 2G hugepages */
        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
                if (likely(pte_present(pte)))
                        rste |= _REGION3_ENTRY_LARGE;
                rste |= _REGION_ENTRY_TYPE_R3;
        } else if (likely(pte_present(pte)))
                rste |= _SEGMENT_ENTRY_LARGE;

        clear_huge_pte_skeys(mm, rste);
        pte_val(*ptep) = rste;
}

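/*
 * Read a huge pte: fetch the segment / region-third table entry and
 * convert it back to the pte software encoding.
 */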
pte_t huge_ptep_get(pte_t *ptep)
{
        return __rste_to_pte(pte_val(*ptep));
}

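/*
 * Clear a huge pte and return the old value. The entry is exchanged
 * directly, which also flushes the corresponding TLB entries.
 */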
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        pte_t pte = huge_ptep_get(ptep);
        pmd_t *pmdp = (pmd_t *) ptep;
        pud_t *pudp = (pud_t *) ptep;

        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
        else
                pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
        return pte;
}

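/*
 * Allocate page tables for a huge mapping: for 2G (PUD_SIZE) pages
 * the pud entry itself is used, for 1M (PMD_SIZE) pages the tables
 * are allocated down to pmd level.
 */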
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_alloc(mm, pgdp, addr);
        if (p4dp) {
                pudp = pud_alloc(mm, p4dp, addr);
                if (pudp) {
                        if (sz == PUD_SIZE)
                                return (pte_t *) pudp;
                        else if (sz == PMD_SIZE)
                                pmdp = pmd_alloc(mm, pudp, addr);
                }
        }
        return (pte_t *) pmdp;
}

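/*
 * Look up the page table entry for a huge mapping: return the pud
 * entry for a large pud mapping, the pmd entry otherwise, or NULL if
 * no translation exists.
 */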
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        if (pgd_present(*pgdp)) {
                p4dp = p4d_offset(pgdp, addr);
                if (p4d_present(*p4dp)) {
                        pudp = pud_offset(p4dp, addr);
                        if (pud_present(*pudp)) {
                                if (pud_large(*pudp))
                                        return (pte_t *) pudp;
                                pmdp = pmd_offset(pudp, addr);
                        }
                }
        }
        return (pte_t *) pmdp;
}

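/*
 * A pmd / pud maps a huge page iff its large-page bit is set.
 */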
int pmd_huge(pmd_t pmd)
{
        return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
        return pud_large(pud);
}

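/*
 * follow_page() helper for 2G huge pages. Taking an extra page
 * reference (FOLL_GET) is not supported here.
 */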
struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
                pud_t *pud, int flags)
{
        if (flags & FOLL_GET)
                return NULL;

        return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

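/*
 * 1M (PMD_SIZE) huge pages require the EDAT1 facility, 2G (PUD_SIZE)
 * huge pages the EDAT2 facility.
 */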
bool __init arch_hugetlb_valid_size(unsigned long size)
{
        if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
                return true;
        else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
                return true;
        else
                return false;
}

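/*
 * Search for a free area bottom-up, between mmap_base and TASK_SIZE,
 * aligned to the huge page size.
 */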
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

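/*
 * Search for a free area top-down, below mmap_base, aligned to the
 * huge page size. Falls back to a bottom-up search on failure.
 */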
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

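/*
 * Arch hook for hugetlb mmap(): check length and alignment and, unless
 * MAP_FIXED is given or the address hint can be used, search for a
 * free area bottom-up or top-down depending on the mm layout. Finally
 * check whether the address space limit (asce) needs to be extended.
 */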
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                goto check_asce_limit;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        goto check_asce_limit;
        }

        if (mm->get_unmapped_area == arch_get_unmapped_area)
                addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
        if (offset_in_page(addr))
                return addr;

check_asce_limit:
        return check_asce_limit(mm, addr, len);
}