linux/arch/s390/mm/mmap.c
/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)

static inline unsigned long mmap_base(void)
{
        unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

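        /* Place the mmap base a page-aligned 'gap' below TASK_SIZE. */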
        return TASK_SIZE - (gap & PAGE_MASK);
}

static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
        /*
         * Force standard allocation for 64 bit programs.
         */
        if (!test_thread_flag(TIF_31BIT))
                return 1;
#endif
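        /*
         * Use the legacy bottom-up layout if it was requested via sysctl
         * or personality, or if the stack rlimit is unlimited.
         */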
        return sysctl_legacy_va_layout ||
            (current->personality & ADDR_COMPAT_LAYOUT) ||
            current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
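                /* Top-down layout just below the stack gap. */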
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else

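/*
 * On 64 bit the generic allocators are wrapped so that the process
 * page table can be upgraded to more levels when a mapping would end
 * above the current asce_limit of the address space.
 */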
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        int rc;

        addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
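        /* A non page-aligned result is an error code. */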
        if (addr & ~PAGE_MASK)
                return addr;
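        /*
         * If the mapping ends above the current address space limit,
         * upgrade the page table via crst_table_upgrade().
         */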
        if (unlikely(mm->context.asce_limit < addr + len)) {
                rc = crst_table_upgrade(mm, addr + len);
                if (rc)
                        return (unsigned long) rc;
        }
        return addr;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int rc;

        addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
        if (addr & ~PAGE_MASK)
                return addr;
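        /* Same page table upgrade as in s390_get_unmapped_area() above. */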
        if (unlikely(mm->context.asce_limit < addr + len)) {
                rc = crst_table_upgrade(mm, addr + len);
                if (rc)
                        return (unsigned long) rc;
        }
        return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = s390_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
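                /* Top-down layout just below the stack gap. */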
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = s390_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif
