/* linux/include/linux/mman.h */
   1#ifndef _LINUX_MMAN_H
   2#define _LINUX_MMAN_H
   3
   4#include <asm/mman.h>
   5
/*
 * mremap(2) flags — presumably: MAYMOVE permits relocating the mapping,
 * FIXED pins the new address; verify against mm/mremap.c.
 */
#define MREMAP_MAYMOVE  1
#define MREMAP_FIXED    2

/*
 * Overcommit policy values — looks like the valid settings for
 * sysctl_overcommit_memory (declared below); confirm against mm/mmap.c.
 */
#define OVERCOMMIT_GUESS                0
#define OVERCOMMIT_ALWAYS               1
#define OVERCOMMIT_NEVER                2
  12
  13#ifdef __KERNEL__
  14#include <linux/mm.h>
  15#include <linux/percpu_counter.h>
  16
  17#include <linux/atomic.h>
  18
  19extern int sysctl_overcommit_memory;
  20extern int sysctl_overcommit_ratio;
  21extern struct percpu_counter vm_committed_as;
  22
/*
 * Adjust the system-wide committed-VM counter by @pages.  @pages may be
 * negative (see vm_unacct_memory() below).  Backed by a percpu_counter
 * to keep the update cheap.
 */
static inline void vm_acct_memory(long pages)
{
        percpu_counter_add(&vm_committed_as, pages);
}
  27
  28static inline void vm_unacct_memory(long pages)
  29{
  30        vm_acct_memory(-pages);
  31}
  32
  33/*
  34 * Allow architectures to handle additional protection bits
  35 */
  36
/* Default when the architecture defines no extra prot->vm_flags bits. */
#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

/* Default when the architecture adds no extra page-protection bits. */
#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif
  44
  45#ifndef arch_validate_prot
  46/*
  47 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
  48 * already been masked out.
  49 *
  50 * Returns true if the prot flags are valid
  51 */
  52static inline int arch_validate_prot(unsigned long prot)
  53{
  54        return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
  55}
  56#define arch_validate_prot arch_validate_prot
  57#endif
  58
  59/*
  60 * Optimisation macro.  It is equivalent to:
  61 *      (x & bit1) ? bit2 : 0
  62 * but this version is faster.
  63 * ("bit1" and "bit2" must be single bits)
  64 */
  65#define _calc_vm_trans(x, bit1, bit2) \
  66  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
  67   : ((x) & (bit1)) / ((bit1) / (bit2)))
  68
  69/*
  70 * Combine the mmap "prot" argument into "vm_flags" used internally.
  71 */
  72static inline unsigned long
  73calc_vm_prot_bits(unsigned long prot)
  74{
  75        return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
  76               _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
  77               _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
  78               arch_calc_vm_prot_bits(prot);
  79}
  80
  81/*
  82 * Combine the mmap "flags" argument into "vm_flags" used internally.
  83 */
  84static inline unsigned long
  85calc_vm_flag_bits(unsigned long flags)
  86{
  87        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
  88               _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
  89               _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
  90               _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
  91}
  92#endif /* __KERNEL__ */
  93#endif /* _LINUX_MMAN_H */
  94
/* Source: lxr.linux.no, hosted by Redpill Linpro AS. */