linux/arch/s390/mm/maccess.c
   1/*
   2 * Access kernel memory without faulting -- s390 specific implementation.
   3 *
   4 * Copyright IBM Corp. 2009
   5 *
   6 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
   7 *
   8 */
   9
  10#include <linux/uaccess.h>
  11#include <linux/kernel.h>
  12#include <linux/types.h>
  13#include <linux/errno.h>
  14#include <asm/system.h>
  15
  16/*
  17 * This function writes to kernel memory bypassing DAT and possible
  18 * write protection. It copies one to four bytes from src to dst
  19 * using the stura instruction.
  20 * Returns the number of bytes copied or -EFAULT.
  21 */
  22static long probe_kernel_write_odd(void *dst, void *src, size_t size)
  23{
  24        unsigned long count, aligned;
  25        int offset, mask;
  26        int rc = -EFAULT;
  27
  28        aligned = (unsigned long) dst & ~3UL;
  29        offset = (unsigned long) dst & 3;
  30        count = min_t(unsigned long, 4 - offset, size);
  31        mask = (0xf << (4 - count)) & 0xf;
  32        mask >>= offset;
  33        asm volatile(
  34                "       bras    1,0f\n"
  35                "       icm     0,0,0(%3)\n"
  36                "0:     l       0,0(%1)\n"
  37                "       lra     %1,0(%1)\n"
  38                "1:     ex      %2,0(1)\n"
  39                "2:     stura   0,%1\n"
  40                "       la      %0,0\n"
  41                "3:\n"
  42                EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
  43                : "+d" (rc), "+a" (aligned)
  44                : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
  45        return rc ? rc : count;
  46}
  47
  48long probe_kernel_write(void *dst, void *src, size_t size)
  49{
  50        long copied = 0;
  51
  52        while (size) {
  53                copied = probe_kernel_write_odd(dst, src, size);
  54                if (copied < 0)
  55                        break;
  56                dst += copied;
  57                src += copied;
  58                size -= copied;
  59        }
  60        return copied < 0 ? -EFAULT : 0;
  61}
  62
/*
 * Copy "count" bytes with mvcle while DAT and interrupts are switched
 * off via the PSW system mask, so the addresses are treated as real
 * addresses. Returns 0 on success or -EFAULT.
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	/* mvcle needs even/odd register pairs: r2/r3 = dest+len, r4/r5 = src+len */
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	unsigned long flags;
	int rc = -EFAULT;	/* stays -EFAULT unless "lhi %0,0x0" is reached */

	if (!count)		/* nothing to do for a zero-length copy */
		return 0;
	/*
	 * AND the PSW system mask with 0xf8: clears the DAT, I/O and
	 * external interrupt bits for the duration of the copy.
	 */
	flags = __arch_local_irq_stnsm(0xf8UL);
	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		/* cc==3 means mvcle was interrupted before finishing: retry. */
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"	/* completed: rc = 0 */
		"2:\n"
		/*
		 * NOTE(review): the fixup entry is for 1b, presumably because
		 * on a suppressed fault the old PSW points past the mvcle;
		 * a fault then resumes at 2: with rc still -EFAULT.
		 */
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	/* Restore the saved PSW mask (re-enables DAT/interrupts as before). */
	arch_local_irq_restore(flags);
	return rc;
}
  88