linux/arch/s390/kvm/gaccess.h
/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>

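/*
 * Translate a guest real address into a host user-space pointer.
 * s390 prefixing swaps the first two pages of guest storage with the
 * two pages at the per-cpu prefix address, so both ranges are remapped
 * before the guest origin is added. Out-of-range addresses are
 * reported as an ERR_PTR(-EFAULT) encoded in the returned pointer.
 */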
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                               unsigned long guestaddr)
{
        unsigned long prefix  = vcpu->arch.sie_block->prefix;
        unsigned long origin  = vcpu->kvm->arch.guest_origin;
        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;
        else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;

        /* guest memory covers [0, memsize), so memsize itself is invalid */
        if (guestaddr >= memsize)
                return (void __user __force *) ERR_PTR(-EFAULT);

        guestaddr += origin;

        return (void __user *) guestaddr;
}

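/*
 * Aligned single-value accessors. Each helper translates the guest
 * address, asserts natural alignment, and forwards to get_user()
 * respectively put_user(), returning 0 on success or -EFAULT.
 *
 * A hypothetical caller reading a 32-bit value from guest real
 * address 0x1000 might look like this (a sketch, not taken from the
 * kernel sources):
 *
 *	u32 val;
 *	int rc = get_guest_u32(vcpu, 0x1000, &val);
 *	if (rc)
 *		return rc;
 */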
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u8 __user *) uptr);
}

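/*
 * Byte-wise fallback used when a copy crosses the low-core or prefix
 * boundaries: each byte is translated individually through
 * put_guest_u8(), so the prefix swap is applied per byte.
 */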
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       const void *from, unsigned long n)
{
        int rc;
        unsigned long i;
        const u8 *data = from;

        for (i = 0; i < n; i++) {
                rc = put_guest_u8(vcpu, guestdest++, *(data++));
                if (rc < 0)
                        return rc;
        }
        return 0;
}

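/*
 * Copy data from the host into guest memory. Copies that cross the
 * 2 * PAGE_SIZE boundary or either edge of the prefix area take the
 * byte-wise slow path; all other copies are remapped once and handed
 * to copy_to_user() in one piece.
 */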
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                const void *from, unsigned long n)
{
        unsigned long prefix  = vcpu->arch.sie_block->prefix;
        unsigned long origin  = vcpu->kvm->arch.guest_origin;
        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestdest < prefix) && (guestdest + n > prefix))
                goto slowpath;

        if ((guestdest < prefix + 2 * PAGE_SIZE)
            && (guestdest + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestdest < 2 * PAGE_SIZE)
                guestdest += prefix;
        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                guestdest -= prefix;

        if (guestdest + n > memsize)
                return -EFAULT;

        if (guestdest + n < guestdest)
                return -EFAULT;

        guestdest += origin;

        /*
         * copy_to_user() returns the number of bytes left uncopied;
         * map any failure to -EFAULT to match the callers' convention.
         */
        if (copy_to_user((void __user *) guestdest, from, n))
                return -EFAULT;
        return 0;
slowpath:
        return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

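/*
 * Byte-wise fallback for reads, mirroring __copy_to_guest_slow().
 */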
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = to;

        for (i = 0; i < n; i++) {
                rc = get_guest_u8(vcpu, guestsrc++, data++);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

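/*
 * Copy data from guest memory into the host, the counterpart of
 * copy_to_guest() with the same fast/slow path split.
 */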
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                                  unsigned long guestsrc, unsigned long n)
{
        unsigned long prefix  = vcpu->arch.sie_block->prefix;
        unsigned long origin  = vcpu->kvm->arch.guest_origin;
        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestsrc < prefix) && (guestsrc + n > prefix))
                goto slowpath;

        if ((guestsrc < prefix + 2 * PAGE_SIZE)
            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestsrc < 2 * PAGE_SIZE)
                guestsrc += prefix;
        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
                guestsrc -= prefix;

        if (guestsrc + n > memsize)
                return -EFAULT;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        guestsrc += origin;

        /*
         * copy_from_user() returns the number of bytes left uncopied;
         * map any failure to -EFAULT to match the callers' convention.
         */
        if (copy_from_user(to, (void __user *) guestsrc, n))
                return -EFAULT;
        return 0;
slowpath:
        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

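/*
 * Absolute variants: these bypass the prefix swap and interpret the
 * guest address as a guest absolute address, only bounds-checking it
 * against the guest memory size before adding the origin.
 */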
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                         unsigned long guestdest,
                                         const void *from, unsigned long n)
{
        unsigned long origin  = vcpu->kvm->arch.guest_origin;
        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

        if (guestdest + n > memsize)
                return -EFAULT;

        if (guestdest + n < guestdest)
                return -EFAULT;

        guestdest += origin;

        if (copy_to_user((void __user *) guestdest, from, n))
                return -EFAULT;
        return 0;
}

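/*
 * Read counterpart of copy_to_guest_absolute().
 */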
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                           unsigned long guestsrc,
                                           unsigned long n)
{
        unsigned long origin  = vcpu->kvm->arch.guest_origin;
        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

        if (guestsrc + n > memsize)
                return -EFAULT;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        guestsrc += origin;

        if (copy_from_user(to, (void __user *) guestsrc, n))
                return -EFAULT;
        return 0;
}
#endif