linux/kernel/kcmp.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/eventpoll.h>
#include <linux/file.h>

#include <asm/unistd.h>

/*
 * We don't expose the real in-memory order of objects for security reasons.
 * But the comparison results should still be suitable for sorting. So we
 * obfuscate kernel pointer values and compare the result instead.
 *
 * The obfuscation is done in two steps. First we xor the kernel pointer with
 * a random value, which puts the pointer into a new position in a reordered
 * space. Secondly we multiply the xor product by a large odd random number to
 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
 *
 * Note also that the obfuscation itself is invisible to userspace and if
 * needed it can be changed to an alternate scheme.
 */
static unsigned long cookies[KCMP_TYPES][2] __read_mostly;

static long kptr_obfuscate(long v, int type)
{
        return (v ^ cookies[type][0]) * cookies[type][1];
}
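
/*
 * Illustrative note (added for clarity, not part of the original source):
 * a quick worked example of why the odd multiplier above cannot introduce
 * collisions. Because an odd number is relatively prime to 2^n, multiplying
 * by it is a bijection on n-bit values, so two distinct xor'ed pointers
 * stay distinct even though the high bits of the full product are lost.
 * With 4-bit arithmetic and multiplier 5, for instance:
 *
 *         3 * 5 = 15,  15 mod 16 = 15
 *         7 * 5 = 35,  35 mod 16 =  3
 *
 * Distinct inputs map to distinct outputs. An even multiplier such as 4
 * would collide: 3 * 4 = 12, while 7 * 4 = 28 and 28 mod 16 = 12 as well.
 */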

/*
 * 0 - equal, i.e. v1 = v2
 * 1 - less than, i.e. v1 < v2
 * 2 - greater than, i.e. v1 > v2
 * 3 - not equal but ordering unavailable (reserved for future)
 */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
        long t1, t2;

        t1 = kptr_obfuscate((long)v1, type);
        t2 = kptr_obfuscate((long)v2, type);

        return (t1 < t2) | ((t1 > t2) << 1);
}
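
/*
 * Note (added for clarity): the bitwise expression above encodes the three
 * comparison outcomes without branches: equal obfuscated values yield 0,
 * t1 < t2 yields 1, and t1 > t2 yields 2, matching the table above.
 */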

/* The caller must have pinned the task */
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
        struct file *file;

        rcu_read_lock();
        file = task_lookup_fd_rcu(task, idx);
        rcu_read_unlock();

        return file;
}

static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
        if (likely(l2 != l1))
                up_read(l2);
        up_read(l1);
}
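
/*
 * Note (added for clarity): kcmp_lock() below always takes the two
 * rw_semaphores in a fixed, address-based order, so two concurrent kcmp()
 * callers working on the same pair of tasks cannot deadlock in an ABBA
 * fashion.
 */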
static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
        int err;

        if (l2 > l1)
                swap(l1, l2);

        err = down_read_killable(l1);
        if (!err && likely(l1 != l2)) {
                err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
                if (err)
                        up_read(l1);
        }

        return err;
}
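
/*
 * Note (added for clarity): KCMP_EPOLL_TFD compares task1's file at idx1
 * against a target file that task2 has registered in one of its epoll
 * instances; the userspace-supplied kcmp_epoll_slot names the epoll fd
 * (efd), the target fd (tfd) and its position (toff).
 */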
#ifdef CONFIG_EPOLL
static int kcmp_epoll_target(struct task_struct *task1,
                             struct task_struct *task2,
                             unsigned long idx1,
                             struct kcmp_epoll_slot __user *uslot)
{
        struct file *filp, *filp_epoll, *filp_tgt;
        struct kcmp_epoll_slot slot;

        if (copy_from_user(&slot, uslot, sizeof(slot)))
                return -EFAULT;

        filp = get_file_raw_ptr(task1, idx1);
        if (!filp)
                return -EBADF;

        filp_epoll = fget_task(task2, slot.efd);
        if (!filp_epoll)
                return -EBADF;

        filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
        fput(filp_epoll);

        if (IS_ERR(filp_tgt))
                return PTR_ERR(filp_tgt);

        return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
}
#else
static int kcmp_epoll_target(struct task_struct *task1,
                             struct task_struct *task2,
                             unsigned long idx1,
                             struct kcmp_epoll_slot __user *uslot)
{
        return -EOPNOTSUPP;
}
#endif

SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
                unsigned long, idx1, unsigned long, idx2)
{
        struct task_struct *task1, *task2;
        int ret;

        rcu_read_lock();

        /*
         * Tasks are looked up in the caller's PID namespace only.
         */
        task1 = find_task_by_vpid(pid1);
        task2 = find_task_by_vpid(pid2);
        if (!task1 || !task2)
                goto err_no_task;

        get_task_struct(task1);
        get_task_struct(task2);

        rcu_read_unlock();

        /*
         * One should have enough rights to inspect task details.
         */
        ret = kcmp_lock(&task1->signal->exec_update_lock,
                        &task2->signal->exec_update_lock);
        if (ret)
                goto err;
        if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
            !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
                ret = -EPERM;
                goto err_unlock;
        }

        switch (type) {
        case KCMP_FILE: {
                struct file *filp1, *filp2;

                filp1 = get_file_raw_ptr(task1, idx1);
                filp2 = get_file_raw_ptr(task2, idx2);

                if (filp1 && filp2)
                        ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
                else
                        ret = -EBADF;
                break;
        }
        case KCMP_VM:
                ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
                break;
        case KCMP_FILES:
                ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
                break;
        case KCMP_FS:
                ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
                break;
        case KCMP_SIGHAND:
                ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
                break;
        case KCMP_IO:
                ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
                break;
        case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
                ret = kcmp_ptr(task1->sysvsem.undo_list,
                               task2->sysvsem.undo_list,
                               KCMP_SYSVSEM);
#else
                ret = -EOPNOTSUPP;
#endif
                break;
        case KCMP_EPOLL_TFD:
                ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
                break;
        default:
                ret = -EINVAL;
                break;
        }

err_unlock:
        kcmp_unlock(&task1->signal->exec_update_lock,
                    &task2->signal->exec_update_lock);
err:
        put_task_struct(task1);
        put_task_struct(task2);

        return ret;

err_no_task:
        rcu_read_unlock();
        return -ESRCH;
}
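
/*
 * A minimal userspace sketch (illustrative only, not part of this file):
 * glibc provides no wrapper for kcmp(), so it is called via syscall(2).
 * The example checks whether two file descriptors of the calling process
 * share the same struct file, e.g. after dup(); the sys_kcmp() helper name
 * is just for the example.
 *
 *      #include <stdio.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/kcmp.h>
 *
 *      static long sys_kcmp(pid_t pid1, pid_t pid2, int type,
 *                           unsigned long idx1, unsigned long idx2)
 *      {
 *              return syscall(SYS_kcmp, pid1, pid2, type, idx1, idx2);
 *      }
 *
 *      int main(void)
 *      {
 *              pid_t pid = getpid();
 *              int fd = dup(STDOUT_FILENO);
 *
 *              // 0: same struct file, 1/2: distinct (ordered), <0: error
 *              printf("%ld\n", sys_kcmp(pid, pid, KCMP_FILE,
 *                                       STDOUT_FILENO, fd));
 *              return 0;
 *      }
 */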

static __init int kcmp_cookies_init(void)
{
        int i;

        get_random_bytes(cookies, sizeof(cookies));

        /* Force each multiplier to be both large (top bit set) and odd */
        for (i = 0; i < KCMP_TYPES; i++)
                cookies[i][1] |= (~(~0UL >>  1) | 1);

        return 0;
}
arch_initcall(kcmp_cookies_init);