linux/kernel/user_namespace.c
<<
>>
Prefs
   1/*
   2 *  This program is free software; you can redistribute it and/or
   3 *  modify it under the terms of the GNU General Public License as
   4 *  published by the Free Software Foundation, version 2 of the
   5 *  License.
   6 */
   7
   8#include <linux/export.h>
   9#include <linux/nsproxy.h>
  10#include <linux/slab.h>
  11#include <linux/user_namespace.h>
  12#include <linux/proc_fs.h>
  13#include <linux/highuid.h>
  14#include <linux/cred.h>
  15#include <linux/securebits.h>
  16#include <linux/keyctl.h>
  17#include <linux/key-type.h>
  18#include <keys/user-type.h>
  19#include <linux/seq_file.h>
  20#include <linux/fs.h>
  21#include <linux/uaccess.h>
  22#include <linux/ctype.h>
  23#include <linux/projid.h>
  24#include <linux/fs_struct.h>
  25
/* Slab cache from which struct user_namespace objects are allocated */
static struct kmem_cache *user_ns_cachep __read_mostly;

/* Forward declaration; the definition lives below, near map_write() */
static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *map);
  31
/*
 * set_cred_user_ns - point @cred at @user_ns with a reset capability state.
 *
 * Installs a full permitted/effective/bset capability set on @cred, but
 * those capabilities only have meaning inside @user_ns.  @user_ns is
 * assigned without taking a new reference; callers manage the refcount.
 */
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
        /* Start with the same capabilities as init but useless for doing
         * anything as the capabilities are bound to the new user namespace.
         */
        cred->securebits = SECUREBITS_DEFAULT;
        cred->cap_inheritable = CAP_EMPTY_SET;
        cred->cap_permitted = CAP_FULL_SET;
        cred->cap_effective = CAP_FULL_SET;
        cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
        /* Drop any request_key authorisation token tied to the old ns */
        key_put(cred->request_key_auth);
        cred->request_key_auth = NULL;
#endif
        /* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
        cred->user_ns = user_ns;
}
  49
/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 *
 * Returns 0 on success, -EPERM when policy forbids the creation, -ENOMEM on
 * allocation failure, or the error from proc_alloc_inum().
 */
int create_user_ns(struct cred *new)
{
        struct user_namespace *ns, *parent_ns = new->user_ns;
        kuid_t owner = new->euid;
        kgid_t group = new->egid;
        int ret;

        /*
         * Verify that we can not violate the policy of which files
         * may be accessed that is specified by the root directory,
         * by verifying that the root directory is at the root of the
         * mount namespace which allows all files to be accessed.
         */
        if (current_chrooted())
                return -EPERM;

        /* The creator needs a mapping in the parent user namespace
         * or else we won't be able to reasonably tell userspace who
         * created a user_namespace.
         */
        if (!kuid_has_mapping(parent_ns, owner) ||
            !kgid_has_mapping(parent_ns, group))
                return -EPERM;

        ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
        if (!ns)
                return -ENOMEM;

        /* Allocate the inode number backing /proc/<pid>/ns/user */
        ret = proc_alloc_inum(&ns->proc_inum);
        if (ret) {
                kmem_cache_free(user_ns_cachep, ns);
                return ret;
        }

        atomic_set(&ns->count, 1);
        /* Leave the new->user_ns reference with the new user namespace. */
        ns->parent = parent_ns;
        ns->owner = owner;
        ns->group = group;

        set_cred_user_ns(new, ns);

        update_mnt_policy(ns);

        return 0;
}
 104
 105int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
 106{
 107        struct cred *cred;
 108
 109        if (!(unshare_flags & CLONE_NEWUSER))
 110                return 0;
 111
 112        cred = prepare_creds();
 113        if (!cred)
 114                return -ENOMEM;
 115
 116        *new_cred = cred;
 117        return create_user_ns(cred);
 118}
 119
/*
 * free_user_ns - release @ns and any parents whose count drops to zero.
 *
 * NOTE(review): presumably invoked once @ns's own refcount has already
 * hit zero (from put_user_ns) — verify against the caller.  The loop
 * frees the namespace, then drops the reference it held on its parent;
 * if that was the parent's last reference the parent is freed in the
 * same pass, iteratively rather than recursively.
 */
void free_user_ns(struct user_namespace *ns)
{
        struct user_namespace *parent;

        do {
                parent = ns->parent;
                proc_free_inum(ns->proc_inum);
                kmem_cache_free(user_ns_cachep, ns);
                ns = parent;
        } while (atomic_dec_and_test(&parent->count));
}
EXPORT_SYMBOL(free_user_ns);
 132
 133static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 134{
 135        unsigned idx, extents;
 136        u32 first, last, id2;
 137
 138        id2 = id + count - 1;
 139
 140        /* Find the matching extent */
 141        extents = map->nr_extents;
 142        smp_read_barrier_depends();
 143        for (idx = 0; idx < extents; idx++) {
 144                first = map->extent[idx].first;
 145                last = first + map->extent[idx].count - 1;
 146                if (id >= first && id <= last &&
 147                    (id2 >= first && id2 <= last))
 148                        break;
 149        }
 150        /* Map the id or note failure */
 151        if (idx < extents)
 152                id = (id - first) + map->extent[idx].lower_first;
 153        else
 154                id = (u32) -1;
 155
 156        return id;
 157}
 158
 159static u32 map_id_down(struct uid_gid_map *map, u32 id)
 160{
 161        unsigned idx, extents;
 162        u32 first, last;
 163
 164        /* Find the matching extent */
 165        extents = map->nr_extents;
 166        smp_read_barrier_depends();
 167        for (idx = 0; idx < extents; idx++) {
 168                first = map->extent[idx].first;
 169                last = first + map->extent[idx].count - 1;
 170                if (id >= first && id <= last)
 171                        break;
 172        }
 173        /* Map the id or note failure */
 174        if (idx < extents)
 175                id = (id - first) + map->extent[idx].lower_first;
 176        else
 177                id = (u32) -1;
 178
 179        return id;
 180}
 181
 182static u32 map_id_up(struct uid_gid_map *map, u32 id)
 183{
 184        unsigned idx, extents;
 185        u32 first, last;
 186
 187        /* Find the matching extent */
 188        extents = map->nr_extents;
 189        smp_read_barrier_depends();
 190        for (idx = 0; idx < extents; idx++) {
 191                first = map->extent[idx].lower_first;
 192                last = first + map->extent[idx].count - 1;
 193                if (id >= first && id <= last)
 194                        break;
 195        }
 196        /* Map the id or note failure */
 197        if (idx < extents)
 198                id = (id - first) + map->extent[idx].first;
 199        else
 200                id = (u32) -1;
 201
 202        return id;
 203}
 204
 205/**
 206 *      make_kuid - Map a user-namespace uid pair into a kuid.
 207 *      @ns:  User namespace that the uid is in
 208 *      @uid: User identifier
 209 *
 210 *      Maps a user-namespace uid pair into a kernel internal kuid,
 211 *      and returns that kuid.
 212 *
 213 *      When there is no mapping defined for the user-namespace uid
 214 *      pair INVALID_UID is returned.  Callers are expected to test
 215 *      for and handle handle INVALID_UID being returned.  INVALID_UID
 216 *      may be tested for using uid_valid().
 217 */
 218kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
 219{
 220        /* Map the uid to a global kernel uid */
 221        return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
 222}
 223EXPORT_SYMBOL(make_kuid);
 224
 225/**
 226 *      from_kuid - Create a uid from a kuid user-namespace pair.
 227 *      @targ: The user namespace we want a uid in.
 228 *      @kuid: The kernel internal uid to start with.
 229 *
 230 *      Map @kuid into the user-namespace specified by @targ and
 231 *      return the resulting uid.
 232 *
 233 *      There is always a mapping into the initial user_namespace.
 234 *
 235 *      If @kuid has no mapping in @targ (uid_t)-1 is returned.
 236 */
 237uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
 238{
 239        /* Map the uid from a global kernel uid */
 240        return map_id_up(&targ->uid_map, __kuid_val(kuid));
 241}
 242EXPORT_SYMBOL(from_kuid);
 243
 244/**
 245 *      from_kuid_munged - Create a uid from a kuid user-namespace pair.
 246 *      @targ: The user namespace we want a uid in.
 247 *      @kuid: The kernel internal uid to start with.
 248 *
 249 *      Map @kuid into the user-namespace specified by @targ and
 250 *      return the resulting uid.
 251 *
 252 *      There is always a mapping into the initial user_namespace.
 253 *
 254 *      Unlike from_kuid from_kuid_munged never fails and always
 255 *      returns a valid uid.  This makes from_kuid_munged appropriate
 256 *      for use in syscalls like stat and getuid where failing the
 257 *      system call and failing to provide a valid uid are not an
 258 *      options.
 259 *
 260 *      If @kuid has no mapping in @targ overflowuid is returned.
 261 */
 262uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
 263{
 264        uid_t uid;
 265        uid = from_kuid(targ, kuid);
 266
 267        if (uid == (uid_t) -1)
 268                uid = overflowuid;
 269        return uid;
 270}
 271EXPORT_SYMBOL(from_kuid_munged);
 272
 273/**
 274 *      make_kgid - Map a user-namespace gid pair into a kgid.
 275 *      @ns:  User namespace that the gid is in
 276 *      @uid: group identifier
 277 *
 278 *      Maps a user-namespace gid pair into a kernel internal kgid,
 279 *      and returns that kgid.
 280 *
 281 *      When there is no mapping defined for the user-namespace gid
 282 *      pair INVALID_GID is returned.  Callers are expected to test
 283 *      for and handle INVALID_GID being returned.  INVALID_GID may be
 284 *      tested for using gid_valid().
 285 */
 286kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
 287{
 288        /* Map the gid to a global kernel gid */
 289        return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
 290}
 291EXPORT_SYMBOL(make_kgid);
 292
 293/**
 294 *      from_kgid - Create a gid from a kgid user-namespace pair.
 295 *      @targ: The user namespace we want a gid in.
 296 *      @kgid: The kernel internal gid to start with.
 297 *
 298 *      Map @kgid into the user-namespace specified by @targ and
 299 *      return the resulting gid.
 300 *
 301 *      There is always a mapping into the initial user_namespace.
 302 *
 303 *      If @kgid has no mapping in @targ (gid_t)-1 is returned.
 304 */
 305gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
 306{
 307        /* Map the gid from a global kernel gid */
 308        return map_id_up(&targ->gid_map, __kgid_val(kgid));
 309}
 310EXPORT_SYMBOL(from_kgid);
 311
 312/**
 313 *      from_kgid_munged - Create a gid from a kgid user-namespace pair.
 314 *      @targ: The user namespace we want a gid in.
 315 *      @kgid: The kernel internal gid to start with.
 316 *
 317 *      Map @kgid into the user-namespace specified by @targ and
 318 *      return the resulting gid.
 319 *
 320 *      There is always a mapping into the initial user_namespace.
 321 *
 322 *      Unlike from_kgid from_kgid_munged never fails and always
 323 *      returns a valid gid.  This makes from_kgid_munged appropriate
 324 *      for use in syscalls like stat and getgid where failing the
 325 *      system call and failing to provide a valid gid are not options.
 326 *
 327 *      If @kgid has no mapping in @targ overflowgid is returned.
 328 */
 329gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
 330{
 331        gid_t gid;
 332        gid = from_kgid(targ, kgid);
 333
 334        if (gid == (gid_t) -1)
 335                gid = overflowgid;
 336        return gid;
 337}
 338EXPORT_SYMBOL(from_kgid_munged);
 339
 340/**
 341 *      make_kprojid - Map a user-namespace projid pair into a kprojid.
 342 *      @ns:  User namespace that the projid is in
 343 *      @projid: Project identifier
 344 *
 345 *      Maps a user-namespace uid pair into a kernel internal kuid,
 346 *      and returns that kuid.
 347 *
 348 *      When there is no mapping defined for the user-namespace projid
 349 *      pair INVALID_PROJID is returned.  Callers are expected to test
 350 *      for and handle handle INVALID_PROJID being returned.  INVALID_PROJID
 351 *      may be tested for using projid_valid().
 352 */
 353kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
 354{
 355        /* Map the uid to a global kernel uid */
 356        return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
 357}
 358EXPORT_SYMBOL(make_kprojid);
 359
 360/**
 361 *      from_kprojid - Create a projid from a kprojid user-namespace pair.
 362 *      @targ: The user namespace we want a projid in.
 363 *      @kprojid: The kernel internal project identifier to start with.
 364 *
 365 *      Map @kprojid into the user-namespace specified by @targ and
 366 *      return the resulting projid.
 367 *
 368 *      There is always a mapping into the initial user_namespace.
 369 *
 370 *      If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 371 */
 372projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
 373{
 374        /* Map the uid from a global kernel uid */
 375        return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
 376}
 377EXPORT_SYMBOL(from_kprojid);
 378
 379/**
 380 *      from_kprojid_munged - Create a projiid from a kprojid user-namespace pair.
 381 *      @targ: The user namespace we want a projid in.
 382 *      @kprojid: The kernel internal projid to start with.
 383 *
 384 *      Map @kprojid into the user-namespace specified by @targ and
 385 *      return the resulting projid.
 386 *
 387 *      There is always a mapping into the initial user_namespace.
 388 *
 389 *      Unlike from_kprojid from_kprojid_munged never fails and always
 390 *      returns a valid projid.  This makes from_kprojid_munged
 391 *      appropriate for use in syscalls like stat and where
 392 *      failing the system call and failing to provide a valid projid are
 393 *      not an options.
 394 *
 395 *      If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 396 */
 397projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
 398{
 399        projid_t projid;
 400        projid = from_kprojid(targ, kprojid);
 401
 402        if (projid == (projid_t) -1)
 403                projid = OVERFLOW_PROJID;
 404        return projid;
 405}
 406EXPORT_SYMBOL(from_kprojid_munged);
 407
 408
 409static int uid_m_show(struct seq_file *seq, void *v)
 410{
 411        struct user_namespace *ns = seq->private;
 412        struct uid_gid_extent *extent = v;
 413        struct user_namespace *lower_ns;
 414        uid_t lower;
 415
 416        lower_ns = seq_user_ns(seq);
 417        if ((lower_ns == ns) && lower_ns->parent)
 418                lower_ns = lower_ns->parent;
 419
 420        lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
 421
 422        seq_printf(seq, "%10u %10u %10u\n",
 423                extent->first,
 424                lower,
 425                extent->count);
 426
 427        return 0;
 428}
 429
 430static int gid_m_show(struct seq_file *seq, void *v)
 431{
 432        struct user_namespace *ns = seq->private;
 433        struct uid_gid_extent *extent = v;
 434        struct user_namespace *lower_ns;
 435        gid_t lower;
 436
 437        lower_ns = seq_user_ns(seq);
 438        if ((lower_ns == ns) && lower_ns->parent)
 439                lower_ns = lower_ns->parent;
 440
 441        lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
 442
 443        seq_printf(seq, "%10u %10u %10u\n",
 444                extent->first,
 445                lower,
 446                extent->count);
 447
 448        return 0;
 449}
 450
 451static int projid_m_show(struct seq_file *seq, void *v)
 452{
 453        struct user_namespace *ns = seq->private;
 454        struct uid_gid_extent *extent = v;
 455        struct user_namespace *lower_ns;
 456        projid_t lower;
 457
 458        lower_ns = seq_user_ns(seq);
 459        if ((lower_ns == ns) && lower_ns->parent)
 460                lower_ns = lower_ns->parent;
 461
 462        lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
 463
 464        seq_printf(seq, "%10u %10u %10u\n",
 465                extent->first,
 466                lower,
 467                extent->count);
 468
 469        return 0;
 470}
 471
 472static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map)
 473{
 474        struct uid_gid_extent *extent = NULL;
 475        loff_t pos = *ppos;
 476
 477        if (pos < map->nr_extents)
 478                extent = &map->extent[pos];
 479
 480        return extent;
 481}
 482
 483static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
 484{
 485        struct user_namespace *ns = seq->private;
 486
 487        return m_start(seq, ppos, &ns->uid_map);
 488}
 489
 490static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
 491{
 492        struct user_namespace *ns = seq->private;
 493
 494        return m_start(seq, ppos, &ns->gid_map);
 495}
 496
 497static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
 498{
 499        struct user_namespace *ns = seq->private;
 500
 501        return m_start(seq, ppos, &ns->projid_map);
 502}
 503
 504static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
 505{
 506        (*pos)++;
 507        return seq->op->start(seq, pos);
 508}
 509
 510static void m_stop(struct seq_file *seq, void *v)
 511{
 512        return;
 513}
 514
/* seq_file operations backing reads of /proc/<pid>/uid_map */
struct seq_operations proc_uid_seq_operations = {
        .start = uid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = uid_m_show,
};

/* seq_file operations backing reads of /proc/<pid>/gid_map */
struct seq_operations proc_gid_seq_operations = {
        .start = gid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = gid_m_show,
};

/* seq_file operations backing reads of /proc/<pid>/projid_map */
struct seq_operations proc_projid_seq_operations = {
        .start = projid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = projid_m_show,
};
 535
 536static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent *extent)
 537{
 538        u32 upper_first, lower_first, upper_last, lower_last;
 539        unsigned idx;
 540
 541        upper_first = extent->first;
 542        lower_first = extent->lower_first;
 543        upper_last = upper_first + extent->count - 1;
 544        lower_last = lower_first + extent->count - 1;
 545
 546        for (idx = 0; idx < new_map->nr_extents; idx++) {
 547                u32 prev_upper_first, prev_lower_first;
 548                u32 prev_upper_last, prev_lower_last;
 549                struct uid_gid_extent *prev;
 550
 551                prev = &new_map->extent[idx];
 552
 553                prev_upper_first = prev->first;
 554                prev_lower_first = prev->lower_first;
 555                prev_upper_last = prev_upper_first + prev->count - 1;
 556                prev_lower_last = prev_lower_first + prev->count - 1;
 557
 558                /* Does the upper range intersect a previous extent? */
 559                if ((prev_upper_first <= upper_last) &&
 560                    (prev_upper_last >= upper_first))
 561                        return true;
 562
 563                /* Does the lower range intersect a previous extent? */
 564                if ((prev_lower_first <= lower_last) &&
 565                    (prev_lower_last >= lower_first))
 566                        return true;
 567        }
 568        return false;
 569}
 570
 571
/* Serializes all id-map writes; see the big comment in map_write() */
static DEFINE_MUTEX(id_map_mutex);
 573
/*
 * map_write - parse and install a uid/gid/projid map written to a proc file.
 *
 * @file:       the opened map file (its f_cred identifies the writer)
 * @buf/@count: user-supplied text, one "first lower_first count" extent
 *              per line, at most one page and at most
 *              UID_GID_MAP_MAX_EXTENTS lines
 * @ppos:       must be 0; only whole-file writes are accepted
 * @cap_setid:  capability that privileges this map type, or -1 for none
 * @map:        the target map; may only be written once, ever
 * @parent_map: the parent namespace's map used to translate lower ids
 *              into the kernel's global id space
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t map_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos,
                         int cap_setid,
                         struct uid_gid_map *map,
                         struct uid_gid_map *parent_map)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct uid_gid_map new_map;
        unsigned idx;
        struct uid_gid_extent *extent = NULL;
        unsigned long page = 0;
        char *kbuf, *pos, *next_line;
        ssize_t ret = -EINVAL;

        /*
         * The id_map_mutex serializes all writes to any given map.
         *
         * Any map is only ever written once.
         *
         * An id map fits within 1 cache line on most architectures.
         *
         * On read nothing needs to be done unless you are on an
         * architecture with a crazy cache coherency model like alpha.
         *
         * There is a one time data dependency between reading the
         * count of the extents and the values of the extents.  The
         * desired behavior is to see the values of the extents that
         * were written before the count of the extents.
         *
         * To achieve this smp_wmb() is used to guarantee the write
         * order and smp_read_barrier_depends() guarantees that we
         * don't have crazy architectures returning stale data.
         *
         */
        mutex_lock(&id_map_mutex);

        ret = -EPERM;
        /* Only allow one successful write to the map */
        if (map->nr_extents != 0)
                goto out;

        /*
         * Adjusting namespace settings requires capabilities on the target.
         */
        if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
                goto out;

        /* Get a buffer */
        ret = -ENOMEM;
        page = __get_free_page(GFP_TEMPORARY);
        kbuf = (char *) page;
        if (!page)
                goto out;

        /* Only allow <= page size writes at the beginning of the file */
        ret = -EINVAL;
        if ((*ppos != 0) || (count >= PAGE_SIZE))
                goto out;

        /* Slurp in the user data */
        ret = -EFAULT;
        if (copy_from_user(kbuf, buf, count))
                goto out;
        /* NUL-terminate; safe because count < PAGE_SIZE was checked above */
        kbuf[count] = '\0';

        /* Parse the user data */
        ret = -EINVAL;
        pos = kbuf;
        new_map.nr_extents = 0;
        for (;pos; pos = next_line) {
                extent = &new_map.extent[new_map.nr_extents];

                /* Find the end of line and ensure I don't look past it */
                next_line = strchr(pos, '\n');
                if (next_line) {
                        *next_line = '\0';
                        next_line++;
                        if (*next_line == '\0')
                                next_line = NULL;
                }

                pos = skip_spaces(pos);
                extent->first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent->lower_first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent->count = simple_strtoul(pos, &pos, 10);
                if (*pos && !isspace(*pos))
                        goto out;

                /* Verify there is not trailing junk on the line */
                pos = skip_spaces(pos);
                if (*pos != '\0')
                        goto out;

                /* Verify we have been given valid starting values */
                if ((extent->first == (u32) -1) ||
                    (extent->lower_first == (u32) -1 ))
                        goto out;

                /* Verify count is not zero and does not cause the extent to wrap */
                if ((extent->first + extent->count) <= extent->first)
                        goto out;
                if ((extent->lower_first + extent->count) <= extent->lower_first)
                        goto out;

                /* Do the ranges in extent overlap any previous extents? */
                if (mappings_overlap(&new_map, extent))
                        goto out;

                new_map.nr_extents++;

                /* Fail if the file contains too many extents */
                if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
                    (next_line != NULL))
                        goto out;
        }
        /* Be very certain the new map actually exists */
        if (new_map.nr_extents == 0)
                goto out;

        ret = -EPERM;
        /* Validate the user is allowed to use user id's mapped to. */
        if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
                goto out;

        /* Map the lower ids from the parent user namespace to the
         * kernel global id space.
         */
        for (idx = 0; idx < new_map.nr_extents; idx++) {
                u32 lower_first;
                extent = &new_map.extent[idx];

                lower_first = map_id_range_down(parent_map,
                                                extent->lower_first,
                                                extent->count);

                /* Fail if we can not map the specified extent to
                 * the kernel global id space.
                 */
                if (lower_first == (u32) -1)
                        goto out;

                extent->lower_first = lower_first;
        }

        /* Install the map: publish the extents before the count so readers
         * never see a count covering unwritten extents (paired with
         * smp_read_barrier_depends() in the map_id_* helpers).
         */
        memcpy(map->extent, new_map.extent,
                new_map.nr_extents*sizeof(new_map.extent[0]));
        smp_wmb();
        map->nr_extents = new_map.nr_extents;

        *ppos = count;
        ret = count;
out:
        mutex_unlock(&id_map_mutex);
        if (page)
                free_page(page);
        return ret;
}
 741
 742ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
 743{
 744        struct seq_file *seq = file->private_data;
 745        struct user_namespace *ns = seq->private;
 746        struct user_namespace *seq_ns = seq_user_ns(seq);
 747
 748        if (!ns->parent)
 749                return -EPERM;
 750
 751        if ((seq_ns != ns) && (seq_ns != ns->parent))
 752                return -EPERM;
 753
 754        return map_write(file, buf, size, ppos, CAP_SETUID,
 755                         &ns->uid_map, &ns->parent->uid_map);
 756}
 757
 758ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
 759{
 760        struct seq_file *seq = file->private_data;
 761        struct user_namespace *ns = seq->private;
 762        struct user_namespace *seq_ns = seq_user_ns(seq);
 763
 764        if (!ns->parent)
 765                return -EPERM;
 766
 767        if ((seq_ns != ns) && (seq_ns != ns->parent))
 768                return -EPERM;
 769
 770        return map_write(file, buf, size, ppos, CAP_SETGID,
 771                         &ns->gid_map, &ns->parent->gid_map);
 772}
 773
 774ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
 775{
 776        struct seq_file *seq = file->private_data;
 777        struct user_namespace *ns = seq->private;
 778        struct user_namespace *seq_ns = seq_user_ns(seq);
 779
 780        if (!ns->parent)
 781                return -EPERM;
 782
 783        if ((seq_ns != ns) && (seq_ns != ns->parent))
 784                return -EPERM;
 785
 786        /* Anyone can set any valid project id no capability needed */
 787        return map_write(file, buf, size, ppos, -1,
 788                         &ns->projid_map, &ns->parent->projid_map);
 789}
 790
 791static bool new_idmap_permitted(const struct file *file, 
 792                                struct user_namespace *ns, int cap_setid,
 793                                struct uid_gid_map *new_map)
 794{
 795        /* Allow mapping to your own filesystem ids */
 796        if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
 797                u32 id = new_map->extent[0].lower_first;
 798                if (cap_setid == CAP_SETUID) {
 799                        kuid_t uid = make_kuid(ns->parent, id);
 800                        if (uid_eq(uid, file->f_cred->fsuid))
 801                                return true;
 802                }
 803                else if (cap_setid == CAP_SETGID) {
 804                        kgid_t gid = make_kgid(ns->parent, id);
 805                        if (gid_eq(gid, file->f_cred->fsgid))
 806                                return true;
 807                }
 808        }
 809
 810        /* Allow anyone to set a mapping that doesn't require privilege */
 811        if (!cap_valid(cap_setid))
 812                return true;
 813
 814        /* Allow the specified ids if we have the appropriate capability
 815         * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
 816         * And the opener of the id file also had the approprpiate capability.
 817         */
 818        if (ns_capable(ns->parent, cap_setid) &&
 819            file_ns_capable(file, ns->parent, cap_setid))
 820                return true;
 821
 822        return false;
 823}
 824
 825static void *userns_get(struct task_struct *task)
 826{
 827        struct user_namespace *user_ns;
 828
 829        rcu_read_lock();
 830        user_ns = get_user_ns(__task_cred(task)->user_ns);
 831        rcu_read_unlock();
 832
 833        return user_ns;
 834}
 835
/* proc_ns ->put: drop the reference taken by userns_get() */
static void userns_put(void *ns)
{
        put_user_ns(ns);
}
 840
/*
 * userns_install - setns(2) backend: move the calling task into @ns.
 *
 * Rejects re-entering the current namespace, multi-threaded processes,
 * and tasks sharing their fs_struct; requires CAP_SYS_ADMIN in the
 * target namespace.  Returns 0 on success or a negative errno.
 */
static int userns_install(struct nsproxy *nsproxy, void *ns)
{
        struct user_namespace *user_ns = ns;
        struct cred *cred;

        /* Don't allow gaining capabilities by reentering
         * the same user namespace.
         */
        if (user_ns == current_user_ns())
                return -EINVAL;

        /* Threaded processes may not enter a different user namespace */
        if (atomic_read(&current->mm->mm_users) > 1)
                return -EINVAL;

        /* Nor may tasks whose fs_struct is shared with another task */
        if (current->fs->users != 1)
                return -EINVAL;

        if (!ns_capable(user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        cred = prepare_creds();
        if (!cred)
                return -ENOMEM;

        /* Swap the cred's namespace reference for one on @user_ns */
        put_user_ns(cred->user_ns);
        set_cred_user_ns(cred, get_user_ns(user_ns));

        return commit_creds(cred);
}
 871
 872static unsigned int userns_inum(void *ns)
 873{
 874        struct user_namespace *user_ns = ns;
 875        return user_ns->proc_inum;
 876}
 877
/* Operations backing /proc/<pid>/ns/user and setns(2) on it */
const struct proc_ns_operations userns_operations = {
        .name           = "user",
        .type           = CLONE_NEWUSER,
        .get            = userns_get,
        .put            = userns_put,
        .install        = userns_install,
        .inum           = userns_inum,
};
 886
/* Boot-time setup: create the slab cache for struct user_namespace */
static __init int user_namespaces_init(void)
{
        user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
        return 0;
}
module_init(user_namespaces_init);
 893
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.