linux/kernel/user_namespace.c
<<
>>
Prefs
   1/*
   2 *  This program is free software; you can redistribute it and/or
   3 *  modify it under the terms of the GNU General Public License as
   4 *  published by the Free Software Foundation, version 2 of the
   5 *  License.
   6 */
   7
   8#include <linux/export.h>
   9#include <linux/nsproxy.h>
  10#include <linux/slab.h>
  11#include <linux/user_namespace.h>
  12#include <linux/proc_fs.h>
  13#include <linux/highuid.h>
  14#include <linux/cred.h>
  15#include <linux/securebits.h>
  16#include <linux/keyctl.h>
  17#include <linux/key-type.h>
  18#include <keys/user-type.h>
  19#include <linux/seq_file.h>
  20#include <linux/fs.h>
  21#include <linux/uaccess.h>
  22#include <linux/ctype.h>
  23#include <linux/projid.h>
  24#include <linux/fs_struct.h>
  25
  26static struct kmem_cache *user_ns_cachep __read_mostly;
  27
  28static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
  29                                struct uid_gid_map *map);
  30
/*
 * Switch @cred into @user_ns with a full capability set.
 *
 * The capabilities are "full" only relative to the new namespace: they
 * confer no privilege over objects governed by the parent (or any other)
 * user namespace.  @user_ns is assigned without taking a reference;
 * callers manage the refcount.
 */
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	/* Drop any request_key() authorisation key inherited from the old
	 * credentials; it belongs to the previous user namespace.
	 */
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}
  48
/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 *
 * Returns 0 on success, -EPERM when the creator's euid/egid have no mapping
 * in the parent namespace, -ENOMEM on allocation failure, or the error from
 * proc_alloc_inum().
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	int ret;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		return -EPERM;

	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		return -ENOMEM;

	ret = proc_alloc_inum(&ns->proc_inum);
	if (ret) {
		kmem_cache_free(user_ns_cachep, ns);
		return ret;
	}

	kref_init(&ns->kref);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->owner = owner;
	ns->group = group;

	set_cred_user_ns(new, ns);

	return 0;
}
  92
  93int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
  94{
  95        struct cred *cred;
  96
  97        if (!(unshare_flags & CLONE_NEWUSER))
  98                return 0;
  99
 100        cred = prepare_creds();
 101        if (!cred)
 102                return -ENOMEM;
 103
 104        *new_cred = cred;
 105        return create_user_ns(cred);
 106}
 107
/*
 * Release a user namespace whose reference count reached zero.  Called
 * via kref_put(); frees the namespace and drops the reference it held on
 * its parent (taken in create_user_ns()).
 *
 * NOTE(review): dropping the parent reference here can recurse through
 * put_user_ns() once per ancestor being released; deeply nested
 * namespaces may consume noticeable stack — confirm acceptable bounds.
 */
void free_user_ns(struct kref *kref)
{
	struct user_namespace *parent, *ns =
		container_of(kref, struct user_namespace, kref);

	parent = ns->parent;
	proc_free_inum(ns->proc_inum);
	kmem_cache_free(user_ns_cachep, ns);
	put_user_ns(parent);
}
EXPORT_SYMBOL(free_user_ns);
 119
 120static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 121{
 122        unsigned idx, extents;
 123        u32 first, last, id2;
 124
 125        id2 = id + count - 1;
 126
 127        /* Find the matching extent */
 128        extents = map->nr_extents;
 129        smp_read_barrier_depends();
 130        for (idx = 0; idx < extents; idx++) {
 131                first = map->extent[idx].first;
 132                last = first + map->extent[idx].count - 1;
 133                if (id >= first && id <= last &&
 134                    (id2 >= first && id2 <= last))
 135                        break;
 136        }
 137        /* Map the id or note failure */
 138        if (idx < extents)
 139                id = (id - first) + map->extent[idx].lower_first;
 140        else
 141                id = (u32) -1;
 142
 143        return id;
 144}
 145
 146static u32 map_id_down(struct uid_gid_map *map, u32 id)
 147{
 148        unsigned idx, extents;
 149        u32 first, last;
 150
 151        /* Find the matching extent */
 152        extents = map->nr_extents;
 153        smp_read_barrier_depends();
 154        for (idx = 0; idx < extents; idx++) {
 155                first = map->extent[idx].first;
 156                last = first + map->extent[idx].count - 1;
 157                if (id >= first && id <= last)
 158                        break;
 159        }
 160        /* Map the id or note failure */
 161        if (idx < extents)
 162                id = (id - first) + map->extent[idx].lower_first;
 163        else
 164                id = (u32) -1;
 165
 166        return id;
 167}
 168
 169static u32 map_id_up(struct uid_gid_map *map, u32 id)
 170{
 171        unsigned idx, extents;
 172        u32 first, last;
 173
 174        /* Find the matching extent */
 175        extents = map->nr_extents;
 176        smp_read_barrier_depends();
 177        for (idx = 0; idx < extents; idx++) {
 178                first = map->extent[idx].lower_first;
 179                last = first + map->extent[idx].count - 1;
 180                if (id >= first && id <= last)
 181                        break;
 182        }
 183        /* Map the id or note failure */
 184        if (idx < extents)
 185                id = (id - first) + map->extent[idx].first;
 186        else
 187                id = (u32) -1;
 188
 189        return id;
 190}
 191
 192/**
 193 *      make_kuid - Map a user-namespace uid pair into a kuid.
 194 *      @ns:  User namespace that the uid is in
 195 *      @uid: User identifier
 196 *
 197 *      Maps a user-namespace uid pair into a kernel internal kuid,
 198 *      and returns that kuid.
 199 *
 200 *      When there is no mapping defined for the user-namespace uid
 201 *      pair INVALID_UID is returned.  Callers are expected to test
 202 *      for and handle handle INVALID_UID being returned.  INVALID_UID
 203 *      may be tested for using uid_valid().
 204 */
 205kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
 206{
 207        /* Map the uid to a global kernel uid */
 208        return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
 209}
 210EXPORT_SYMBOL(make_kuid);
 211
 212/**
 213 *      from_kuid - Create a uid from a kuid user-namespace pair.
 214 *      @targ: The user namespace we want a uid in.
 215 *      @kuid: The kernel internal uid to start with.
 216 *
 217 *      Map @kuid into the user-namespace specified by @targ and
 218 *      return the resulting uid.
 219 *
 220 *      There is always a mapping into the initial user_namespace.
 221 *
 222 *      If @kuid has no mapping in @targ (uid_t)-1 is returned.
 223 */
 224uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
 225{
 226        /* Map the uid from a global kernel uid */
 227        return map_id_up(&targ->uid_map, __kuid_val(kuid));
 228}
 229EXPORT_SYMBOL(from_kuid);
 230
 231/**
 232 *      from_kuid_munged - Create a uid from a kuid user-namespace pair.
 233 *      @targ: The user namespace we want a uid in.
 234 *      @kuid: The kernel internal uid to start with.
 235 *
 236 *      Map @kuid into the user-namespace specified by @targ and
 237 *      return the resulting uid.
 238 *
 239 *      There is always a mapping into the initial user_namespace.
 240 *
 241 *      Unlike from_kuid from_kuid_munged never fails and always
 242 *      returns a valid uid.  This makes from_kuid_munged appropriate
 243 *      for use in syscalls like stat and getuid where failing the
 244 *      system call and failing to provide a valid uid are not an
 245 *      options.
 246 *
 247 *      If @kuid has no mapping in @targ overflowuid is returned.
 248 */
 249uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
 250{
 251        uid_t uid;
 252        uid = from_kuid(targ, kuid);
 253
 254        if (uid == (uid_t) -1)
 255                uid = overflowuid;
 256        return uid;
 257}
 258EXPORT_SYMBOL(from_kuid_munged);
 259
 260/**
 261 *      make_kgid - Map a user-namespace gid pair into a kgid.
 262 *      @ns:  User namespace that the gid is in
 263 *      @uid: group identifier
 264 *
 265 *      Maps a user-namespace gid pair into a kernel internal kgid,
 266 *      and returns that kgid.
 267 *
 268 *      When there is no mapping defined for the user-namespace gid
 269 *      pair INVALID_GID is returned.  Callers are expected to test
 270 *      for and handle INVALID_GID being returned.  INVALID_GID may be
 271 *      tested for using gid_valid().
 272 */
 273kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
 274{
 275        /* Map the gid to a global kernel gid */
 276        return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
 277}
 278EXPORT_SYMBOL(make_kgid);
 279
 280/**
 281 *      from_kgid - Create a gid from a kgid user-namespace pair.
 282 *      @targ: The user namespace we want a gid in.
 283 *      @kgid: The kernel internal gid to start with.
 284 *
 285 *      Map @kgid into the user-namespace specified by @targ and
 286 *      return the resulting gid.
 287 *
 288 *      There is always a mapping into the initial user_namespace.
 289 *
 290 *      If @kgid has no mapping in @targ (gid_t)-1 is returned.
 291 */
 292gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
 293{
 294        /* Map the gid from a global kernel gid */
 295        return map_id_up(&targ->gid_map, __kgid_val(kgid));
 296}
 297EXPORT_SYMBOL(from_kgid);
 298
 299/**
 300 *      from_kgid_munged - Create a gid from a kgid user-namespace pair.
 301 *      @targ: The user namespace we want a gid in.
 302 *      @kgid: The kernel internal gid to start with.
 303 *
 304 *      Map @kgid into the user-namespace specified by @targ and
 305 *      return the resulting gid.
 306 *
 307 *      There is always a mapping into the initial user_namespace.
 308 *
 309 *      Unlike from_kgid from_kgid_munged never fails and always
 310 *      returns a valid gid.  This makes from_kgid_munged appropriate
 311 *      for use in syscalls like stat and getgid where failing the
 312 *      system call and failing to provide a valid gid are not options.
 313 *
 314 *      If @kgid has no mapping in @targ overflowgid is returned.
 315 */
 316gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
 317{
 318        gid_t gid;
 319        gid = from_kgid(targ, kgid);
 320
 321        if (gid == (gid_t) -1)
 322                gid = overflowgid;
 323        return gid;
 324}
 325EXPORT_SYMBOL(from_kgid_munged);
 326
 327/**
 328 *      make_kprojid - Map a user-namespace projid pair into a kprojid.
 329 *      @ns:  User namespace that the projid is in
 330 *      @projid: Project identifier
 331 *
 332 *      Maps a user-namespace uid pair into a kernel internal kuid,
 333 *      and returns that kuid.
 334 *
 335 *      When there is no mapping defined for the user-namespace projid
 336 *      pair INVALID_PROJID is returned.  Callers are expected to test
 337 *      for and handle handle INVALID_PROJID being returned.  INVALID_PROJID
 338 *      may be tested for using projid_valid().
 339 */
 340kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
 341{
 342        /* Map the uid to a global kernel uid */
 343        return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
 344}
 345EXPORT_SYMBOL(make_kprojid);
 346
 347/**
 348 *      from_kprojid - Create a projid from a kprojid user-namespace pair.
 349 *      @targ: The user namespace we want a projid in.
 350 *      @kprojid: The kernel internal project identifier to start with.
 351 *
 352 *      Map @kprojid into the user-namespace specified by @targ and
 353 *      return the resulting projid.
 354 *
 355 *      There is always a mapping into the initial user_namespace.
 356 *
 357 *      If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 358 */
 359projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
 360{
 361        /* Map the uid from a global kernel uid */
 362        return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
 363}
 364EXPORT_SYMBOL(from_kprojid);
 365
 366/**
 367 *      from_kprojid_munged - Create a projiid from a kprojid user-namespace pair.
 368 *      @targ: The user namespace we want a projid in.
 369 *      @kprojid: The kernel internal projid to start with.
 370 *
 371 *      Map @kprojid into the user-namespace specified by @targ and
 372 *      return the resulting projid.
 373 *
 374 *      There is always a mapping into the initial user_namespace.
 375 *
 376 *      Unlike from_kprojid from_kprojid_munged never fails and always
 377 *      returns a valid projid.  This makes from_kprojid_munged
 378 *      appropriate for use in syscalls like stat and where
 379 *      failing the system call and failing to provide a valid projid are
 380 *      not an options.
 381 *
 382 *      If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 383 */
 384projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
 385{
 386        projid_t projid;
 387        projid = from_kprojid(targ, kprojid);
 388
 389        if (projid == (projid_t) -1)
 390                projid = OVERFLOW_PROJID;
 391        return projid;
 392}
 393EXPORT_SYMBOL(from_kprojid_munged);
 394
 395
 396static int uid_m_show(struct seq_file *seq, void *v)
 397{
 398        struct user_namespace *ns = seq->private;
 399        struct uid_gid_extent *extent = v;
 400        struct user_namespace *lower_ns;
 401        uid_t lower;
 402
 403        lower_ns = seq_user_ns(seq);
 404        if ((lower_ns == ns) && lower_ns->parent)
 405                lower_ns = lower_ns->parent;
 406
 407        lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
 408
 409        seq_printf(seq, "%10u %10u %10u\n",
 410                extent->first,
 411                lower,
 412                extent->count);
 413
 414        return 0;
 415}
 416
 417static int gid_m_show(struct seq_file *seq, void *v)
 418{
 419        struct user_namespace *ns = seq->private;
 420        struct uid_gid_extent *extent = v;
 421        struct user_namespace *lower_ns;
 422        gid_t lower;
 423
 424        lower_ns = seq_user_ns(seq);
 425        if ((lower_ns == ns) && lower_ns->parent)
 426                lower_ns = lower_ns->parent;
 427
 428        lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
 429
 430        seq_printf(seq, "%10u %10u %10u\n",
 431                extent->first,
 432                lower,
 433                extent->count);
 434
 435        return 0;
 436}
 437
 438static int projid_m_show(struct seq_file *seq, void *v)
 439{
 440        struct user_namespace *ns = seq->private;
 441        struct uid_gid_extent *extent = v;
 442        struct user_namespace *lower_ns;
 443        projid_t lower;
 444
 445        lower_ns = seq_user_ns(seq);
 446        if ((lower_ns == ns) && lower_ns->parent)
 447                lower_ns = lower_ns->parent;
 448
 449        lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
 450
 451        seq_printf(seq, "%10u %10u %10u\n",
 452                extent->first,
 453                lower,
 454                extent->count);
 455
 456        return 0;
 457}
 458
 459static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map)
 460{
 461        struct uid_gid_extent *extent = NULL;
 462        loff_t pos = *ppos;
 463
 464        if (pos < map->nr_extents)
 465                extent = &map->extent[pos];
 466
 467        return extent;
 468}
 469
 470static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
 471{
 472        struct user_namespace *ns = seq->private;
 473
 474        return m_start(seq, ppos, &ns->uid_map);
 475}
 476
 477static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
 478{
 479        struct user_namespace *ns = seq->private;
 480
 481        return m_start(seq, ppos, &ns->gid_map);
 482}
 483
 484static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
 485{
 486        struct user_namespace *ns = seq->private;
 487
 488        return m_start(seq, ppos, &ns->projid_map);
 489}
 490
 491static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
 492{
 493        (*pos)++;
 494        return seq->op->start(seq, pos);
 495}
 496
 497static void m_stop(struct seq_file *seq, void *v)
 498{
 499        return;
 500}
 501
 502struct seq_operations proc_uid_seq_operations = {
 503        .start = uid_m_start,
 504        .stop = m_stop,
 505        .next = m_next,
 506        .show = uid_m_show,
 507};
 508
 509struct seq_operations proc_gid_seq_operations = {
 510        .start = gid_m_start,
 511        .stop = m_stop,
 512        .next = m_next,
 513        .show = gid_m_show,
 514};
 515
 516struct seq_operations proc_projid_seq_operations = {
 517        .start = projid_m_start,
 518        .stop = m_stop,
 519        .next = m_next,
 520        .show = projid_m_show,
 521};
 522
 523static DEFINE_MUTEX(id_map_mutex);
 524
/*
 * map_write - parse and install a uid/gid/projid map written via /proc.
 * @file:       the seq_file-backed /proc/<pid>/{uid,gid,projid}_map file
 * @buf:        userspace buffer of "first lower_first count" lines
 * @count:      number of bytes in @buf; must be < PAGE_SIZE
 * @ppos:       file position; only writes at offset 0 are accepted
 * @cap_setid:  capability required over @ns (CAP_SETUID/CAP_SETGID), or an
 *              invalid cap (-1) when none is needed (projid maps)
 * @map:        the map being installed; must currently be empty
 * @parent_map: the parent namespace's map, used to translate the lower
 *              ids into the kernel-global id space
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent *extent, *last = NULL;
	unsigned long page = 0;
	char *kbuf, *pos, *next_line;
	ssize_t ret = -EINVAL;

	/*
	 * The id_map_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents.  The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_read_barrier_depends() is guaranteed that we
	 * don't have crazy architectures returning stale data.
	 */
	mutex_lock(&id_map_mutex);

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/* Require the appropriate privilege CAP_SETUID or CAP_SETGID
	 * over the user namespace in order to set the id mapping.
	 */
	if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid))
		goto out;

	/* Get a buffer */
	ret = -ENOMEM;
	page = __get_free_page(GFP_TEMPORARY);
	kbuf = (char *) page;
	if (!page)
		goto out;

	/* Only allow <= page size writes at the beginning of the file */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		goto out;

	/* Slurp in the user data */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	/* count < PAGE_SIZE, so the terminator stays inside the page */
	kbuf[count] = '\0';

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	new_map.nr_extents = 0;
	for (;pos; pos = next_line) {
		extent = &new_map.extent[new_map.nr_extents];

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent->first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is not trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent->first == (u32) -1) ||
		    (extent->lower_first == (u32) -1 ))
			goto out;

		/* Verify count is not zero and does not cause the extent to wrap */
		if ((extent->first + extent->count) <= extent->first)
			goto out;
		if ((extent->lower_first + extent->count) <= extent->lower_first)
			goto out;

		/* For now only accept extents that are strictly in order */
		if (last &&
		    (((last->first + last->count) > extent->first) ||
		     ((last->lower_first + last->count) > extent->lower_first)))
			goto out;

		new_map.nr_extents++;
		last = extent;

		/* Fail if the file contains too many extents */
		if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
		    (next_line != NULL))
			goto out;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate the user is allowed to use user id's mapped to. */
	if (!new_idmap_permitted(ns, cap_setid, &new_map))
		goto out;

	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		u32 lower_first;
		extent = &new_map.extent[idx];

		lower_first = map_id_range_down(parent_map,
						extent->lower_first,
						extent->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		extent->lower_first = lower_first;
	}

	/* Install the map: publish the extent data before the count.
	 * Pairs with smp_read_barrier_depends() in map_id_*().
	 */
	memcpy(map->extent, new_map.extent,
		new_map.nr_extents*sizeof(new_map.extent[0]));
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	mutex_unlock(&id_map_mutex);
	if (page)
		free_page(page);
	return ret;
}
 695
 696ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
 697{
 698        struct seq_file *seq = file->private_data;
 699        struct user_namespace *ns = seq->private;
 700        struct user_namespace *seq_ns = seq_user_ns(seq);
 701
 702        if (!ns->parent)
 703                return -EPERM;
 704
 705        if ((seq_ns != ns) && (seq_ns != ns->parent))
 706                return -EPERM;
 707
 708        return map_write(file, buf, size, ppos, CAP_SETUID,
 709                         &ns->uid_map, &ns->parent->uid_map);
 710}
 711
 712ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
 713{
 714        struct seq_file *seq = file->private_data;
 715        struct user_namespace *ns = seq->private;
 716        struct user_namespace *seq_ns = seq_user_ns(seq);
 717
 718        if (!ns->parent)
 719                return -EPERM;
 720
 721        if ((seq_ns != ns) && (seq_ns != ns->parent))
 722                return -EPERM;
 723
 724        return map_write(file, buf, size, ppos, CAP_SETGID,
 725                         &ns->gid_map, &ns->parent->gid_map);
 726}
 727
 728ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
 729{
 730        struct seq_file *seq = file->private_data;
 731        struct user_namespace *ns = seq->private;
 732        struct user_namespace *seq_ns = seq_user_ns(seq);
 733
 734        if (!ns->parent)
 735                return -EPERM;
 736
 737        if ((seq_ns != ns) && (seq_ns != ns->parent))
 738                return -EPERM;
 739
 740        /* Anyone can set any valid project id no capability needed */
 741        return map_write(file, buf, size, ppos, -1,
 742                         &ns->projid_map, &ns->parent->projid_map);
 743}
 744
/*
 * new_idmap_permitted - may the caller install @new_map on @ns?
 * @ns:        the user namespace whose map is being written
 * @cap_setid: CAP_SETUID, CAP_SETGID, or an invalid cap (projid maps)
 * @new_map:   the fully parsed candidate map
 *
 * Permitted when the map is a single one-element extent targeting the
 * caller's own fsuid/fsgid, when no capability is required at all
 * (invalid @cap_setid), or when the caller holds @cap_setid over the
 * parent namespace.
 *
 * NOTE(review): the self-map case trusts current_fsuid()/current_fsgid()
 * at write() time rather than the credentials of the task that opened
 * the map file; upstream later hardened this area (CVE-2013-1959, which
 * passed the opener's struct file down to this check) — confirm against
 * the current tree before relying on this behavior.
 */
static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	/* Allow mapping to your own filesystem ids */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, current_fsuid()))
				return true;
		}
		else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			if (gid_eq(gid, current_fsgid()))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 */
	if (ns_capable(ns->parent, cap_setid))
		return true;

	return false;
}
 775
 776static void *userns_get(struct task_struct *task)
 777{
 778        struct user_namespace *user_ns;
 779
 780        rcu_read_lock();
 781        user_ns = get_user_ns(__task_cred(task)->user_ns);
 782        rcu_read_unlock();
 783
 784        return user_ns;
 785}
 786
 787static void userns_put(void *ns)
 788{
 789        put_user_ns(ns);
 790}
 791
/*
 * userns_install - switch the current task into user namespace @ns
 * via setns().
 * @nsproxy: unused here; the user namespace lives in task credentials
 * @ns:      the target user namespace
 *
 * Returns 0 on success, -EINVAL when the switch is structurally
 * disallowed, -EPERM without CAP_SYS_ADMIN over @ns, or -ENOMEM.
 */
static int userns_install(struct nsproxy *nsproxy, void *ns)
{
	struct user_namespace *user_ns = ns;
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Threaded processes may not enter a different user namespace */
	if (atomic_read(&current->mm->mm_users) > 1)
		return -EINVAL;

	/* Refuse when the fs_struct is shared with other tasks —
	 * presumably so another task cannot piggyback on the new
	 * namespace's elevated view; TODO confirm rationale.
	 */
	if (current->fs->users != 1)
		return -EINVAL;

	/* Entering a namespace requires CAP_SYS_ADMIN over it. */
	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	/* Swap the cred's old namespace reference for one on @user_ns. */
	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return commit_creds(cred);
}
 822
 823static unsigned int userns_inum(void *ns)
 824{
 825        struct user_namespace *user_ns = ns;
 826        return user_ns->proc_inum;
 827}
 828
 829const struct proc_ns_operations userns_operations = {
 830        .name           = "user",
 831        .type           = CLONE_NEWUSER,
 832        .get            = userns_get,
 833        .put            = userns_put,
 834        .install        = userns_install,
 835        .inum           = userns_inum,
 836};
 837
/*
 * Boot-time setup: create the slab cache used for struct user_namespace
 * allocations.  SLAB_PANIC makes an allocation-setup failure fatal at boot.
 */
static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
module_init(user_namespaces_init);
 844
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.