linux/fs/cifs/cifsacl.c
   1/*
   2 *   fs/cifs/cifsacl.c
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2007,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *
   7 *   Contains the routines for mapping CIFS/NTFS ACLs
   8 *
   9 *   This library is free software; you can redistribute it and/or modify
  10 *   it under the terms of the GNU Lesser General Public License as published
  11 *   by the Free Software Foundation; either version 2.1 of the License, or
  12 *   (at your option) any later version.
  13 *
  14 *   This library is distributed in the hope that it will be useful,
  15 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  17 *   the GNU Lesser General Public License for more details.
  18 *
  19 *   You should have received a copy of the GNU Lesser General Public License
  20 *   along with this library; if not, write to the Free Software
  21 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  22 */
  23
  24#include <linux/fs.h>
  25#include <linux/slab.h>
  26#include <linux/string.h>
  27#include <linux/keyctl.h>
  28#include <linux/key-type.h>
  29#include <keys/user-type.h>
  30#include "cifspdu.h"
  31#include "cifsglob.h"
  32#include "cifsacl.h"
  33#include "cifsproto.h"
  34#include "cifs_debug.h"
  35
  36/* security id for everyone/world system group */
  37static const struct cifs_sid sid_everyone = {
  38        1, 1, {0, 0, 0, 0, 0, 1}, {0} };
  39/* security id for Authenticated Users system group */
  40static const struct cifs_sid sid_authusers = {
  41        1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
  42/* group users */
  43static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
  44
  45const struct cred *root_cred;
  46
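/*
 * Walk an idmap rbtree, erasing entries that have expired and are no
 * longer referenced (at most nr_to_scan deletions); counts of remaining
 * and deleted nodes are returned via nr_rem and nr_del.
 */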
  47static void
  48shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
  49                        int *nr_del)
  50{
  51        struct rb_node *node;
  52        struct rb_node *tmp;
  53        struct cifs_sid_id *psidid;
  54
  55        node = rb_first(root);
  56        while (node) {
  57                tmp = node;
  58                node = rb_next(tmp);
  59                psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
  60                if (nr_to_scan == 0 || *nr_del == nr_to_scan)
  61                        ++(*nr_rem);
  62                else {
  63                        if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
  64                                                && psidid->refcount == 0) {
  65                                rb_erase(tmp, root);
  66                                ++(*nr_del);
  67                        } else
  68                                ++(*nr_rem);
  69                }
  70        }
  71}
  72
  73/*
  74 * Run idmap cache shrinker.
  75 */
  76static int
  77cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
  78{
  79        int nr_to_scan = sc->nr_to_scan;
  80        int nr_del = 0;
  81        int nr_rem = 0;
  82        struct rb_root *root;
  83
  84        root = &uidtree;
  85        spin_lock(&siduidlock);
  86        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
  87        spin_unlock(&siduidlock);
  88
  89        root = &gidtree;
  90        spin_lock(&sidgidlock);
  91        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
  92        spin_unlock(&sidgidlock);
  93
  94        root = &siduidtree;
  95        spin_lock(&uidsidlock);
  96        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
  97        spin_unlock(&uidsidlock);
  98
  99        root = &sidgidtree;
 100        spin_lock(&gidsidlock);
 101        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
 102        spin_unlock(&gidsidlock);
 103
 104        return nr_rem;
 105}
 106
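/*
 * Insert a node keyed by a numeric uid/gid into an id-to-SID rbtree and
 * build its request-key description from typestr ("oi:"/"gi:") plus the id.
 */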
 107static void
 108sid_rb_insert(struct rb_root *root, unsigned long cid,
 109                struct cifs_sid_id **psidid, char *typestr)
 110{
 111        char *strptr;
 112        struct rb_node *node = root->rb_node;
 113        struct rb_node *parent = NULL;
 114        struct rb_node **linkto = &(root->rb_node);
 115        struct cifs_sid_id *lsidid;
 116
 117        while (node) {
 118                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 119                parent = node;
 120                if (cid > lsidid->id) {
 121                        linkto = &(node->rb_left);
 122                        node = node->rb_left;
 123                }
 124                if (cid < lsidid->id) {
 125                        linkto = &(node->rb_right);
 126                        node = node->rb_right;
 127                }
 128        }
 129
 130        (*psidid)->id = cid;
 131        (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
 132        (*psidid)->refcount = 0;
 133
 134        sprintf((*psidid)->sidstr, "%s", typestr);
 135        strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
 136        sprintf(strptr, "%ld", cid);
 137
 138        clear_bit(SID_ID_PENDING, &(*psidid)->state);
 139        clear_bit(SID_ID_MAPPED, &(*psidid)->state);
 140
 141        rb_link_node(&(*psidid)->rbnode, parent, linkto);
 142        rb_insert_color(&(*psidid)->rbnode, root);
 143}
 144
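/* Find the node for a given numeric id in an id-keyed rbtree, or NULL. */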
 145static struct cifs_sid_id *
 146sid_rb_search(struct rb_root *root, unsigned long cid)
 147{
 148        struct rb_node *node = root->rb_node;
 149        struct cifs_sid_id *lsidid;
 150
 151        while (node) {
 152                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 153                if (cid > lsidid->id)
 154                        node = node->rb_left;
 155                else if (cid < lsidid->id)
 156                        node = node->rb_right;
 157                else /* node found */
 158                        return lsidid;
 159        }
 160
 161        return NULL;
 162}
 163
 164static struct shrinker cifs_shrinker = {
 165        .shrink = cifs_idmap_shrinker,
 166        .seeks = DEFAULT_SEEKS,
 167};
 168
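/* Copy the payload supplied by the userspace upcall into the new key. */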
 169static int
 170cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
 171{
 172        char *payload;
 173
 174        payload = kmalloc(prep->datalen, GFP_KERNEL);
 175        if (!payload)
 176                return -ENOMEM;
 177
 178        memcpy(payload, prep->data, prep->datalen);
 179        key->payload.data = payload;
 180        key->datalen = prep->datalen;
 181        return 0;
 182}
 183
 184static inline void
 185cifs_idmap_key_destroy(struct key *key)
 186{
 187        kfree(key->payload.data);
 188}
 189
 190struct key_type cifs_idmap_key_type = {
 191        .name        = "cifs.idmap",
 192        .instantiate = cifs_idmap_key_instantiate,
 193        .destroy     = cifs_idmap_key_destroy,
 194        .describe    = user_describe,
 195        .match       = user_match,
 196};
 197
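/*
 * Convert a binary SID into its "S-<revision>-<authority>-<subauth>..."
 * string form in the caller-supplied buffer (zero authority bytes are
 * skipped).
 */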
 198static void
 199sid_to_str(struct cifs_sid *sidptr, char *sidstr)
 200{
 201        int i;
 202        unsigned long saval;
 203        char *strptr;
 204
 205        strptr = sidstr;
 206
 207        sprintf(strptr, "%s", "S");
 208        strptr = sidstr + strlen(sidstr);
 209
 210        sprintf(strptr, "-%d", sidptr->revision);
 211        strptr = sidstr + strlen(sidstr);
 212
 213        for (i = 0; i < 6; ++i) {
 214                if (sidptr->authority[i]) {
 215                        sprintf(strptr, "-%d", sidptr->authority[i]);
 216                        strptr = sidstr + strlen(sidstr);
 217                }
 218        }
 219
 220        for (i = 0; i < sidptr->num_subauth; ++i) {
 221                saval = le32_to_cpu(sidptr->sub_auth[i]);
 222                sprintf(strptr, "-%ld", saval);
 223                strptr = sidstr + strlen(sidstr);
 224        }
 225}
 226
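/* Copy a SID, clamping num_subauth to the maximum number we support. */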
 227static void
 228cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
 229{
 230        memcpy(dst, src, sizeof(*dst));
 231        dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS);
 232}
 233
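/*
 * Insert a node keyed by SID into a SID-to-id rbtree and build its
 * request-key description from typestr ("os:"/"gs:") plus the SID string.
 */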
 234static void
 235id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
 236                struct cifs_sid_id **psidid, char *typestr)
 237{
 238        int rc;
 239        char *strptr;
 240        struct rb_node *node = root->rb_node;
 241        struct rb_node *parent = NULL;
 242        struct rb_node **linkto = &(root->rb_node);
 243        struct cifs_sid_id *lsidid;
 244
 245        while (node) {
 246                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 247                parent = node;
 248                rc = compare_sids(sidptr, &((lsidid)->sid));
 249                if (rc > 0) {
 250                        linkto = &(node->rb_left);
 251                        node = node->rb_left;
 252                } else if (rc < 0) {
 253                        linkto = &(node->rb_right);
 254                        node = node->rb_right;
 255                }
 256        }
 257
 258        cifs_copy_sid(&(*psidid)->sid, sidptr);
 259        (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
 260        (*psidid)->refcount = 0;
 261
 262        sprintf((*psidid)->sidstr, "%s", typestr);
 263        strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
 264        sid_to_str(&(*psidid)->sid, strptr);
 265
 266        clear_bit(SID_ID_PENDING, &(*psidid)->state);
 267        clear_bit(SID_ID_MAPPED, &(*psidid)->state);
 268
 269        rb_link_node(&(*psidid)->rbnode, parent, linkto);
 270        rb_insert_color(&(*psidid)->rbnode, root);
 271}
 272
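/* Find the node for a given SID in a SID-keyed rbtree, or NULL. */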
 273static struct cifs_sid_id *
 274id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
 275{
 276        int rc;
 277        struct rb_node *node = root->rb_node;
 278        struct cifs_sid_id *lsidid;
 279
 280        while (node) {
 281                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 282                rc = compare_sids(sidptr, &((lsidid)->sid));
 283                if (rc > 0) {
 284                        node = node->rb_left;
 285                } else if (rc < 0) {
 286                        node = node->rb_right;
 287                } else /* node found */
 288                        return lsidid;
 289        }
 290
 291        return NULL;
 292}
 293
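/* wait_on_bit() action used while another task resolves the same mapping */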
 294static int
 295sidid_pending_wait(void *unused)
 296{
 297        schedule();
 298        return signal_pending(current) ? -ERESTARTSYS : 0;
 299}
 300
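/*
 * Map a numeric uid/gid to a SID.  Uses a cached rbtree entry when one
 * exists; otherwise upcalls to userspace via the cifs.idmap key type and
 * caches the result in the tree.
 */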
 301static int
 302id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
 303{
 304        int rc = 0;
 305        struct key *sidkey;
 306        const struct cred *saved_cred;
 307        struct cifs_sid *lsid;
 308        struct cifs_sid_id *psidid, *npsidid;
 309        struct rb_root *cidtree;
 310        spinlock_t *cidlock;
 311
 312        if (sidtype == SIDOWNER) {
 313                cidlock = &siduidlock;
 314                cidtree = &uidtree;
 315        } else if (sidtype == SIDGROUP) {
 316                cidlock = &sidgidlock;
 317                cidtree = &gidtree;
 318        } else
 319                return -EINVAL;
 320
 321        spin_lock(cidlock);
 322        psidid = sid_rb_search(cidtree, cid);
 323
 324        if (!psidid) { /* node does not exist, allocate one & attempt adding */
 325                spin_unlock(cidlock);
 326                npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
 327                if (!npsidid)
 328                        return -ENOMEM;
 329
 330                npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
 331                if (!npsidid->sidstr) {
 332                        kfree(npsidid);
 333                        return -ENOMEM;
 334                }
 335
 336                spin_lock(cidlock);
 337                psidid = sid_rb_search(cidtree, cid);
 338                if (psidid) { /* node happened to get inserted meanwhile */
 339                        ++psidid->refcount;
 340                        spin_unlock(cidlock);
 341                        kfree(npsidid->sidstr);
 342                        kfree(npsidid);
 343                } else {
 344                        psidid = npsidid;
 345                        sid_rb_insert(cidtree, cid, &psidid,
 346                                        sidtype == SIDOWNER ? "oi:" : "gi:");
 347                        ++psidid->refcount;
 348                        spin_unlock(cidlock);
 349                }
 350        } else {
 351                ++psidid->refcount;
 352                spin_unlock(cidlock);
 353        }
 354
 355        /*
 356         * If we are here, it is safe to access psidid and its fields
 357         * since a reference was taken earlier while holding the spinlock.
 358         * A reference on the node is put without holding the spinlock
  359         * and that is OK in this case: the shrinker will not erase
  360         * this node until all references are put, and we do not access
  361         * any fields of the node after the reference is put.
 362         */
 363        if (test_bit(SID_ID_MAPPED, &psidid->state)) {
 364                cifs_copy_sid(ssid, &psidid->sid);
 365                psidid->time = jiffies; /* update ts for accessing */
 366                goto id_sid_out;
 367        }
 368
 369        if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
 370                rc = -EINVAL;
 371                goto id_sid_out;
 372        }
 373
 374        if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
 375                saved_cred = override_creds(root_cred);
 376                sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
 377                if (IS_ERR(sidkey)) {
 378                        rc = -EINVAL;
  379                        cFYI(1, "%s: Can't map an id to a SID", __func__);
 380                } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
 381                        rc = -EIO;
 382                        cFYI(1, "%s: Downcall contained malformed key "
 383                                "(datalen=%hu)", __func__, sidkey->datalen);
 384                } else {
 385                        lsid = (struct cifs_sid *)sidkey->payload.data;
 386                        cifs_copy_sid(&psidid->sid, lsid);
 387                        cifs_copy_sid(ssid, &psidid->sid);
 388                        set_bit(SID_ID_MAPPED, &psidid->state);
 389                        key_put(sidkey);
 390                        kfree(psidid->sidstr);
 391                }
 392                psidid->time = jiffies; /* update ts for accessing */
 393                revert_creds(saved_cred);
 394                clear_bit(SID_ID_PENDING, &psidid->state);
 395                wake_up_bit(&psidid->state, SID_ID_PENDING);
 396        } else {
 397                rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
 398                                sidid_pending_wait, TASK_INTERRUPTIBLE);
 399                if (rc) {
 400                        cFYI(1, "%s: sidid_pending_wait interrupted %d",
 401                                        __func__, rc);
 402                        --psidid->refcount;
 403                        return rc;
 404                }
 405                if (test_bit(SID_ID_MAPPED, &psidid->state))
 406                        cifs_copy_sid(ssid, &psidid->sid);
 407                else
 408                        rc = -EINVAL;
 409        }
 410id_sid_out:
 411        --psidid->refcount;
 412        return rc;
 413}
 414
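/*
 * Map a SID to a uid or gid and store it in the fattr.  Falls back to the
 * mount's default uid/gid when the SID cannot be mapped.
 */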
 415static int
 416sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
 417                struct cifs_fattr *fattr, uint sidtype)
 418{
 419        int rc;
 420        unsigned long cid;
 421        struct key *idkey;
 422        const struct cred *saved_cred;
 423        struct cifs_sid_id *psidid, *npsidid;
 424        struct rb_root *cidtree;
 425        spinlock_t *cidlock;
 426
 427        if (sidtype == SIDOWNER) {
 428                cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
 429                cidlock = &siduidlock;
 430                cidtree = &uidtree;
 431        } else if (sidtype == SIDGROUP) {
 432                cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
 433                cidlock = &sidgidlock;
 434                cidtree = &gidtree;
 435        } else
 436                return -ENOENT;
 437
 438        spin_lock(cidlock);
 439        psidid = id_rb_search(cidtree, psid);
 440
 441        if (!psidid) { /* node does not exist, allocate one & attempt adding */
 442                spin_unlock(cidlock);
 443                npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
 444                if (!npsidid)
 445                        return -ENOMEM;
 446
 447                npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
 448                if (!npsidid->sidstr) {
 449                        kfree(npsidid);
 450                        return -ENOMEM;
 451                }
 452
 453                spin_lock(cidlock);
 454                psidid = id_rb_search(cidtree, psid);
 455                if (psidid) { /* node happened to get inserted meanwhile */
 456                        ++psidid->refcount;
 457                        spin_unlock(cidlock);
 458                        kfree(npsidid->sidstr);
 459                        kfree(npsidid);
 460                } else {
 461                        psidid = npsidid;
 462                        id_rb_insert(cidtree, psid, &psidid,
 463                                        sidtype == SIDOWNER ? "os:" : "gs:");
 464                        ++psidid->refcount;
 465                        spin_unlock(cidlock);
 466                }
 467        } else {
 468                ++psidid->refcount;
 469                spin_unlock(cidlock);
 470        }
 471
 472        /*
 473         * If we are here, it is safe to access psidid and its fields
 474         * since a reference was taken earlier while holding the spinlock.
 475         * A reference on the node is put without holding the spinlock
  476         * and that is OK in this case: the shrinker will not erase
  477         * this node until all references are put, and we do not access
  478         * any fields of the node after the reference is put.
 479         */
 480        if (test_bit(SID_ID_MAPPED, &psidid->state)) {
 481                cid = psidid->id;
 482                psidid->time = jiffies; /* update ts for accessing */
 483                goto sid_to_id_out;
 484        }
 485
 486        if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
 487                goto sid_to_id_out;
 488
 489        if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
 490                saved_cred = override_creds(root_cred);
 491                idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
 492                if (IS_ERR(idkey))
 493                        cFYI(1, "%s: Can't map SID to an id", __func__);
 494                else {
 495                        cid = *(unsigned long *)idkey->payload.value;
 496                        psidid->id = cid;
 497                        set_bit(SID_ID_MAPPED, &psidid->state);
 498                        key_put(idkey);
 499                        kfree(psidid->sidstr);
 500                }
 501                revert_creds(saved_cred);
 502                psidid->time = jiffies; /* update ts for accessing */
 503                clear_bit(SID_ID_PENDING, &psidid->state);
 504                wake_up_bit(&psidid->state, SID_ID_PENDING);
 505        } else {
 506                rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
 507                                sidid_pending_wait, TASK_INTERRUPTIBLE);
 508                if (rc) {
 509                        cFYI(1, "%s: sidid_pending_wait interrupted %d",
 510                                        __func__, rc);
 511                        --psidid->refcount; /* decremented without spinlock */
 512                        return rc;
 513                }
 514                if (test_bit(SID_ID_MAPPED, &psidid->state))
 515                        cid = psidid->id;
 516        }
 517
 518sid_to_id_out:
 519        --psidid->refcount; /* decremented without spinlock */
 520        if (sidtype == SIDOWNER)
 521                fattr->cf_uid = cid;
 522        else
 523                fattr->cf_gid = cid;
 524
 525        return 0;
 526}
 527
 528int
 529init_cifs_idmap(void)
 530{
 531        struct cred *cred;
 532        struct key *keyring;
 533        int ret;
 534
 535        cFYI(1, "Registering the %s key type", cifs_idmap_key_type.name);
 536
 537        /* create an override credential set with a special thread keyring in
 538         * which requests are cached
 539         *
 540         * this is used to prevent malicious redirections from being installed
 541         * with add_key().
 542         */
 543        cred = prepare_kernel_cred(NULL);
 544        if (!cred)
 545                return -ENOMEM;
 546
 547        keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
 548                            (KEY_POS_ALL & ~KEY_POS_SETATTR) |
 549                            KEY_USR_VIEW | KEY_USR_READ,
 550                            KEY_ALLOC_NOT_IN_QUOTA);
 551        if (IS_ERR(keyring)) {
 552                ret = PTR_ERR(keyring);
 553                goto failed_put_cred;
 554        }
 555
 556        ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
 557        if (ret < 0)
 558                goto failed_put_key;
 559
 560        ret = register_key_type(&cifs_idmap_key_type);
 561        if (ret < 0)
 562                goto failed_put_key;
 563
 564        /* instruct request_key() to use this special keyring as a cache for
 565         * the results it looks up */
 566        set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
 567        cred->thread_keyring = keyring;
 568        cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
 569        root_cred = cred;
 570
 571        spin_lock_init(&siduidlock);
 572        uidtree = RB_ROOT;
 573        spin_lock_init(&sidgidlock);
 574        gidtree = RB_ROOT;
 575
 576        spin_lock_init(&uidsidlock);
 577        siduidtree = RB_ROOT;
 578        spin_lock_init(&gidsidlock);
 579        sidgidtree = RB_ROOT;
 580        register_shrinker(&cifs_shrinker);
 581
 582        cFYI(1, "cifs idmap keyring: %d", key_serial(keyring));
 583        return 0;
 584
 585failed_put_key:
 586        key_put(keyring);
 587failed_put_cred:
 588        put_cred(cred);
 589        return ret;
 590}
 591
 592void
 593exit_cifs_idmap(void)
 594{
 595        key_revoke(root_cred->thread_keyring);
 596        unregister_key_type(&cifs_idmap_key_type);
 597        put_cred(root_cred);
 598        unregister_shrinker(&cifs_shrinker);
 599        cFYI(1, "Unregistered %s key type", cifs_idmap_key_type.name);
 600}
 601
 602void
 603cifs_destroy_idmaptrees(void)
 604{
 605        struct rb_root *root;
 606        struct rb_node *node;
 607
 608        root = &uidtree;
 609        spin_lock(&siduidlock);
 610        while ((node = rb_first(root)))
 611                rb_erase(node, root);
 612        spin_unlock(&siduidlock);
 613
 614        root = &gidtree;
 615        spin_lock(&sidgidlock);
 616        while ((node = rb_first(root)))
 617                rb_erase(node, root);
 618        spin_unlock(&sidgidlock);
 619
 620        root = &siduidtree;
 621        spin_lock(&uidsidlock);
 622        while ((node = rb_first(root)))
 623                rb_erase(node, root);
 624        spin_unlock(&uidsidlock);
 625
 626        root = &sidgidtree;
 627        spin_lock(&gidsidlock);
 628        while ((node = rb_first(root)))
 629                rb_erase(node, root);
 630        spin_unlock(&gidsidlock);
 631}
 632
  633/* compare the two SIDs (roughly equivalent to a UUID for a user or group);
  634   returns zero if they match, and 1 or -1 (an ordering) if they do not */
 635int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
 636{
 637        int i;
 638        int num_subauth, num_sat, num_saw;
 639
 640        if ((!ctsid) || (!cwsid))
 641                return 1;
 642
 643        /* compare the revision */
 644        if (ctsid->revision != cwsid->revision) {
 645                if (ctsid->revision > cwsid->revision)
 646                        return 1;
 647                else
 648                        return -1;
 649        }
 650
 651        /* compare all of the six auth values */
 652        for (i = 0; i < 6; ++i) {
 653                if (ctsid->authority[i] != cwsid->authority[i]) {
 654                        if (ctsid->authority[i] > cwsid->authority[i])
 655                                return 1;
 656                        else
 657                                return -1;
 658                }
 659        }
 660
 661        /* compare all of the subauth values if any */
 662        num_sat = ctsid->num_subauth;
 663        num_saw = cwsid->num_subauth;
 664        num_subauth = num_sat < num_saw ? num_sat : num_saw;
 665        if (num_subauth) {
 666                for (i = 0; i < num_subauth; ++i) {
 667                        if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
 668                                if (le32_to_cpu(ctsid->sub_auth[i]) >
 669                                        le32_to_cpu(cwsid->sub_auth[i]))
 670                                        return 1;
 671                                else
 672                                        return -1;
 673                        }
 674                }
 675        }
 676
 677        return 0; /* sids compare/match */
 678}
 679
 680
  681/* copy ntsd, owner sid, and group sid from one security descriptor to another */
 682static void copy_sec_desc(const struct cifs_ntsd *pntsd,
 683                                struct cifs_ntsd *pnntsd, __u32 sidsoffset)
 684{
 685        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
 686        struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
 687
 688        /* copy security descriptor control portion */
 689        pnntsd->revision = pntsd->revision;
 690        pnntsd->type = pntsd->type;
 691        pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
 692        pnntsd->sacloffset = 0;
 693        pnntsd->osidoffset = cpu_to_le32(sidsoffset);
 694        pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
 695
 696        /* copy owner sid */
 697        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
 698                                le32_to_cpu(pntsd->osidoffset));
 699        nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
 700        cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
 701
 702        /* copy group sid */
 703        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
 704                                le32_to_cpu(pntsd->gsidoffset));
 705        ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
 706                                        sizeof(struct cifs_sid));
 707        cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
 708
 709        return;
 710}
 711
 712
 713/*
 714   change posix mode to reflect permissions
  715   pmode is the existing mode (we only want to overwrite part of it);
  716   bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO i.e. 00700, 00070 or 00007
 717*/
 718static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
 719                                 umode_t *pbits_to_set)
 720{
 721        __u32 flags = le32_to_cpu(ace_flags);
 722        /* the order of ACEs is important.  The canonical order is to begin with
 723           DENY entries followed by ALLOW, otherwise an allow entry could be
 724           encountered first, making the subsequent deny entry like "dead code"
  725           which would be superfluous since Windows stops when a match is made
 726           for the operation you are trying to perform for your user */
 727
 728        /* For deny ACEs we change the mask so that subsequent allow access
 729           control entries do not turn on the bits we are denying */
 730        if (type == ACCESS_DENIED) {
 731                if (flags & GENERIC_ALL)
 732                        *pbits_to_set &= ~S_IRWXUGO;
 733
 734                if ((flags & GENERIC_WRITE) ||
 735                        ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
 736                        *pbits_to_set &= ~S_IWUGO;
 737                if ((flags & GENERIC_READ) ||
 738                        ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
 739                        *pbits_to_set &= ~S_IRUGO;
 740                if ((flags & GENERIC_EXECUTE) ||
 741                        ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
 742                        *pbits_to_set &= ~S_IXUGO;
 743                return;
 744        } else if (type != ACCESS_ALLOWED) {
 745                cERROR(1, "unknown access control type %d", type);
 746                return;
 747        }
 748        /* else ACCESS_ALLOWED type */
 749
 750        if (flags & GENERIC_ALL) {
 751                *pmode |= (S_IRWXUGO & (*pbits_to_set));
 752                cFYI(DBG2, "all perms");
 753                return;
 754        }
 755        if ((flags & GENERIC_WRITE) ||
 756                        ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
 757                *pmode |= (S_IWUGO & (*pbits_to_set));
 758        if ((flags & GENERIC_READ) ||
 759                        ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
 760                *pmode |= (S_IRUGO & (*pbits_to_set));
 761        if ((flags & GENERIC_EXECUTE) ||
 762                        ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
 763                *pmode |= (S_IXUGO & (*pbits_to_set));
 764
 765        cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
 766        return;
 767}
 768
 769/*
  770   Generate access flags to reflect permissions; mode is the existing mode.
  771   This function is called for every ACE in the DACL whose SID matches
  772   either the owner, the group or everyone.
 773*/
 774
 775static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
 776                                __u32 *pace_flags)
 777{
 778        /* reset access mask */
 779        *pace_flags = 0x0;
 780
 781        /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
 782        mode &= bits_to_use;
 783
  784        /* check for R/W/X UGO since we do not know whose flags
  785           these are, but we have cleared all the bits except RWX for
  786           either user, group or other as per bits_to_use */
 787        if (mode & S_IRUGO)
 788                *pace_flags |= SET_FILE_READ_RIGHTS;
 789        if (mode & S_IWUGO)
 790                *pace_flags |= SET_FILE_WRITE_RIGHTS;
 791        if (mode & S_IXUGO)
 792                *pace_flags |= SET_FILE_EXEC_RIGHTS;
 793
 794        cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
 795        return;
 796}
 797
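/*
 * Fill in a single ACCESS_ALLOWED ACE for the given SID from the portion
 * of nmode selected by "bits"; returns the size of the ACE in bytes.
 */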
 798static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
 799                        const struct cifs_sid *psid, __u64 nmode, umode_t bits)
 800{
 801        int i;
 802        __u16 size = 0;
 803        __u32 access_req = 0;
 804
 805        pntace->type = ACCESS_ALLOWED;
 806        pntace->flags = 0x0;
 807        mode_to_access_flags(nmode, bits, &access_req);
 808        if (!access_req)
 809                access_req = SET_MINIMUM_RIGHTS;
 810        pntace->access_req = cpu_to_le32(access_req);
 811
 812        pntace->sid.revision = psid->revision;
 813        pntace->sid.num_subauth = psid->num_subauth;
 814        for (i = 0; i < 6; i++)
 815                pntace->sid.authority[i] = psid->authority[i];
 816        for (i = 0; i < psid->num_subauth; i++)
 817                pntace->sid.sub_auth[i] = psid->sub_auth[i];
 818
 819        size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
 820        pntace->size = cpu_to_le16(size);
 821
 822        return size;
 823}
 824
 825
 826#ifdef CONFIG_CIFS_DEBUG2
 827static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
 828{
 829        int num_subauth;
 830
 831        /* validate that we do not go past end of acl */
 832
 833        if (le16_to_cpu(pace->size) < 16) {
 834                cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
 835                return;
 836        }
 837
 838        if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
 839                cERROR(1, "ACL too small to parse ACE");
 840                return;
 841        }
 842
 843        num_subauth = pace->sid.num_subauth;
 844        if (num_subauth) {
 845                int i;
 846                cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
 847                        pace->sid.revision, pace->sid.num_subauth, pace->type,
 848                        pace->flags, le16_to_cpu(pace->size));
 849                for (i = 0; i < num_subauth; ++i) {
 850                        cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
 851                                le32_to_cpu(pace->sid.sub_auth[i]));
 852                }
 853
 854                /* BB add length check to make sure that we do not have huge
 855                        num auths and therefore go off the end */
 856        }
 857
 858        return;
 859}
 860#endif
 861
 862
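/*
 * Walk the ACEs of a DACL and fold those whose SID matches the owner,
 * group, everyone or authenticated-users SID into fattr->cf_mode.
 */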
 863static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
 864                       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
 865                       struct cifs_fattr *fattr)
 866{
 867        int i;
 868        int num_aces = 0;
 869        int acl_size;
 870        char *acl_base;
 871        struct cifs_ace **ppace;
 872
 873        /* BB need to add parm so we can store the SID BB */
 874
 875        if (!pdacl) {
 876                /* no DACL in the security descriptor, set
 877                   all the permissions for user/group/other */
 878                fattr->cf_mode |= S_IRWXUGO;
 879                return;
 880        }
 881
 882        /* validate that we do not go past end of acl */
 883        if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
 884                cERROR(1, "ACL too small to parse DACL");
 885                return;
 886        }
 887
 888        cFYI(DBG2, "DACL revision %d size %d num aces %d",
 889                le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
 890                le32_to_cpu(pdacl->num_aces));
 891
 892        /* reset rwx permissions for user/group/other.
 893           Also, if num_aces is 0 i.e. DACL has no ACEs,
 894           user/group/other have no permissions */
 895        fattr->cf_mode &= ~(S_IRWXUGO);
 896
 897        acl_base = (char *)pdacl;
 898        acl_size = sizeof(struct cifs_acl);
 899
 900        num_aces = le32_to_cpu(pdacl->num_aces);
 901        if (num_aces > 0) {
 902                umode_t user_mask = S_IRWXU;
 903                umode_t group_mask = S_IRWXG;
 904                umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
 905
 906                if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
 907                        return;
 908                ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
 909                                GFP_KERNEL);
 910                if (!ppace) {
 911                        cERROR(1, "DACL memory allocation error");
 912                        return;
 913                }
 914
 915                for (i = 0; i < num_aces; ++i) {
 916                        ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
 917#ifdef CONFIG_CIFS_DEBUG2
 918                        dump_ace(ppace[i], end_of_acl);
 919#endif
 920                        if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
 921                                access_flags_to_mode(ppace[i]->access_req,
 922                                                     ppace[i]->type,
 923                                                     &fattr->cf_mode,
 924                                                     &user_mask);
 925                        if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
 926                                access_flags_to_mode(ppace[i]->access_req,
 927                                                     ppace[i]->type,
 928                                                     &fattr->cf_mode,
 929                                                     &group_mask);
 930                        if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
 931                                access_flags_to_mode(ppace[i]->access_req,
 932                                                     ppace[i]->type,
 933                                                     &fattr->cf_mode,
 934                                                     &other_mask);
 935                        if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
 936                                access_flags_to_mode(ppace[i]->access_req,
 937                                                     ppace[i]->type,
 938                                                     &fattr->cf_mode,
 939                                                     &other_mask);
 940
 941
 942/*                      memcpy((void *)(&(cifscred->aces[i])),
 943                                (void *)ppace[i],
 944                                sizeof(struct cifs_ace)); */
 945
 946                        acl_base = (char *)ppace[i];
 947                        acl_size = le16_to_cpu(ppace[i]->size);
 948                }
 949
 950                kfree(ppace);
 951        }
 952
 953        return;
 954}
 955
 956
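/*
 * Build a three-ACE DACL (owner, group, everyone) reflecting the mode
 * requested by a chmod.
 */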
 957static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
 958                        struct cifs_sid *pgrpsid, __u64 nmode)
 959{
 960        u16 size = 0;
 961        struct cifs_acl *pnndacl;
 962
 963        pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
 964
 965        size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
 966                                        pownersid, nmode, S_IRWXU);
 967        size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
 968                                        pgrpsid, nmode, S_IRWXG);
 969        size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
 970                                         &sid_everyone, nmode, S_IRWXO);
 971
 972        pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
 973        pndacl->num_aces = cpu_to_le32(3);
 974
 975        return 0;
 976}
 977
 978
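/* Validate that a SID does not run past the end of the ACL buffer. */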
 979static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
 980{
 981        /* BB need to add parm so we can store the SID BB */
 982
 983        /* validate that we do not go past end of ACL - sid must be at least 8
  984           bytes long (assuming no sub-auths - e.g. the null SID) */
 985        if (end_of_acl < (char *)psid + 8) {
 986                cERROR(1, "ACL too small to parse SID %p", psid);
 987                return -EINVAL;
 988        }
 989
 990        if (psid->num_subauth) {
 991#ifdef CONFIG_CIFS_DEBUG2
 992                int i;
 993                cFYI(1, "SID revision %d num_auth %d",
 994                        psid->revision, psid->num_subauth);
 995
 996                for (i = 0; i < psid->num_subauth; i++) {
 997                        cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
 998                                le32_to_cpu(psid->sub_auth[i]));
 999                }
1000
1001                /* BB add length check to make sure that we do not have huge
1002                        num auths and therefore go off the end */
1003                cFYI(1, "RID 0x%x",
1004                        le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
1005#endif
1006        }
1007
1008        return 0;
1009}
1010
1011
1012/* Convert CIFS ACL to POSIX form */
1013static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
1014                struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
1015{
1016        int rc = 0;
1017        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1018        struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
1019        char *end_of_acl = ((char *)pntsd) + acl_len;
1020        __u32 dacloffset;
1021
1022        if (pntsd == NULL)
1023                return -EIO;
1024
1025        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1026                                le32_to_cpu(pntsd->osidoffset));
1027        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1028                                le32_to_cpu(pntsd->gsidoffset));
1029        dacloffset = le32_to_cpu(pntsd->dacloffset);
1030        dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1031        cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1032                 "sacloffset 0x%x dacloffset 0x%x",
1033                 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
1034                 le32_to_cpu(pntsd->gsidoffset),
1035                 le32_to_cpu(pntsd->sacloffset), dacloffset);
1036/*      cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1037        rc = parse_sid(owner_sid_ptr, end_of_acl);
1038        if (rc) {
1039                cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
1040                return rc;
1041        }
1042        rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
1043        if (rc) {
1044                cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
1045                return rc;
1046        }
1047
1048        rc = parse_sid(group_sid_ptr, end_of_acl);
1049        if (rc) {
 1050                cFYI(1, "%s: Error %d parsing Group SID", __func__, rc);
1051                return rc;
1052        }
1053        rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
1054        if (rc) {
1055                cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
1056                return rc;
1057        }
1058
1059        if (dacloffset)
1060                parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
1061                           group_sid_ptr, fattr);
1062        else
1063                cFYI(1, "no ACL"); /* BB grant all or default perms? */
1064
1065        return rc;
1066}
1067
1068/* Convert permission bits from mode to equivalent CIFS ACL */
1069static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
1070        __u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
1071{
1072        int rc = 0;
1073        __u32 dacloffset;
1074        __u32 ndacloffset;
1075        __u32 sidsoffset;
1076        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1077        struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
1078        struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
1079        struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
1080
1081        if (nmode != NO_CHANGE_64) { /* chmod */
1082                owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1083                                le32_to_cpu(pntsd->osidoffset));
1084                group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1085                                le32_to_cpu(pntsd->gsidoffset));
1086                dacloffset = le32_to_cpu(pntsd->dacloffset);
1087                dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1088                ndacloffset = sizeof(struct cifs_ntsd);
1089                ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
1090                ndacl_ptr->revision = dacl_ptr->revision;
1091                ndacl_ptr->size = 0;
1092                ndacl_ptr->num_aces = 0;
1093
1094                rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
1095                                        nmode);
1096                sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
1097                /* copy sec desc control portion & owner and group sids */
1098                copy_sec_desc(pntsd, pnntsd, sidsoffset);
1099                *aclflag = CIFS_ACL_DACL;
1100        } else {
1101                memcpy(pnntsd, pntsd, secdesclen);
1102                if (uid != NO_CHANGE_32) { /* chown */
1103                        owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1104                                        le32_to_cpu(pnntsd->osidoffset));
1105                        nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1106                                                                GFP_KERNEL);
1107                        if (!nowner_sid_ptr)
1108                                return -ENOMEM;
1109                        rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
1110                        if (rc) {
1111                                cFYI(1, "%s: Mapping error %d for owner id %d",
1112                                                __func__, rc, uid);
1113                                kfree(nowner_sid_ptr);
1114                                return rc;
1115                        }
1116                        cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
1117                        kfree(nowner_sid_ptr);
1118                        *aclflag = CIFS_ACL_OWNER;
1119                }
1120                if (gid != NO_CHANGE_32) { /* chgrp */
1121                        group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1122                                        le32_to_cpu(pnntsd->gsidoffset));
1123                        ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1124                                                                GFP_KERNEL);
1125                        if (!ngroup_sid_ptr)
1126                                return -ENOMEM;
1127                        rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
1128                        if (rc) {
1129                                cFYI(1, "%s: Mapping error %d for group id %d",
1130                                                __func__, rc, gid);
1131                                kfree(ngroup_sid_ptr);
1132                                return rc;
1133                        }
1134                        cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
1135                        kfree(ngroup_sid_ptr);
1136                        *aclflag = CIFS_ACL_GROUP;
1137                }
1138        }
1139
1140        return rc;
1141}
1142
1143static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
1144                __u16 fid, u32 *pacllen)
1145{
1146        struct cifs_ntsd *pntsd = NULL;
1147        unsigned int xid;
1148        int rc;
1149        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1150
1151        if (IS_ERR(tlink))
1152                return ERR_CAST(tlink);
1153
1154        xid = get_xid();
1155        rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
1156        free_xid(xid);
1157
1158        cifs_put_tlink(tlink);
1159
1160        cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1161        if (rc)
1162                return ERR_PTR(rc);
1163        return pntsd;
1164}
1165
1166static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
1167                const char *path, u32 *pacllen)
1168{
1169        struct cifs_ntsd *pntsd = NULL;
1170        int oplock = 0;
1171        unsigned int xid;
1172        int rc, create_options = 0;
1173        __u16 fid;
1174        struct cifs_tcon *tcon;
1175        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1176
1177        if (IS_ERR(tlink))
1178                return ERR_CAST(tlink);
1179
1180        tcon = tlink_tcon(tlink);
1181        xid = get_xid();
1182
1183        if (backup_cred(cifs_sb))
1184                create_options |= CREATE_OPEN_BACKUP_INTENT;
1185
1186        rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
1187                        create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1188                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1189        if (!rc) {
1190                rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
1191                CIFSSMBClose(xid, tcon, fid);
1192        }
1193
1194        cifs_put_tlink(tlink);
1195        free_xid(xid);
1196
1197        cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1198        if (rc)
1199                return ERR_PTR(rc);
1200        return pntsd;
1201}
1202
1203/* Retrieve an ACL from the server */
1204struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
1205                                      struct inode *inode, const char *path,
1206                                      u32 *pacllen)
1207{
1208        struct cifs_ntsd *pntsd = NULL;
1209        struct cifsFileInfo *open_file = NULL;
1210
1211        if (inode)
1212                open_file = find_readable_file(CIFS_I(inode), true);
1213        if (!open_file)
1214                return get_cifs_acl_by_path(cifs_sb, path, pacllen);
1215
1216        pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen);
1217        cifsFileInfo_put(open_file);
1218        return pntsd;
1219}
1220
1221 /* Set an ACL on the server */
1222int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1223                        struct inode *inode, const char *path, int aclflag)
1224{
1225        int oplock = 0;
1226        unsigned int xid;
1227        int rc, access_flags, create_options = 0;
1228        __u16 fid;
1229        struct cifs_tcon *tcon;
1230        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1231        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1232
1233        if (IS_ERR(tlink))
1234                return PTR_ERR(tlink);
1235
1236        tcon = tlink_tcon(tlink);
1237        xid = get_xid();
1238
1239        if (backup_cred(cifs_sb))
1240                create_options |= CREATE_OPEN_BACKUP_INTENT;
1241
1242        if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
1243                access_flags = WRITE_OWNER;
1244        else
1245                access_flags = WRITE_DAC;
1246
1247        rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
1248                        create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1249                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1250        if (rc) {
1251                cERROR(1, "Unable to open file to set ACL");
1252                goto out;
1253        }
1254
1255        rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
1256        cFYI(DBG2, "SetCIFSACL rc = %d", rc);
1257
1258        CIFSSMBClose(xid, tcon, fid);
1259out:
1260        free_xid(xid);
1261        cifs_put_tlink(tlink);
1262        return rc;
1263}
1264
 1265/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
1266int
1267cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1268                  struct inode *inode, const char *path, const __u16 *pfid)
1269{
1270        struct cifs_ntsd *pntsd = NULL;
1271        u32 acllen = 0;
1272        int rc = 0;
1273
1274        cFYI(DBG2, "converting ACL to mode for %s", path);
1275
1276        if (pfid)
1277                pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
1278        else
1279                pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1280
 1281        /* if we can retrieve the ACL, parse the Access Control Entries (ACEs) */
1282        if (IS_ERR(pntsd)) {
1283                rc = PTR_ERR(pntsd);
1284                cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1285        } else {
1286                rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
1287                kfree(pntsd);
1288                if (rc)
1289                        cERROR(1, "parse sec desc failed rc = %d", rc);
1290        }
1291
1292        return rc;
1293}
1294
1295/* Convert mode bits to an ACL so we can update the ACL on the server */
1296int
1297id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1298                        uid_t uid, gid_t gid)
1299{
1300        int rc = 0;
1301        int aclflag = CIFS_ACL_DACL; /* default flag to set */
1302        __u32 secdesclen = 0;
1303        struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1304        struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1305
1306        cFYI(DBG2, "set ACL from mode for %s", path);
1307
1308        /* Get the security descriptor */
1309        pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
1310
 1311        /* Add three ACEs for owner, group and everyone, getting rid of the
 1312           other ACEs (as chmod disables ACEs), and set the security descriptor */
1313
1314        if (IS_ERR(pntsd)) {
1315                rc = PTR_ERR(pntsd);
1316                cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1317        } else {
 1318                /* allocate memory for the smb header, the set security
 1319                   descriptor request, the security descriptor parameters,
 1320                   and the security descriptor itself */
1321
1322                secdesclen = secdesclen < DEFSECDESCLEN ?
1323                                        DEFSECDESCLEN : secdesclen;
1324                pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1325                if (!pnntsd) {
1326                        cERROR(1, "Unable to allocate security descriptor");
1327                        kfree(pntsd);
1328                        return -ENOMEM;
1329                }
1330
1331                rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
1332                                        &aclflag);
1333
1334                cFYI(DBG2, "build_sec_desc rc: %d", rc);
1335
1336                if (!rc) {
1337                        /* Set the security descriptor */
1338                        rc = set_cifs_acl(pnntsd, secdesclen, inode,
1339                                                path, aclflag);
1340                        cFYI(DBG2, "set_cifs_acl rc: %d", rc);
1341                }
1342
1343                kfree(pnntsd);
1344                kfree(pntsd);
1345        }
1346
1347        return rc;
1348}
1349