linux/fs/cifs/cifsacl.c
   1/*
   2 *   fs/cifs/cifsacl.c
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2007,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *
   7 *   Contains the routines for mapping CIFS/NTFS ACLs
   8 *
   9 *   This library is free software; you can redistribute it and/or modify
  10 *   it under the terms of the GNU Lesser General Public License as published
  11 *   by the Free Software Foundation; either version 2.1 of the License, or
  12 *   (at your option) any later version.
  13 *
  14 *   This library is distributed in the hope that it will be useful,
  15 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  17 *   the GNU Lesser General Public License for more details.
  18 *
  19 *   You should have received a copy of the GNU Lesser General Public License
  20 *   along with this library; if not, write to the Free Software
  21 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  22 */
  23
  24#include <linux/fs.h>
  25#include <linux/slab.h>
  26#include <linux/string.h>
  27#include <linux/keyctl.h>
  28#include <linux/key-type.h>
  29#include <keys/user-type.h>
  30#include "cifspdu.h"
  31#include "cifsglob.h"
  32#include "cifsacl.h"
  33#include "cifsproto.h"
  34#include "cifs_debug.h"
  35
  36/* security id for everyone/world system group */
  37static const struct cifs_sid sid_everyone = {
  38        1, 1, {0, 0, 0, 0, 0, 1}, {0} };
  39/* security id for Authenticated Users system group */
  40static const struct cifs_sid sid_authusers = {
  41        1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
  42/* group users */
  43static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
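    /* in SID string notation, sid_everyone is S-1-1-0 and sid_authusers is
       S-1-5-11 (see sid_to_str() below for how the string form is built) */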
  44
  45const struct cred *root_cred;
  46
  47static void
  48shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
  49                        int *nr_del)
  50{
  51        struct rb_node *node;
  52        struct rb_node *tmp;
  53        struct cifs_sid_id *psidid;
  54
  55        node = rb_first(root);
  56        while (node) {
  57                tmp = node;
  58                node = rb_next(tmp);
  59                psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
  60                if (nr_to_scan == 0 || *nr_del == nr_to_scan)
  61                        ++(*nr_rem);
  62                else {
  63                        if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
  64                                                && psidid->refcount == 0) {
  65                                rb_erase(tmp, root);
  66                                ++(*nr_del);
  67                        } else
  68                                ++(*nr_rem);
  69                }
  70        }
  71}
  72
  73/*
  74 * Run idmap cache shrinker.
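     * When nr_to_scan is zero this is only a query and nothing is freed;
     * otherwise expired, unreferenced entries are erased from the four idmap
     * trees and the number of entries remaining is returned.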
  75 */
  76static int
  77cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
  78{
  79        int nr_to_scan = sc->nr_to_scan;
  80        int nr_del = 0;
  81        int nr_rem = 0;
  82        struct rb_root *root;
  83
  84        root = &uidtree;
  85        spin_lock(&siduidlock);
  86        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
  87        spin_unlock(&siduidlock);
  88
  89        root = &gidtree;
  90        spin_lock(&sidgidlock);
  91        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
  92        spin_unlock(&sidgidlock);
  93
  94        root = &siduidtree;
  95        spin_lock(&uidsidlock);
  96        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
  97        spin_unlock(&uidsidlock);
  98
  99        root = &sidgidtree;
 100        spin_lock(&gidsidlock);
 101        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
 102        spin_unlock(&gidsidlock);
 103
 104        return nr_rem;
 105}
 106
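    /*
     * Insert a node into the uid/gid -> SID cache.  The sidstr built here is
     * the typestr prefix ("oi:" or "gi:") followed by the decimal id, which
     * later serves as the key description passed to request_key().
     */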
 107static void
 108sid_rb_insert(struct rb_root *root, unsigned long cid,
 109                struct cifs_sid_id **psidid, char *typestr)
 110{
 111        char *strptr;
 112        struct rb_node *node = root->rb_node;
 113        struct rb_node *parent = NULL;
 114        struct rb_node **linkto = &(root->rb_node);
 115        struct cifs_sid_id *lsidid;
 116
 117        while (node) {
 118                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 119                parent = node;
 120                if (cid > lsidid->id) {
 121                        linkto = &(node->rb_left);
 122                        node = node->rb_left;
 123                }
 124                if (cid < lsidid->id) {
 125                        linkto = &(node->rb_right);
 126                        node = node->rb_right;
 127                }
 128        }
 129
 130        (*psidid)->id = cid;
 131        (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
 132        (*psidid)->refcount = 0;
 133
 134        sprintf((*psidid)->sidstr, "%s", typestr);
 135        strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
 136        sprintf(strptr, "%ld", cid);
 137
 138        clear_bit(SID_ID_PENDING, &(*psidid)->state);
 139        clear_bit(SID_ID_MAPPED, &(*psidid)->state);
 140
 141        rb_link_node(&(*psidid)->rbnode, parent, linkto);
 142        rb_insert_color(&(*psidid)->rbnode, root);
 143}
 144
 145static struct cifs_sid_id *
 146sid_rb_search(struct rb_root *root, unsigned long cid)
 147{
 148        struct rb_node *node = root->rb_node;
 149        struct cifs_sid_id *lsidid;
 150
 151        while (node) {
 152                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 153                if (cid > lsidid->id)
 154                        node = node->rb_left;
 155                else if (cid < lsidid->id)
 156                        node = node->rb_right;
 157                else /* node found */
 158                        return lsidid;
 159        }
 160
 161        return NULL;
 162}
 163
 164static struct shrinker cifs_shrinker = {
 165        .shrink = cifs_idmap_shrinker,
 166        .seeks = DEFAULT_SEEKS,
 167};
 168
 169static int
 170cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
 171{
 172        char *payload;
 173
 174        payload = kmalloc(datalen, GFP_KERNEL);
 175        if (!payload)
 176                return -ENOMEM;
 177
 178        memcpy(payload, data, datalen);
 179        key->payload.data = payload;
 180        key->datalen = datalen;
 181        return 0;
 182}
 183
 184static inline void
 185cifs_idmap_key_destroy(struct key *key)
 186{
 187        kfree(key->payload.data);
 188}
 189
 190struct key_type cifs_idmap_key_type = {
 191        .name        = "cifs.idmap",
 192        .instantiate = cifs_idmap_key_instantiate,
 193        .destroy     = cifs_idmap_key_destroy,
 194        .describe    = user_describe,
 195        .match       = user_match,
 196};
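    /*
     * Keys of this type are instantiated by a userspace helper through the
     * request-key mechanism.  An illustrative /etc/request-key.conf entry for
     * the cifs-utils helper would look something like:
     *
     *   create  cifs.idmap  *  *  /usr/sbin/cifs.idmap %k
     *
     * (path and helper name depend on the distribution).  The helper resolves
     * the "oi:"/"gi:"/"os:"/"gs:" descriptions built below and instantiates
     * the key with the matching SID or uid/gid payload.
     */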
 197
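    /*
     * Convert a SID to its textual "S-<revision>-<authority>-<subauth>..."
     * form; this string (prefixed with "os:" or "gs:") becomes part of the
     * key description used for the idmap upcall.
     */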
 198static void
 199sid_to_str(struct cifs_sid *sidptr, char *sidstr)
 200{
 201        int i;
 202        unsigned long saval;
 203        char *strptr;
 204
 205        strptr = sidstr;
 206
 207        sprintf(strptr, "%s", "S");
 208        strptr = sidstr + strlen(sidstr);
 209
 210        sprintf(strptr, "-%d", sidptr->revision);
 211        strptr = sidstr + strlen(sidstr);
 212
 213        for (i = 0; i < 6; ++i) {
 214                if (sidptr->authority[i]) {
 215                        sprintf(strptr, "-%d", sidptr->authority[i]);
 216                        strptr = sidstr + strlen(sidstr);
 217                }
 218        }
 219
 220        for (i = 0; i < sidptr->num_subauth; ++i) {
 221                saval = le32_to_cpu(sidptr->sub_auth[i]);
 222                sprintf(strptr, "-%ld", saval);
 223                strptr = sidstr + strlen(sidstr);
 224        }
 225}
 226
 227static void
 228id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
 229                struct cifs_sid_id **psidid, char *typestr)
 230{
 231        int rc;
 232        char *strptr;
 233        struct rb_node *node = root->rb_node;
 234        struct rb_node *parent = NULL;
 235        struct rb_node **linkto = &(root->rb_node);
 236        struct cifs_sid_id *lsidid;
 237
 238        while (node) {
 239                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 240                parent = node;
 241                rc = compare_sids(sidptr, &((lsidid)->sid));
 242                if (rc > 0) {
 243                        linkto = &(node->rb_left);
 244                        node = node->rb_left;
 245                } else if (rc < 0) {
 246                        linkto = &(node->rb_right);
 247                        node = node->rb_right;
 248                }
 249        }
 250
 251        memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
 252        (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
 253        (*psidid)->refcount = 0;
 254
 255        sprintf((*psidid)->sidstr, "%s", typestr);
 256        strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
 257        sid_to_str(&(*psidid)->sid, strptr);
 258
 259        clear_bit(SID_ID_PENDING, &(*psidid)->state);
 260        clear_bit(SID_ID_MAPPED, &(*psidid)->state);
 261
 262        rb_link_node(&(*psidid)->rbnode, parent, linkto);
 263        rb_insert_color(&(*psidid)->rbnode, root);
 264}
 265
 266static struct cifs_sid_id *
 267id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
 268{
 269        int rc;
 270        struct rb_node *node = root->rb_node;
 271        struct cifs_sid_id *lsidid;
 272
 273        while (node) {
 274                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
 275                rc = compare_sids(sidptr, &((lsidid)->sid));
 276                if (rc > 0) {
 277                        node = node->rb_left;
 278                } else if (rc < 0) {
 279                        node = node->rb_right;
 280                } else /* node found */
 281                        return lsidid;
 282        }
 283
 284        return NULL;
 285}
 286
 287static int
 288sidid_pending_wait(void *unused)
 289{
 290        schedule();
 291        return signal_pending(current) ? -ERESTARTSYS : 0;
 292}
 293
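    /*
     * Map a POSIX uid or gid to a SID.  Results are cached in the uidtree or
     * gidtree rbtree; on a cache miss an "oi:<uid>" or "gi:<gid>" cifs.idmap
     * key is requested and the upcall's payload (a cifs_sid) is copied into
     * ssid.
     */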
 294static int
 295id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
 296{
 297        int rc = 0;
 298        struct key *sidkey;
 299        const struct cred *saved_cred;
 300        struct cifs_sid *lsid;
 301        struct cifs_sid_id *psidid, *npsidid;
 302        struct rb_root *cidtree;
 303        spinlock_t *cidlock;
 304
 305        if (sidtype == SIDOWNER) {
 306                cidlock = &siduidlock;
 307                cidtree = &uidtree;
 308        } else if (sidtype == SIDGROUP) {
 309                cidlock = &sidgidlock;
 310                cidtree = &gidtree;
 311        } else
 312                return -EINVAL;
 313
 314        spin_lock(cidlock);
 315        psidid = sid_rb_search(cidtree, cid);
 316
 317        if (!psidid) { /* node does not exist, allocate one & attempt adding */
 318                spin_unlock(cidlock);
 319                npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
 320                if (!npsidid)
 321                        return -ENOMEM;
 322
 323                npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
 324                if (!npsidid->sidstr) {
 325                        kfree(npsidid);
 326                        return -ENOMEM;
 327                }
 328
 329                spin_lock(cidlock);
 330                psidid = sid_rb_search(cidtree, cid);
 331                if (psidid) { /* node happened to get inserted meanwhile */
 332                        ++psidid->refcount;
 333                        spin_unlock(cidlock);
 334                        kfree(npsidid->sidstr);
 335                        kfree(npsidid);
 336                } else {
 337                        psidid = npsidid;
 338                        sid_rb_insert(cidtree, cid, &psidid,
 339                                        sidtype == SIDOWNER ? "oi:" : "gi:");
 340                        ++psidid->refcount;
 341                        spin_unlock(cidlock);
 342                }
 343        } else {
 344                ++psidid->refcount;
 345                spin_unlock(cidlock);
 346        }
 347
 348        /*
 349         * If we are here, it is safe to access psidid and its fields
 350         * since a reference was taken earlier while holding the spinlock.
 351         * A reference on the node is put without holding the spinlock,
 352         * which is OK in this case: the shrinker will not erase this node
 353         * until all references are put, and we do not access any fields of
 354         * the node after a reference is put.
 355         */
 356        if (test_bit(SID_ID_MAPPED, &psidid->state)) {
 357                memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
 358                psidid->time = jiffies; /* update ts for accessing */
 359                goto id_sid_out;
 360        }
 361
 362        if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
 363                rc = -EINVAL;
 364                goto id_sid_out;
 365        }
 366
 367        if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
 368                saved_cred = override_creds(root_cred);
 369                sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
 370                if (IS_ERR(sidkey)) {
 371                        rc = -EINVAL;
 372                        cFYI(1, "%s: Can't map an id to a SID", __func__);
 373                } else {
 374                        lsid = (struct cifs_sid *)sidkey->payload.data;
 375                        memcpy(&psidid->sid, lsid,
 376                                sidkey->datalen < sizeof(struct cifs_sid) ?
 377                                sidkey->datalen : sizeof(struct cifs_sid));
 378                        memcpy(ssid, &psidid->sid,
 379                                sidkey->datalen < sizeof(struct cifs_sid) ?
 380                                sidkey->datalen : sizeof(struct cifs_sid));
 381                        set_bit(SID_ID_MAPPED, &psidid->state);
 382                        key_put(sidkey);
 383                        kfree(psidid->sidstr);
 384                }
 385                psidid->time = jiffies; /* update ts for accessing */
 386                revert_creds(saved_cred);
 387                clear_bit(SID_ID_PENDING, &psidid->state);
 388                wake_up_bit(&psidid->state, SID_ID_PENDING);
 389        } else {
 390                rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
 391                                sidid_pending_wait, TASK_INTERRUPTIBLE);
 392                if (rc) {
 393                        cFYI(1, "%s: sidid_pending_wait interrupted %d",
 394                                        __func__, rc);
 395                        --psidid->refcount;
 396                        return rc;
 397                }
 398                if (test_bit(SID_ID_MAPPED, &psidid->state))
 399                        memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
 400                else
 401                        rc = -EINVAL;
 402        }
 403id_sid_out:
 404        --psidid->refcount;
 405        return rc;
 406}
 407
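    /*
     * Map an owner or group SID from a security descriptor to a POSIX uid or
     * gid in fattr.  On a cache miss an "os:<sidstr>" or "gs:<sidstr>" key is
     * requested; if the upcall fails, the mount's default uid/gid is used.
     */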
 408static int
 409sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
 410                struct cifs_fattr *fattr, uint sidtype)
 411{
 412        int rc;
 413        unsigned long cid;
 414        struct key *idkey;
 415        const struct cred *saved_cred;
 416        struct cifs_sid_id *psidid, *npsidid;
 417        struct rb_root *cidtree;
 418        spinlock_t *cidlock;
 419
 420        if (sidtype == SIDOWNER) {
 421                cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
 422                cidlock = &siduidlock;
 423                cidtree = &uidtree;
 424        } else if (sidtype == SIDGROUP) {
 425                cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
 426                cidlock = &sidgidlock;
 427                cidtree = &gidtree;
 428        } else
 429                return -ENOENT;
 430
 431        spin_lock(cidlock);
 432        psidid = id_rb_search(cidtree, psid);
 433
 434        if (!psidid) { /* node does not exist, allocate one & attempt adding */
 435                spin_unlock(cidlock);
 436                npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
 437                if (!npsidid)
 438                        return -ENOMEM;
 439
 440                npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
 441                if (!npsidid->sidstr) {
 442                        kfree(npsidid);
 443                        return -ENOMEM;
 444                }
 445
 446                spin_lock(cidlock);
 447                psidid = id_rb_search(cidtree, psid);
 448                if (psidid) { /* node happened to get inserted meanwhile */
 449                        ++psidid->refcount;
 450                        spin_unlock(cidlock);
 451                        kfree(npsidid->sidstr);
 452                        kfree(npsidid);
 453                } else {
 454                        psidid = npsidid;
 455                        id_rb_insert(cidtree, psid, &psidid,
 456                                        sidtype == SIDOWNER ? "os:" : "gs:");
 457                        ++psidid->refcount;
 458                        spin_unlock(cidlock);
 459                }
 460        } else {
 461                ++psidid->refcount;
 462                spin_unlock(cidlock);
 463        }
 464
 465        /*
 466         * If we are here, it is safe to access psidid and its fields
 467         * since a reference was taken earlier while holding the spinlock.
 468         * A reference on the node is put without holding the spinlock,
 469         * which is OK in this case: the shrinker will not erase this node
 470         * until all references are put, and we do not access any fields of
 471         * the node after a reference is put.
 472         */
 473        if (test_bit(SID_ID_MAPPED, &psidid->state)) {
 474                cid = psidid->id;
 475                psidid->time = jiffies; /* update ts for accessing */
 476                goto sid_to_id_out;
 477        }
 478
 479        if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
 480                goto sid_to_id_out;
 481
 482        if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
 483                saved_cred = override_creds(root_cred);
 484                idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
 485                if (IS_ERR(idkey))
 486                        cFYI(1, "%s: Can't map SID to an id", __func__);
 487                else {
 488                        cid = *(unsigned long *)idkey->payload.value;
 489                        psidid->id = cid;
 490                        set_bit(SID_ID_MAPPED, &psidid->state);
 491                        key_put(idkey);
 492                        kfree(psidid->sidstr);
 493                }
 494                revert_creds(saved_cred);
 495                psidid->time = jiffies; /* update ts for accessing */
 496                clear_bit(SID_ID_PENDING, &psidid->state);
 497                wake_up_bit(&psidid->state, SID_ID_PENDING);
 498        } else {
 499                rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
 500                                sidid_pending_wait, TASK_INTERRUPTIBLE);
 501                if (rc) {
 502                        cFYI(1, "%s: sidid_pending_wait interrupted %d",
 503                                        __func__, rc);
 504                        --psidid->refcount; /* decremented without spinlock */
 505                        return rc;
 506                }
 507                if (test_bit(SID_ID_MAPPED, &psidid->state))
 508                        cid = psidid->id;
 509        }
 510
 511sid_to_id_out:
 512        --psidid->refcount; /* decremented without spinlock */
 513        if (sidtype == SIDOWNER)
 514                fattr->cf_uid = cid;
 515        else
 516                fattr->cf_gid = cid;
 517
 518        return 0;
 519}
 520
 521int
 522init_cifs_idmap(void)
 523{
 524        struct cred *cred;
 525        struct key *keyring;
 526        int ret;
 527
 528        cFYI(1, "Registering the %s key type", cifs_idmap_key_type.name);
 529
 530        /* create an override credential set with a special thread keyring in
 531         * which requests are cached
 532         *
 533         * this is used to prevent malicious redirections from being installed
 534         * with add_key().
 535         */
 536        cred = prepare_kernel_cred(NULL);
 537        if (!cred)
 538                return -ENOMEM;
 539
 540        keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
 541                            (KEY_POS_ALL & ~KEY_POS_SETATTR) |
 542                            KEY_USR_VIEW | KEY_USR_READ,
 543                            KEY_ALLOC_NOT_IN_QUOTA);
 544        if (IS_ERR(keyring)) {
 545                ret = PTR_ERR(keyring);
 546                goto failed_put_cred;
 547        }
 548
 549        ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
 550        if (ret < 0)
 551                goto failed_put_key;
 552
 553        ret = register_key_type(&cifs_idmap_key_type);
 554        if (ret < 0)
 555                goto failed_put_key;
 556
 557        /* instruct request_key() to use this special keyring as a cache for
 558         * the results it looks up */
 559        set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
 560        cred->thread_keyring = keyring;
 561        cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
 562        root_cred = cred;
 563
 564        spin_lock_init(&siduidlock);
 565        uidtree = RB_ROOT;
 566        spin_lock_init(&sidgidlock);
 567        gidtree = RB_ROOT;
 568
 569        spin_lock_init(&uidsidlock);
 570        siduidtree = RB_ROOT;
 571        spin_lock_init(&gidsidlock);
 572        sidgidtree = RB_ROOT;
 573        register_shrinker(&cifs_shrinker);
 574
 575        cFYI(1, "cifs idmap keyring: %d", key_serial(keyring));
 576        return 0;
 577
 578failed_put_key:
 579        key_put(keyring);
 580failed_put_cred:
 581        put_cred(cred);
 582        return ret;
 583}
 584
 585void
 586exit_cifs_idmap(void)
 587{
 588        key_revoke(root_cred->thread_keyring);
 589        unregister_key_type(&cifs_idmap_key_type);
 590        put_cred(root_cred);
 591        unregister_shrinker(&cifs_shrinker);
 592        cFYI(1, "Unregistered %s key type", cifs_idmap_key_type.name);
 593}
 594
 595void
 596cifs_destroy_idmaptrees(void)
 597{
 598        struct rb_root *root;
 599        struct rb_node *node;
 600
 601        root = &uidtree;
 602        spin_lock(&siduidlock);
 603        while ((node = rb_first(root)))
 604                rb_erase(node, root);
 605        spin_unlock(&siduidlock);
 606
 607        root = &gidtree;
 608        spin_lock(&sidgidlock);
 609        while ((node = rb_first(root)))
 610                rb_erase(node, root);
 611        spin_unlock(&sidgidlock);
 612
 613        root = &siduidtree;
 614        spin_lock(&uidsidlock);
 615        while ((node = rb_first(root)))
 616                rb_erase(node, root);
 617        spin_unlock(&uidsidlock);
 618
 619        root = &sidgidtree;
 620        spin_lock(&gidsidlock);
 621        while ((node = rb_first(root)))
 622                rb_erase(node, root);
 623        spin_unlock(&gidsidlock);
 624}
 625
 626/* Compare two SIDs (each roughly equivalent to a UUID for a user or group):
 627   returns 0 if they match, and 1 or -1 (for ordering) if they differ */
 628int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
 629{
 630        int i;
 631        int num_subauth, num_sat, num_saw;
 632
 633        if ((!ctsid) || (!cwsid))
 634                return 1;
 635
 636        /* compare the revision */
 637        if (ctsid->revision != cwsid->revision) {
 638                if (ctsid->revision > cwsid->revision)
 639                        return 1;
 640                else
 641                        return -1;
 642        }
 643
 644        /* compare all of the six auth values */
 645        for (i = 0; i < 6; ++i) {
 646                if (ctsid->authority[i] != cwsid->authority[i]) {
 647                        if (ctsid->authority[i] > cwsid->authority[i])
 648                                return 1;
 649                        else
 650                                return -1;
 651                }
 652        }
 653
 654        /* compare all of the subauth values if any */
 655        num_sat = ctsid->num_subauth;
 656        num_saw = cwsid->num_subauth;
 657        num_subauth = num_sat < num_saw ? num_sat : num_saw;
 658        if (num_subauth) {
 659                for (i = 0; i < num_subauth; ++i) {
 660                        if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
 661                                if (le32_to_cpu(ctsid->sub_auth[i]) >
 662                                        le32_to_cpu(cwsid->sub_auth[i]))
 663                                        return 1;
 664                                else
 665                                        return -1;
 666                        }
 667                }
 668        }
 669
 670        return 0; /* sids compare/match */
 671}
 672
 673
 674/* copy ntsd, owner sid, and group sid from a security descriptor to another */
 675static void copy_sec_desc(const struct cifs_ntsd *pntsd,
 676                                struct cifs_ntsd *pnntsd, __u32 sidsoffset)
 677{
 678        int i;
 679
 680        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
 681        struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
 682
 683        /* copy security descriptor control portion */
 684        pnntsd->revision = pntsd->revision;
 685        pnntsd->type = pntsd->type;
 686        pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
 687        pnntsd->sacloffset = 0;
 688        pnntsd->osidoffset = cpu_to_le32(sidsoffset);
 689        pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
 690
 691        /* copy owner sid */
 692        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
 693                                le32_to_cpu(pntsd->osidoffset));
 694        nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
 695
 696        nowner_sid_ptr->revision = owner_sid_ptr->revision;
 697        nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
 698        for (i = 0; i < 6; i++)
 699                nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
 700        for (i = 0; i < 5; i++)
 701                nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
 702
 703        /* copy group sid */
 704        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
 705                                le32_to_cpu(pntsd->gsidoffset));
 706        ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
 707                                        sizeof(struct cifs_sid));
 708
 709        ngroup_sid_ptr->revision = group_sid_ptr->revision;
 710        ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
 711        for (i = 0; i < 6; i++)
 712                ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
 713        for (i = 0; i < 5; i++)
 714                ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];
 715
 716        return;
 717}
 718
 719
 720/*
 721   change posix mode to reflect permissions
 722   pmode is the existing mode (we only want to overwrite part of this);
 723   bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO, i.e. 00700, 00070 or 00007
 724*/
 725static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
 726                                 umode_t *pbits_to_set)
 727{
 728        __u32 flags = le32_to_cpu(ace_flags);
 729        /* the order of ACEs is important.  The canonical order is to begin with
 730           DENY entries followed by ALLOW, otherwise an allow entry could be
 731           encountered first, making the subsequent deny entry like "dead code"
 732           which would be superfluous since Windows stops when a match is made
 733           for the operation you are trying to perform for your user */
 734
 735        /* For deny ACEs we change the mask so that subsequent allow access
 736           control entries do not turn on the bits we are denying */
 737        if (type == ACCESS_DENIED) {
 738                if (flags & GENERIC_ALL)
 739                        *pbits_to_set &= ~S_IRWXUGO;
 740
 741                if ((flags & GENERIC_WRITE) ||
 742                        ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
 743                        *pbits_to_set &= ~S_IWUGO;
 744                if ((flags & GENERIC_READ) ||
 745                        ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
 746                        *pbits_to_set &= ~S_IRUGO;
 747                if ((flags & GENERIC_EXECUTE) ||
 748                        ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
 749                        *pbits_to_set &= ~S_IXUGO;
 750                return;
 751        } else if (type != ACCESS_ALLOWED) {
 752                cERROR(1, "unknown access control type %d", type);
 753                return;
 754        }
 755        /* else ACCESS_ALLOWED type */
 756
 757        if (flags & GENERIC_ALL) {
 758                *pmode |= (S_IRWXUGO & (*pbits_to_set));
 759                cFYI(DBG2, "all perms");
 760                return;
 761        }
 762        if ((flags & GENERIC_WRITE) ||
 763                        ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
 764                *pmode |= (S_IWUGO & (*pbits_to_set));
 765        if ((flags & GENERIC_READ) ||
 766                        ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
 767                *pmode |= (S_IRUGO & (*pbits_to_set));
 768        if ((flags & GENERIC_EXECUTE) ||
 769                        ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
 770                *pmode |= (S_IXUGO & (*pbits_to_set));
 771
 772        cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
 773        return;
 774}
 775
 776/*
 777   Generate access flags to reflect permissions; mode is the existing mode.
 778   This function is called for every ACE in the DACL whose SID matches
 779   with either owner or group or everyone.
 780*/
 781
 782static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
 783                                __u32 *pace_flags)
 784{
 785        /* reset access mask */
 786        *pace_flags = 0x0;
 787
 788        /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
 789        mode &= bits_to_use;
 790
 791        /* check for R/W/X UGO since we do not know whose flags
 792           these are, but we have cleared all the bits sans RWX for
 793           either user, group or other as per bits_to_use */
 794        if (mode & S_IRUGO)
 795                *pace_flags |= SET_FILE_READ_RIGHTS;
 796        if (mode & S_IWUGO)
 797                *pace_flags |= SET_FILE_WRITE_RIGHTS;
 798        if (mode & S_IXUGO)
 799                *pace_flags |= SET_FILE_EXEC_RIGHTS;
 800
 801        cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
 802        return;
 803}
 804
 805static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
 806                        const struct cifs_sid *psid, __u64 nmode, umode_t bits)
 807{
 808        int i;
 809        __u16 size = 0;
 810        __u32 access_req = 0;
 811
 812        pntace->type = ACCESS_ALLOWED;
 813        pntace->flags = 0x0;
 814        mode_to_access_flags(nmode, bits, &access_req);
 815        if (!access_req)
 816                access_req = SET_MINIMUM_RIGHTS;
 817        pntace->access_req = cpu_to_le32(access_req);
 818
 819        pntace->sid.revision = psid->revision;
 820        pntace->sid.num_subauth = psid->num_subauth;
 821        for (i = 0; i < 6; i++)
 822                pntace->sid.authority[i] = psid->authority[i];
 823        for (i = 0; i < psid->num_subauth; i++)
 824                pntace->sid.sub_auth[i] = psid->sub_auth[i];
 825
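            /* ACE size: type(1) + flags(1) + size(2) + access_req(4)
               + SID revision(1) + num_subauth(1) + authority(6)
               + num_subauth 32-bit sub-authorities */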
 826        size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
 827        pntace->size = cpu_to_le16(size);
 828
 829        return size;
 830}
 831
 832
 833#ifdef CONFIG_CIFS_DEBUG2
 834static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
 835{
 836        int num_subauth;
 837
 838        /* validate that we do not go past end of acl */
 839
 840        if (le16_to_cpu(pace->size) < 16) {
 841                cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
 842                return;
 843        }
 844
 845        if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
 846                cERROR(1, "ACL too small to parse ACE");
 847                return;
 848        }
 849
 850        num_subauth = pace->sid.num_subauth;
 851        if (num_subauth) {
 852                int i;
 853                cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
 854                        pace->sid.revision, pace->sid.num_subauth, pace->type,
 855                        pace->flags, le16_to_cpu(pace->size));
 856                for (i = 0; i < num_subauth; ++i) {
 857                        cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
 858                                le32_to_cpu(pace->sid.sub_auth[i]));
 859                }
 860
 861                /* BB add length check to make sure that we do not have huge
 862                        num auths and therefore go off the end */
 863        }
 864
 865        return;
 866}
 867#endif
 868
 869
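    /*
     * Walk the ACEs in the DACL and fold those whose SID matches the owner,
     * group, Everyone, or Authenticated Users SID into fattr->cf_mode.
     */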
 870static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
 871                       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
 872                       struct cifs_fattr *fattr)
 873{
 874        int i;
 875        int num_aces = 0;
 876        int acl_size;
 877        char *acl_base;
 878        struct cifs_ace **ppace;
 879
 880        /* BB need to add parm so we can store the SID BB */
 881
 882        if (!pdacl) {
 883                /* no DACL in the security descriptor, set
 884                   all the permissions for user/group/other */
 885                fattr->cf_mode |= S_IRWXUGO;
 886                return;
 887        }
 888
 889        /* validate that we do not go past end of acl */
 890        if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
 891                cERROR(1, "ACL too small to parse DACL");
 892                return;
 893        }
 894
 895        cFYI(DBG2, "DACL revision %d size %d num aces %d",
 896                le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
 897                le32_to_cpu(pdacl->num_aces));
 898
 899        /* reset rwx permissions for user/group/other.
 900           Also, if num_aces is 0 i.e. DACL has no ACEs,
 901           user/group/other have no permissions */
 902        fattr->cf_mode &= ~(S_IRWXUGO);
 903
 904        acl_base = (char *)pdacl;
 905        acl_size = sizeof(struct cifs_acl);
 906
 907        num_aces = le32_to_cpu(pdacl->num_aces);
 908        if (num_aces > 0) {
 909                umode_t user_mask = S_IRWXU;
 910                umode_t group_mask = S_IRWXG;
 911                umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
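                    /* note: other_mask lets an Everyone/Authenticated Users
                       ACE set the owner and group bits as well as the other
                       bits */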
 912
 913                if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
 914                        return;
 915                ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
 916                                GFP_KERNEL);
 917                if (!ppace) {
 918                        cERROR(1, "DACL memory allocation error");
 919                        return;
 920                }
 921
 922                for (i = 0; i < num_aces; ++i) {
 923                        ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
 924#ifdef CONFIG_CIFS_DEBUG2
 925                        dump_ace(ppace[i], end_of_acl);
 926#endif
 927                        if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
 928                                access_flags_to_mode(ppace[i]->access_req,
 929                                                     ppace[i]->type,
 930                                                     &fattr->cf_mode,
 931                                                     &user_mask);
 932                        if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
 933                                access_flags_to_mode(ppace[i]->access_req,
 934                                                     ppace[i]->type,
 935                                                     &fattr->cf_mode,
 936                                                     &group_mask);
 937                        if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
 938                                access_flags_to_mode(ppace[i]->access_req,
 939                                                     ppace[i]->type,
 940                                                     &fattr->cf_mode,
 941                                                     &other_mask);
 942                        if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
 943                                access_flags_to_mode(ppace[i]->access_req,
 944                                                     ppace[i]->type,
 945                                                     &fattr->cf_mode,
 946                                                     &other_mask);
 947
 948
 949/*                      memcpy((void *)(&(cifscred->aces[i])),
 950                                (void *)ppace[i],
 951                                sizeof(struct cifs_ace)); */
 952
 953                        acl_base = (char *)ppace[i];
 954                        acl_size = le16_to_cpu(ppace[i]->size);
 955                }
 956
 957                kfree(ppace);
 958        }
 959
 960        return;
 961}
 962
 963
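    /*
     * Build a DACL containing exactly three ALLOW ACEs (owner, group,
     * Everyone) derived from the requested mode bits.
     */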
 964static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
 965                        struct cifs_sid *pgrpsid, __u64 nmode)
 966{
 967        u16 size = 0;
 968        struct cifs_acl *pnndacl;
 969
 970        pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
 971
 972        size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
 973                                        pownersid, nmode, S_IRWXU);
 974        size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
 975                                        pgrpsid, nmode, S_IRWXG);
 976        size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
 977                                         &sid_everyone, nmode, S_IRWXO);
 978
 979        pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
 980        pndacl->num_aces = cpu_to_le32(3);
 981
 982        return 0;
 983}
 984
 985
 986static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
 987{
 988        /* BB need to add parm so we can store the SID BB */
 989
 990        /* validate that we do not go past end of ACL - sid must be at least 8
 991           bytes long (assuming no sub-auths - e.g. the null SID) */
 992        if (end_of_acl < (char *)psid + 8) {
 993                cERROR(1, "ACL too small to parse SID %p", psid);
 994                return -EINVAL;
 995        }
 996
 997        if (psid->num_subauth) {
 998#ifdef CONFIG_CIFS_DEBUG2
 999                int i;
1000                cFYI(1, "SID revision %d num_auth %d",
1001                        psid->revision, psid->num_subauth);
1002
1003                for (i = 0; i < psid->num_subauth; i++) {
1004                        cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
1005                                le32_to_cpu(psid->sub_auth[i]));
1006                }
1007
1008                /* BB add length check to make sure that we do not have huge
1009                        num auths and therefore go off the end */
1010                cFYI(1, "RID 0x%x",
1011                        le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
1012#endif
1013        }
1014
1015        return 0;
1016}
1017
1018
1019/* Convert CIFS ACL to POSIX form */
1020static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
1021                struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
1022{
1023        int rc = 0;
1024        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1025        struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
1026        char *end_of_acl = ((char *)pntsd) + acl_len;
1027        __u32 dacloffset;
1028
1029        if (pntsd == NULL)
1030                return -EIO;
1031
1032        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1033                                le32_to_cpu(pntsd->osidoffset));
1034        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1035                                le32_to_cpu(pntsd->gsidoffset));
1036        dacloffset = le32_to_cpu(pntsd->dacloffset);
1037        dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1038        cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1039                 "sacloffset 0x%x dacloffset 0x%x",
1040                 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
1041                 le32_to_cpu(pntsd->gsidoffset),
1042                 le32_to_cpu(pntsd->sacloffset), dacloffset);
1043/*      cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1044        rc = parse_sid(owner_sid_ptr, end_of_acl);
1045        if (rc) {
1046                cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
1047                return rc;
1048        }
1049        rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
1050        if (rc) {
1051                cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
1052                return rc;
1053        }
1054
1055        rc = parse_sid(group_sid_ptr, end_of_acl);
1056        if (rc) {
1057                cFYI(1, "%s: Error %d parsing Group SID", __func__, rc);
1058                return rc;
1059        }
1060        rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
1061        if (rc) {
1062                cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
1063                return rc;
1064        }
1065
1066        if (dacloffset)
1067                parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
1068                           group_sid_ptr, fattr);
1069        else
1070                cFYI(1, "no ACL"); /* BB grant all or default perms? */
1071
1072        return rc;
1073}
1074
1075/* Convert permission bits from mode to equivalent CIFS ACL */
1076static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
1077        __u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
1078{
1079        int rc = 0;
1080        __u32 dacloffset;
1081        __u32 ndacloffset;
1082        __u32 sidsoffset;
1083        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1084        struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
1085        struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
1086        struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
1087
1088        if (nmode != NO_CHANGE_64) { /* chmod */
1089                owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1090                                le32_to_cpu(pntsd->osidoffset));
1091                group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1092                                le32_to_cpu(pntsd->gsidoffset));
1093                dacloffset = le32_to_cpu(pntsd->dacloffset);
1094                dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1095                ndacloffset = sizeof(struct cifs_ntsd);
1096                ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
1097                ndacl_ptr->revision = dacl_ptr->revision;
1098                ndacl_ptr->size = 0;
1099                ndacl_ptr->num_aces = 0;
1100
1101                rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
1102                                        nmode);
1103                sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
1104                /* copy sec desc control portion & owner and group sids */
1105                copy_sec_desc(pntsd, pnntsd, sidsoffset);
1106                *aclflag = CIFS_ACL_DACL;
1107        } else {
1108                memcpy(pnntsd, pntsd, secdesclen);
1109                if (uid != NO_CHANGE_32) { /* chown */
1110                        owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1111                                        le32_to_cpu(pnntsd->osidoffset));
1112                        nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1113                                                                GFP_KERNEL);
1114                        if (!nowner_sid_ptr)
1115                                return -ENOMEM;
1116                        rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
1117                        if (rc) {
1118                                cFYI(1, "%s: Mapping error %d for owner id %d",
1119                                                __func__, rc, uid);
1120                                kfree(nowner_sid_ptr);
1121                                return rc;
1122                        }
1123                        memcpy(owner_sid_ptr, nowner_sid_ptr,
1124                                        sizeof(struct cifs_sid));
1125                        kfree(nowner_sid_ptr);
1126                        *aclflag = CIFS_ACL_OWNER;
1127                }
1128                if (gid != NO_CHANGE_32) { /* chgrp */
1129                        group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1130                                        le32_to_cpu(pnntsd->gsidoffset));
1131                        ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1132                                                                GFP_KERNEL);
1133                        if (!ngroup_sid_ptr)
1134                                return -ENOMEM;
1135                        rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
1136                        if (rc) {
1137                                cFYI(1, "%s: Mapping error %d for group id %d",
1138                                                __func__, rc, gid);
1139                                kfree(ngroup_sid_ptr);
1140                                return rc;
1141                        }
1142                        memcpy(group_sid_ptr, ngroup_sid_ptr,
1143                                        sizeof(struct cifs_sid));
1144                        kfree(ngroup_sid_ptr);
1145                        *aclflag = CIFS_ACL_GROUP;
1146                }
1147        }
1148
1149        return rc;
1150}
1151
1152static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
1153                __u16 fid, u32 *pacllen)
1154{
1155        struct cifs_ntsd *pntsd = NULL;
1156        unsigned int xid;
1157        int rc;
1158        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1159
1160        if (IS_ERR(tlink))
1161                return ERR_CAST(tlink);
1162
1163        xid = get_xid();
1164        rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
1165        free_xid(xid);
1166
1167        cifs_put_tlink(tlink);
1168
1169        cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1170        if (rc)
1171                return ERR_PTR(rc);
1172        return pntsd;
1173}
1174
1175static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
1176                const char *path, u32 *pacllen)
1177{
1178        struct cifs_ntsd *pntsd = NULL;
1179        int oplock = 0;
1180        unsigned int xid;
1181        int rc, create_options = 0;
1182        __u16 fid;
1183        struct cifs_tcon *tcon;
1184        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1185
1186        if (IS_ERR(tlink))
1187                return ERR_CAST(tlink);
1188
1189        tcon = tlink_tcon(tlink);
1190        xid = get_xid();
1191
1192        if (backup_cred(cifs_sb))
1193                create_options |= CREATE_OPEN_BACKUP_INTENT;
1194
1195        rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
1196                        create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1197                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1198        if (!rc) {
1199                rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
1200                CIFSSMBClose(xid, tcon, fid);
1201        }
1202
1203        cifs_put_tlink(tlink);
1204        free_xid(xid);
1205
1206        cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1207        if (rc)
1208                return ERR_PTR(rc);
1209        return pntsd;
1210}
1211
1212/* Retrieve an ACL from the server */
1213struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
1214                                      struct inode *inode, const char *path,
1215                                      u32 *pacllen)
1216{
1217        struct cifs_ntsd *pntsd = NULL;
1218        struct cifsFileInfo *open_file = NULL;
1219
1220        if (inode)
1221                open_file = find_readable_file(CIFS_I(inode), true);
1222        if (!open_file)
1223                return get_cifs_acl_by_path(cifs_sb, path, pacllen);
1224
1225        pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
1226        cifsFileInfo_put(open_file);
1227        return pntsd;
1228}
1229
1230 /* Set an ACL on the server */
1231int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1232                        struct inode *inode, const char *path, int aclflag)
1233{
1234        int oplock = 0;
1235        unsigned int xid;
1236        int rc, access_flags, create_options = 0;
1237        __u16 fid;
1238        struct cifs_tcon *tcon;
1239        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1240        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1241
1242        if (IS_ERR(tlink))
1243                return PTR_ERR(tlink);
1244
1245        tcon = tlink_tcon(tlink);
1246        xid = get_xid();
1247
1248        if (backup_cred(cifs_sb))
1249                create_options |= CREATE_OPEN_BACKUP_INTENT;
1250
1251        if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
1252                access_flags = WRITE_OWNER;
1253        else
1254                access_flags = WRITE_DAC;
1255
1256        rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
1257                        create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1258                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1259        if (rc) {
1260                cERROR(1, "Unable to open file to set ACL");
1261                goto out;
1262        }
1263
1264        rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
1265        cFYI(DBG2, "SetCIFSACL rc = %d", rc);
1266
1267        CIFSSMBClose(xid, tcon, fid);
1268out:
1269        free_xid(xid);
1270        cifs_put_tlink(tlink);
1271        return rc;
1272}
1273
1274/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
1275int
1276cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1277                  struct inode *inode, const char *path, const __u16 *pfid)
1278{
1279        struct cifs_ntsd *pntsd = NULL;
1280        u32 acllen = 0;
1281        int rc = 0;
1282
1283        cFYI(DBG2, "converting ACL to mode for %s", path);
1284
1285        if (pfid)
1286                pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
1287        else
1288                pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1289
1290        /* if we can retrieve the ACL, now parse the Access Control Entries (ACEs) */
1291        if (IS_ERR(pntsd)) {
1292                rc = PTR_ERR(pntsd);
1293                cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1294        } else {
1295                rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
1296                kfree(pntsd);
1297                if (rc)
1298                        cERROR(1, "parse sec desc failed rc = %d", rc);
1299        }
1300
1301        return rc;
1302}
1303
1304/* Convert mode bits to an ACL so we can update the ACL on the server */
1305int
1306id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1307                        uid_t uid, gid_t gid)
1308{
1309        int rc = 0;
1310        int aclflag = CIFS_ACL_DACL; /* default flag to set */
1311        __u32 secdesclen = 0;
1312        struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1313        struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1314
1315        cFYI(DBG2, "set ACL from mode for %s", path);
1316
1317        /* Get the security descriptor */
1318        pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
1319
1320        /* Add three ACEs for owner, group, everyone, getting rid of other
1321           ACEs (as chmod disables ACEs), and set the security descriptor */
1322
1323        if (IS_ERR(pntsd)) {
1324                rc = PTR_ERR(pntsd);
1325                cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1326        } else {
1327                /* allocate memory for the smb header, the set security
1328                   descriptor request, the security descriptor parameters,
1329                   and the security descriptor itself */
1330
1331                secdesclen = secdesclen < DEFSECDESCLEN ?
1332                                        DEFSECDESCLEN : secdesclen;
1333                pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1334                if (!pnntsd) {
1335                        cERROR(1, "Unable to allocate security descriptor");
1336                        kfree(pntsd);
1337                        return -ENOMEM;
1338                }
1339
1340                rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
1341                                        &aclflag);
1342
1343                cFYI(DBG2, "build_sec_desc rc: %d", rc);
1344
1345                if (!rc) {
1346                        /* Set the security descriptor */
1347                        rc = set_cifs_acl(pnntsd, secdesclen, inode,
1348                                                path, aclflag);
1349                        cFYI(DBG2, "set_cifs_acl rc: %d", rc);
1350                }
1351
1352                kfree(pnntsd);
1353                kfree(pntsd);
1354        }
1355
1356        return rc;
1357}
1358