linux/kernel/audit_tree.c
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                      hash_lock
 * tree.rules anchors rule.rlist                                audit_filter_mutex
 * chunk.trees anchors tree.same_root                           hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                             RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

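/*
 * Allocate a chunk with room for 'count' owner slots.  The refcount starts
 * at 1 for the caller; each slot's index is initialized to its position in
 * owners[].
 */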
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

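/*
 * Hash by inode address: dividing by L1_CACHE_BYTES discards the low-order
 * alignment bits before the value is taken modulo HASH_SIZE.
 */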
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct fsnotify_mark *entry = &chunk->mark;
        struct list_head *list;

        if (!entry->i.inode)
                return;
        list = chunk_hash(entry->i.inode);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /* mark.inode may have gone NULL, but who cares? */
                if (p->mark.i.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}

/* tagging and untagging inodes with trees */

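/*
 * The low bits of node->index hold the node's position in owners[], so
 * stepping that many entries back from 'p' lands on owners[0], and
 * container_of() then recovers the enclosing chunk.  The "will prune"
 * MSB is masked off first.
 */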
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

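/*
 * Drop one owner from a chunk.  Called with hash_lock held; the lock is
 * dropped while a replacement chunk (one slot smaller) is allocated and
 * swapped in, or the mark is destroyed outright if this was the last
 * owner.  hash_lock is reacquired before returning.
 */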
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        spin_lock(&entry->lock);
        if (chunk->dead || !entry->i.inode) {
                spin_unlock(&entry->lock);
                if (new)
                        free_chunk(new);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
                goto out;
        }

        if (!new)
                goto Fallback;

        fsnotify_duplicate_mark(&new->mark, entry);
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark(entry);
        goto out;

Fallback:
        // do the best we can
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

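/*
 * Attach a fresh single-owner chunk to an inode that has no audit mark
 * yet.  If the tree has gone away in the meantime, the new mark is torn
 * down again.
 */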
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_get_mark(entry);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        return 0;
}

/* the first tagged inode becomes root of tree */
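/*
 * Add 'tree' as an owner of the chunk attached to 'inode': the existing
 * chunk is replaced by a copy with one extra owner slot.  Falls back to
 * create_chunk() if the inode is not tagged yet.
 */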
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        spin_lock(&old_entry->lock);
        if (!old_entry->i.inode) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        fsnotify_duplicate_mark(chunk_entry, old_entry);
        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);

                fsnotify_get_mark(chunk_entry);
                fsnotify_destroy_mark(chunk_entry);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark(old_entry);
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark */
        return 0;
}

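/*
 * Detach every rule that refers to this tree, emitting a CONFIG_CHANGE
 * record for each fully-initialized one, and free the entries via RCU.
 */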
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=");
                        audit_log_string(ab, "remove rule");
                        audit_log_format(ab, " dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        audit_log_key(ab, rule->filterkey);
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */

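/*
 * Chunks whose "will prune" bit is set are first moved to the head of
 * tree->chunks, then untagged one by one until an unmarked chunk is
 * reached.  If that leaves the tree without a root, the tree itself is
 * killed.
 */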
static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
        return mnt->mnt_root->d_inode == arg;
}

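/*
 * Walk tree_list with a cursor, re-resolve each tree's path, and prune
 * every chunk whose inode is no longer the root of a mount under that
 * path.
 */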
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (!root_mnt)
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        struct inode *inode = chunk->mark.i.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                put_tree(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (!mnt) {
                err = -ENOMEM;
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

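/*
 * For every tree whose path covers 'old', also tag the mounts collected
 * at 'new', so that rules on those trees cover the mounts at 'new' as
 * well.
 */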
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (!tagged)
                return -ENOMEM;

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
        return 0;
}

static void audit_schedule_prune(void)
{
        kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

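/*
 * The inode carrying this chunk is going away.  Detach the chunk from all
 * of its trees; dying trees either move to prune_list (and the prune
 * thread is kicked) or onto the current syscall's postponed list.
 */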
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        if (need_prune)
                audit_schedule_prune();
        mutex_unlock(&audit_filter_mutex);
}

static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmount_mark,
                                   struct fsnotify_event *event)
{
        BUG();
        return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);
        fsnotify_put_mark(entry);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
                                  struct fsnotify_mark *inode_mark,
                                  struct fsnotify_mark *vfsmount_mark,
                                  __u32 mask, void *data, int data_type)
{
        return false;
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .should_send_event = audit_tree_send_event,
        .free_group_priv = NULL,
        .free_event_priv = NULL,
        .freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);