linux/fs/dcache.c
   1/*
   2 * fs/dcache.c
   3 *
   4 * Complete reimplementation
   5 * (C) 1997 Thomas Schoebel-Theuer,
   6 * with heavy changes by Linus Torvalds
   7 */
   8
   9/*
  10 * Notes on the allocation strategy:
  11 *
  12 * The dcache is a master of the icache - whenever a dcache entry
  13 * exists, the inode will always exist. "iput()" is done either when
  14 * the dcache entry is deleted or garbage collected.
  15 */
  16
  17#include <linux/syscalls.h>
  18#include <linux/string.h>
  19#include <linux/mm.h>
  20#include <linux/fs.h>
  21#include <linux/fsnotify.h>
  22#include <linux/slab.h>
  23#include <linux/init.h>
  24#include <linux/hash.h>
  25#include <linux/cache.h>
  26#include <linux/export.h>
  27#include <linux/mount.h>
  28#include <linux/file.h>
  29#include <asm/uaccess.h>
  30#include <linux/security.h>
  31#include <linux/seqlock.h>
  32#include <linux/swap.h>
  33#include <linux/bootmem.h>
  34#include <linux/fs_struct.h>
  35#include <linux/hardirq.h>
  36#include <linux/bit_spinlock.h>
  37#include <linux/rculist_bl.h>
  38#include <linux/prefetch.h>
  39#include <linux/ratelimit.h>
  40#include <linux/list_lru.h>
  41#include "internal.h"
  42#include "mount.h"
  43
  44/*
  45 * Usage:
  46 * dcache->d_inode->i_lock protects:
  47 *   - i_dentry, d_alias, d_inode of aliases
  48 * dcache_hash_bucket lock protects:
  49 *   - the dcache hash table
  50 * s_anon bl list spinlock protects:
  51 *   - the s_anon list (see __d_drop)
  52 * dentry->d_sb->s_dentry_lru_lock protects:
  53 *   - the dcache lru lists and counters
  54 * d_lock protects:
  55 *   - d_flags
  56 *   - d_name
  57 *   - d_lru
  58 *   - d_count
  59 *   - d_unhashed()
  60 *   - d_parent and d_subdirs
   61 *   - children's d_child and d_parent
  62 *   - d_alias, d_inode
  63 *
  64 * Ordering:
  65 * dentry->d_inode->i_lock
  66 *   dentry->d_lock
  67 *     dentry->d_sb->s_dentry_lru_lock
  68 *     dcache_hash_bucket lock
  69 *     s_anon lock
  70 *
  71 * If there is an ancestor relationship:
  72 * dentry->d_parent->...->d_parent->d_lock
  73 *   ...
  74 *     dentry->d_parent->d_lock
  75 *       dentry->d_lock
  76 *
  77 * If no ancestor relationship:
  78 * if (dentry1 < dentry2)
  79 *   dentry1->d_lock
  80 *     dentry2->d_lock
  81 */
  82int sysctl_vfs_cache_pressure __read_mostly = 100;
  83EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  84
  85__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  86
  87EXPORT_SYMBOL(rename_lock);
  88
  89static struct kmem_cache *dentry_cache __read_mostly;
  90
  91/*
  92 * This is the single most critical data structure when it comes
  93 * to the dcache: the hashtable for lookups. Somebody should try
  94 * to make this good - I've just made it work.
  95 *
  96 * This hash-function tries to avoid losing too many bits of hash
  97 * information, yet avoid using a prime hash-size or similar.
  98 */
  99
 100static unsigned int d_hash_mask __read_mostly;
 101static unsigned int d_hash_shift __read_mostly;
 102
 103static struct hlist_bl_head *dentry_hashtable __read_mostly;
 104
 105static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
 106                                        unsigned int hash)
 107{
 108        hash += (unsigned long) parent / L1_CACHE_BYTES;
 109        hash = hash + (hash >> d_hash_shift);
 110        return dentry_hashtable + (hash & d_hash_mask);
 111}
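/*
 * Illustrative only: a minimal sketch of how the lookup side would use the
 * bucket returned by d_hash() (the real __d_lookup()/__d_lookup_rcu() add
 * seqcount, d_op->d_compare and rename_lock handling on top of this):
 *
 *	struct hlist_bl_head *b = d_hash(parent, name->hash);
 *	struct hlist_bl_node *node;
 *	struct dentry *dentry;
 *
 *	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash)
 *		if (dentry->d_parent == parent &&
 *		    !dentry_cmp(dentry, name->name, name->len))
 *			return dentry;
 */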
 112
 113/* Statistics gathering. */
 114struct dentry_stat_t dentry_stat = {
 115        .age_limit = 45,
 116};
 117
 118static DEFINE_PER_CPU(long, nr_dentry);
 119static DEFINE_PER_CPU(long, nr_dentry_unused);
 120
 121#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 122
 123/*
 124 * Here we resort to our own counters instead of using generic per-cpu counters
  125 * for consistency with what the vfs inode code does. We expect to get better
  126 * code and performance by having our own specialized counters.
 127 *
 128 * Please note that the loop is done over all possible CPUs, not over all online
 129 * CPUs. The reason for this is that we don't want to play games with CPUs going
 130 * on and off. If one of them goes off, we will just keep their counters.
 131 *
 132 * glommer: See cffbc8a for details, and if you ever intend to change this,
 133 * please update all vfs counters to match.
 134 */
 135static long get_nr_dentry(void)
 136{
 137        int i;
 138        long sum = 0;
 139        for_each_possible_cpu(i)
 140                sum += per_cpu(nr_dentry, i);
 141        return sum < 0 ? 0 : sum;
 142}
 143
 144static long get_nr_dentry_unused(void)
 145{
 146        int i;
 147        long sum = 0;
 148        for_each_possible_cpu(i)
 149                sum += per_cpu(nr_dentry_unused, i);
 150        return sum < 0 ? 0 : sum;
 151}
 152
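/*
 * Note: this handler backs the "dentry-state" sysctl, i.e. the numbers
 * reported by /proc/sys/fs/dentry-state (nr_dentry, nr_unused, age_limit,
 * want_pages, ...).
 */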
 153int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 154                   size_t *lenp, loff_t *ppos)
 155{
 156        dentry_stat.nr_dentry = get_nr_dentry();
 157        dentry_stat.nr_unused = get_nr_dentry_unused();
 158        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 159}
 160#endif
 161
 162/*
 163 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 164 * The strings are both count bytes long, and count is non-zero.
 165 */
 166#ifdef CONFIG_DCACHE_WORD_ACCESS
 167
 168#include <asm/word-at-a-time.h>
 169/*
  170 * NOTE! 'cs' comes from a dentry, so it has an aligned
  171 * allocation for this particular component. We don't
 172 * strictly need the load_unaligned_zeropad() safety, but it
 173 * doesn't hurt either.
 174 *
 175 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 176 * need the careful unaligned handling.
 177 */
 178static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 179{
 180        unsigned long a,b,mask;
 181
 182        for (;;) {
 183                a = *(unsigned long *)cs;
 184                b = load_unaligned_zeropad(ct);
 185                if (tcount < sizeof(unsigned long))
 186                        break;
 187                if (unlikely(a != b))
 188                        return 1;
 189                cs += sizeof(unsigned long);
 190                ct += sizeof(unsigned long);
 191                tcount -= sizeof(unsigned long);
 192                if (!tcount)
 193                        return 0;
 194        }
 195        mask = bytemask_from_count(tcount);
 196        return unlikely(!!((a ^ b) & mask));
 197}
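/*
 * Worked illustration (assuming a little-endian 64-bit machine): comparing a
 * 6-byte component loads one word from each string, breaks out of the loop
 * because tcount < sizeof(unsigned long), and then masks the final comparison
 * down to the low 6 bytes, so the two bytes beyond the name never influence
 * the result.
 */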
 198
 199#else
 200
 201static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 202{
 203        do {
 204                if (*cs != *ct)
 205                        return 1;
 206                cs++;
 207                ct++;
 208                tcount--;
 209        } while (tcount);
 210        return 0;
 211}
 212
 213#endif
 214
 215static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 216{
 217        const unsigned char *cs;
 218        /*
 219         * Be careful about RCU walk racing with rename:
 220         * use ACCESS_ONCE to fetch the name pointer.
 221         *
 222         * NOTE! Even if a rename will mean that the length
 223         * was not loaded atomically, we don't care. The
 224         * RCU walk will check the sequence count eventually,
 225         * and catch it. And we won't overrun the buffer,
 226         * because we're reading the name pointer atomically,
 227         * and a dentry name is guaranteed to be properly
 228         * terminated with a NUL byte.
 229         *
 230         * End result: even if 'len' is wrong, we'll exit
 231         * early because the data cannot match (there can
 232         * be no NUL in the ct/tcount data)
 233         */
 234        cs = ACCESS_ONCE(dentry->d_name.name);
 235        smp_read_barrier_depends();
 236        return dentry_string_cmp(cs, ct, tcount);
 237}
 238
 239static void __d_free(struct rcu_head *head)
 240{
 241        struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 242
 243        WARN_ON(!hlist_unhashed(&dentry->d_alias));
 244        if (dname_external(dentry))
 245                kfree(dentry->d_name.name);
 246        kmem_cache_free(dentry_cache, dentry); 
 247}
 248
 249/*
 250 * no locks, please.
 251 */
 252static void d_free(struct dentry *dentry)
 253{
 254        BUG_ON((int)dentry->d_lockref.count > 0);
 255        this_cpu_dec(nr_dentry);
 256        if (dentry->d_op && dentry->d_op->d_release)
 257                dentry->d_op->d_release(dentry);
 258
 259        /* if dentry was never visible to RCU, immediate free is OK */
 260        if (!(dentry->d_flags & DCACHE_RCUACCESS))
 261                __d_free(&dentry->d_u.d_rcu);
 262        else
 263                call_rcu(&dentry->d_u.d_rcu, __d_free);
 264}
 265
 266/**
 267 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 268 * @dentry: the target dentry
 269 * After this call, in-progress rcu-walk path lookup will fail. This
 270 * should be called after unhashing, and after changing d_inode (if
 271 * the dentry has not already been unhashed).
 272 */
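/*
 * (The barrier below is write_seqcount_barrier(), which bumps d_seq by two:
 * the count stays even, so no writer appears to be in progress, but any
 * rcu-walk reader that sampled the old value will fail its seqcount recheck
 * and bail out of rcu-walk.)
 */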
 273static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
 274{
 275        assert_spin_locked(&dentry->d_lock);
 276        /* Go through a barrier */
 277        write_seqcount_barrier(&dentry->d_seq);
 278}
 279
 280/*
 281 * Release the dentry's inode, using the filesystem
 282 * d_iput() operation if defined. Dentry has no refcount
 283 * and is unhashed.
 284 */
 285static void dentry_iput(struct dentry * dentry)
 286        __releases(dentry->d_lock)
 287        __releases(dentry->d_inode->i_lock)
 288{
 289        struct inode *inode = dentry->d_inode;
 290        if (inode) {
 291                dentry->d_inode = NULL;
 292                hlist_del_init(&dentry->d_alias);
 293                spin_unlock(&dentry->d_lock);
 294                spin_unlock(&inode->i_lock);
 295                if (!inode->i_nlink)
 296                        fsnotify_inoderemove(inode);
 297                if (dentry->d_op && dentry->d_op->d_iput)
 298                        dentry->d_op->d_iput(dentry, inode);
 299                else
 300                        iput(inode);
 301        } else {
 302                spin_unlock(&dentry->d_lock);
 303        }
 304}
 305
 306/*
 307 * Release the dentry's inode, using the filesystem
 308 * d_iput() operation if defined. dentry remains in-use.
 309 */
 310static void dentry_unlink_inode(struct dentry * dentry)
 311        __releases(dentry->d_lock)
 312        __releases(dentry->d_inode->i_lock)
 313{
 314        struct inode *inode = dentry->d_inode;
 315        __d_clear_type(dentry);
 316        dentry->d_inode = NULL;
 317        hlist_del_init(&dentry->d_alias);
 318        dentry_rcuwalk_barrier(dentry);
 319        spin_unlock(&dentry->d_lock);
 320        spin_unlock(&inode->i_lock);
 321        if (!inode->i_nlink)
 322                fsnotify_inoderemove(inode);
 323        if (dentry->d_op && dentry->d_op->d_iput)
 324                dentry->d_op->d_iput(dentry, inode);
 325        else
 326                iput(inode);
 327}
 328
 329/*
 330 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 331 * is in use - which includes both the "real" per-superblock
 332 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 333 *
 334 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 335 * on the shrink list (ie not on the superblock LRU list).
 336 *
 337 * The per-cpu "nr_dentry_unused" counters are updated with
 338 * the DCACHE_LRU_LIST bit.
 339 *
 340 * These helper functions make sure we always follow the
 341 * rules. d_lock must be held by the caller.
 342 */
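/*
 * State summary for the helpers below (d_flags bits vs. which list the
 * dentry's d_lru entry is on):
 *
 *	(no flags)                            not on any list
 *	DCACHE_LRU_LIST                       on the per-sb LRU list
 *	DCACHE_LRU_LIST | DCACHE_SHRINK_LIST  on a private shrink list
 */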
 343#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 344static void d_lru_add(struct dentry *dentry)
 345{
 346        D_FLAG_VERIFY(dentry, 0);
 347        dentry->d_flags |= DCACHE_LRU_LIST;
 348        this_cpu_inc(nr_dentry_unused);
 349        WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 350}
 351
 352static void d_lru_del(struct dentry *dentry)
 353{
 354        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 355        dentry->d_flags &= ~DCACHE_LRU_LIST;
 356        this_cpu_dec(nr_dentry_unused);
 357        WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 358}
 359
 360static void d_shrink_del(struct dentry *dentry)
 361{
 362        D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 363        list_del_init(&dentry->d_lru);
 364        dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 365        this_cpu_dec(nr_dentry_unused);
 366}
 367
 368static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 369{
 370        D_FLAG_VERIFY(dentry, 0);
 371        list_add(&dentry->d_lru, list);
 372        dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 373        this_cpu_inc(nr_dentry_unused);
 374}
 375
 376/*
 377 * These can only be called under the global LRU lock, ie during the
 378 * callback for freeing the LRU list. "isolate" removes it from the
 379 * LRU lists entirely, while shrink_move moves it to the indicated
 380 * private list.
 381 */
 382static void d_lru_isolate(struct dentry *dentry)
 383{
 384        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 385        dentry->d_flags &= ~DCACHE_LRU_LIST;
 386        this_cpu_dec(nr_dentry_unused);
 387        list_del_init(&dentry->d_lru);
 388}
 389
 390static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
 391{
 392        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 393        dentry->d_flags |= DCACHE_SHRINK_LIST;
 394        list_move_tail(&dentry->d_lru, list);
 395}
 396
 397/*
  398 * dentry_lru_(add|del) must be called with d_lock held.
 399 */
 400static void dentry_lru_add(struct dentry *dentry)
 401{
 402        if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
 403                d_lru_add(dentry);
 404}
 405
 406/*
 407 * Remove a dentry with references from the LRU.
 408 *
 409 * If we are on the shrink list, then we can get to try_prune_one_dentry() and
 410 * lose our last reference through the parent walk. In this case, we need to
 411 * remove ourselves from the shrink list, not the LRU.
 412 */
 413static void dentry_lru_del(struct dentry *dentry)
 414{
 415        if (dentry->d_flags & DCACHE_LRU_LIST) {
 416                if (dentry->d_flags & DCACHE_SHRINK_LIST)
 417                        return d_shrink_del(dentry);
 418                d_lru_del(dentry);
 419        }
 420}
 421
 422/**
 423 * d_kill - kill dentry and return parent
 424 * @dentry: dentry to kill
 425 * @parent: parent dentry
 426 *
 427 * The dentry must already be unhashed and removed from the LRU.
 428 *
 429 * If this is the root of the dentry tree, return NULL.
 430 *
 431 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 432 * d_kill.
 433 */
 434static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
 435        __releases(dentry->d_lock)
 436        __releases(parent->d_lock)
 437        __releases(dentry->d_inode->i_lock)
 438{
 439        list_del(&dentry->d_u.d_child);
 440        /*
 441         * Inform d_walk() that we are no longer attached to the
 442         * dentry tree
 443         */
 444        dentry->d_flags |= DCACHE_DENTRY_KILLED;
 445        if (parent)
 446                spin_unlock(&parent->d_lock);
 447        dentry_iput(dentry);
 448        /*
 449         * dentry_iput drops the locks, at which point nobody (except
 450         * transient RCU lookups) can reach this dentry.
 451         */
 452        d_free(dentry);
 453        return parent;
 454}
 455
 456/**
 457 * d_drop - drop a dentry
 458 * @dentry: dentry to drop
 459 *
 460 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 461 * be found through a VFS lookup any more. Note that this is different from
 462 * deleting the dentry - d_delete will try to mark the dentry negative if
 463 * possible, giving a successful _negative_ lookup, while d_drop will
 464 * just make the cache lookup fail.
 465 *
 466 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 467 * reason (NFS timeouts or autofs deletes).
 468 *
 469 * __d_drop requires dentry->d_lock.
 470 */
 471void __d_drop(struct dentry *dentry)
 472{
 473        if (!d_unhashed(dentry)) {
 474                struct hlist_bl_head *b;
 475                /*
 476                 * Hashed dentries are normally on the dentry hashtable,
 477                 * with the exception of those newly allocated by
 478                 * d_obtain_alias, which are always IS_ROOT:
 479                 */
 480                if (unlikely(IS_ROOT(dentry)))
 481                        b = &dentry->d_sb->s_anon;
 482                else
 483                        b = d_hash(dentry->d_parent, dentry->d_name.hash);
 484
 485                hlist_bl_lock(b);
 486                __hlist_bl_del(&dentry->d_hash);
 487                dentry->d_hash.pprev = NULL;
 488                hlist_bl_unlock(b);
 489                dentry_rcuwalk_barrier(dentry);
 490        }
 491}
 492EXPORT_SYMBOL(__d_drop);
 493
 494void d_drop(struct dentry *dentry)
 495{
 496        spin_lock(&dentry->d_lock);
 497        __d_drop(dentry);
 498        spin_unlock(&dentry->d_lock);
 499}
 500EXPORT_SYMBOL(d_drop);
 501
 502/*
 503 * Finish off a dentry we've decided to kill.
 504 * dentry->d_lock must be held, returns with it unlocked.
  505 * If unlock_on_failure is non-zero, d_lock is dropped when a trylock fails.
 506 * Returns dentry requiring refcount drop, or NULL if we're done.
 507 */
 508static struct dentry *
 509dentry_kill(struct dentry *dentry, int unlock_on_failure)
 510        __releases(dentry->d_lock)
 511{
 512        struct inode *inode;
 513        struct dentry *parent;
 514
 515        inode = dentry->d_inode;
 516        if (inode && !spin_trylock(&inode->i_lock)) {
 517relock:
 518                if (unlock_on_failure) {
 519                        spin_unlock(&dentry->d_lock);
 520                        cpu_relax();
 521                }
 522                return dentry; /* try again with same dentry */
 523        }
 524        if (IS_ROOT(dentry))
 525                parent = NULL;
 526        else
 527                parent = dentry->d_parent;
 528        if (parent && !spin_trylock(&parent->d_lock)) {
 529                if (inode)
 530                        spin_unlock(&inode->i_lock);
 531                goto relock;
 532        }
 533
 534        /*
 535         * The dentry is now unrecoverably dead to the world.
 536         */
 537        lockref_mark_dead(&dentry->d_lockref);
 538
 539        /*
 540         * inform the fs via d_prune that this dentry is about to be
 541         * unhashed and destroyed.
 542         */
 543        if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
 544                dentry->d_op->d_prune(dentry);
 545
 546        dentry_lru_del(dentry);
 547        /* if it was on the hash then remove it */
 548        __d_drop(dentry);
 549        return d_kill(dentry, parent);
 550}
 551
 552/* 
 553 * This is dput
 554 *
 555 * This is complicated by the fact that we do not want to put
 556 * dentries that are no longer on any hash chain on the unused
 557 * list: we'd much rather just get rid of them immediately.
 558 *
 559 * However, that implies that we have to traverse the dentry
 560 * tree upwards to the parents which might _also_ now be
 561 * scheduled for deletion (it may have been only waiting for
 562 * its last child to go away).
 563 *
 564 * This tail recursion is done by hand as we don't want to depend
 565 * on the compiler to always get this right (gcc generally doesn't).
 566 * Real recursion would eat up our stack space.
 567 */
 568
 569/*
 570 * dput - release a dentry
 571 * @dentry: dentry to release 
 572 *
 573 * Release a dentry. This will drop the usage count and if appropriate
 574 * call the dentry unlink method as well as removing it from the queues and
 575 * releasing its resources. If the parent dentries were scheduled for release
 576 * they too may now get deleted.
 577 */
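/*
 * Fast-path note: lockref_put_or_lock() drops the reference without taking
 * d_lock when the count stays positive (via cmpxchg on architectures with
 * lockless lockrefs, otherwise under the spinlock); only for what would be
 * the last reference does it return 0 with d_lock held and the count not yet
 * decremented, which is why the slow path below ends with an explicit
 * d_lockref.count--.
 */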
 578void dput(struct dentry *dentry)
 579{
 580        if (unlikely(!dentry))
 581                return;
 582
 583repeat:
 584        if (lockref_put_or_lock(&dentry->d_lockref))
 585                return;
 586
 587        /* Unreachable? Get rid of it */
 588        if (unlikely(d_unhashed(dentry)))
 589                goto kill_it;
 590
 591        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
 592                if (dentry->d_op->d_delete(dentry))
 593                        goto kill_it;
 594        }
 595
 596        if (!(dentry->d_flags & DCACHE_REFERENCED))
 597                dentry->d_flags |= DCACHE_REFERENCED;
 598        dentry_lru_add(dentry);
 599
 600        dentry->d_lockref.count--;
 601        spin_unlock(&dentry->d_lock);
 602        return;
 603
 604kill_it:
 605        dentry = dentry_kill(dentry, 1);
 606        if (dentry)
 607                goto repeat;
 608}
 609EXPORT_SYMBOL(dput);
 610
 611/**
 612 * d_invalidate - invalidate a dentry
 613 * @dentry: dentry to invalidate
 614 *
 615 * Try to invalidate the dentry if it turns out to be
 616 * possible. If there are other dentries that can be
 617 * reached through this one we can't delete it and we
 618 * return -EBUSY. On success we return 0.
 619 *
 620 * no dcache lock.
 621 */
 622 
 623int d_invalidate(struct dentry * dentry)
 624{
 625        /*
 626         * If it's already been dropped, return OK.
 627         */
 628        spin_lock(&dentry->d_lock);
 629        if (d_unhashed(dentry)) {
 630                spin_unlock(&dentry->d_lock);
 631                return 0;
 632        }
 633        /*
 634         * Check whether to do a partial shrink_dcache
 635         * to get rid of unused child entries.
 636         */
 637        if (!list_empty(&dentry->d_subdirs)) {
 638                spin_unlock(&dentry->d_lock);
 639                shrink_dcache_parent(dentry);
 640                spin_lock(&dentry->d_lock);
 641        }
 642
 643        /*
 644         * Somebody else still using it?
 645         *
 646         * If it's a directory, we can't drop it
 647         * for fear of somebody re-populating it
 648         * with children (even though dropping it
 649         * would make it unreachable from the root,
 650         * we might still populate it if it was a
 651         * working directory or similar).
 652         * We also need to leave mountpoints alone,
 653         * directory or not.
 654         */
 655        if (dentry->d_lockref.count > 1 && dentry->d_inode) {
 656                if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
 657                        spin_unlock(&dentry->d_lock);
 658                        return -EBUSY;
 659                }
 660        }
 661
 662        __d_drop(dentry);
 663        spin_unlock(&dentry->d_lock);
 664        return 0;
 665}
 666EXPORT_SYMBOL(d_invalidate);
 667
 668/* This must be called with d_lock held */
 669static inline void __dget_dlock(struct dentry *dentry)
 670{
 671        dentry->d_lockref.count++;
 672}
 673
 674static inline void __dget(struct dentry *dentry)
 675{
 676        lockref_get(&dentry->d_lockref);
 677}
 678
 679struct dentry *dget_parent(struct dentry *dentry)
 680{
 681        int gotref;
 682        struct dentry *ret;
 683
 684        /*
 685         * Do optimistic parent lookup without any
 686         * locking.
 687         */
 688        rcu_read_lock();
 689        ret = ACCESS_ONCE(dentry->d_parent);
 690        gotref = lockref_get_not_zero(&ret->d_lockref);
 691        rcu_read_unlock();
 692        if (likely(gotref)) {
 693                if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
 694                        return ret;
 695                dput(ret);
 696        }
 697
 698repeat:
 699        /*
 700         * Don't need rcu_dereference because we re-check it was correct under
 701         * the lock.
 702         */
 703        rcu_read_lock();
 704        ret = dentry->d_parent;
 705        spin_lock(&ret->d_lock);
 706        if (unlikely(ret != dentry->d_parent)) {
 707                spin_unlock(&ret->d_lock);
 708                rcu_read_unlock();
 709                goto repeat;
 710        }
 711        rcu_read_unlock();
 712        BUG_ON(!ret->d_lockref.count);
 713        ret->d_lockref.count++;
 714        spin_unlock(&ret->d_lock);
 715        return ret;
 716}
 717EXPORT_SYMBOL(dget_parent);
 718
 719/**
 720 * d_find_alias - grab a hashed alias of inode
 721 * @inode: inode in question
 722 * @want_discon:  flag, used by d_splice_alias, to request
 723 *          that only a DISCONNECTED alias be returned.
 724 *
 725 * If inode has a hashed alias, or is a directory and has any alias,
 726 * acquire the reference to alias and return it. Otherwise return NULL.
 727 * Notice that if inode is a directory there can be only one alias and
 728 * it can be unhashed only if it has no children, or if it is the root
 729 * of a filesystem.
 730 *
 731 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 732 * any other hashed alias over that one unless @want_discon is set,
 733 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 734 */
 735static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 736{
 737        struct dentry *alias, *discon_alias;
 738
 739again:
 740        discon_alias = NULL;
 741        hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
 742                spin_lock(&alias->d_lock);
 743                if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 744                        if (IS_ROOT(alias) &&
 745                            (alias->d_flags & DCACHE_DISCONNECTED)) {
 746                                discon_alias = alias;
 747                        } else if (!want_discon) {
 748                                __dget_dlock(alias);
 749                                spin_unlock(&alias->d_lock);
 750                                return alias;
 751                        }
 752                }
 753                spin_unlock(&alias->d_lock);
 754        }
 755        if (discon_alias) {
 756                alias = discon_alias;
 757                spin_lock(&alias->d_lock);
 758                if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 759                        if (IS_ROOT(alias) &&
 760                            (alias->d_flags & DCACHE_DISCONNECTED)) {
 761                                __dget_dlock(alias);
 762                                spin_unlock(&alias->d_lock);
 763                                return alias;
 764                        }
 765                }
 766                spin_unlock(&alias->d_lock);
 767                goto again;
 768        }
 769        return NULL;
 770}
 771
 772struct dentry *d_find_alias(struct inode *inode)
 773{
 774        struct dentry *de = NULL;
 775
 776        if (!hlist_empty(&inode->i_dentry)) {
 777                spin_lock(&inode->i_lock);
 778                de = __d_find_alias(inode, 0);
 779                spin_unlock(&inode->i_lock);
 780        }
 781        return de;
 782}
 783EXPORT_SYMBOL(d_find_alias);
 784
 785/*
 786 *      Try to kill dentries associated with this inode.
 787 * WARNING: you must own a reference to inode.
 788 */
 789void d_prune_aliases(struct inode *inode)
 790{
 791        struct dentry *dentry;
 792restart:
 793        spin_lock(&inode->i_lock);
 794        hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 795                spin_lock(&dentry->d_lock);
 796                if (!dentry->d_lockref.count) {
 797                        /*
 798                         * inform the fs via d_prune that this dentry
 799                         * is about to be unhashed and destroyed.
 800                         */
 801                        if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
 802                            !d_unhashed(dentry))
 803                                dentry->d_op->d_prune(dentry);
 804
 805                        __dget_dlock(dentry);
 806                        __d_drop(dentry);
 807                        spin_unlock(&dentry->d_lock);
 808                        spin_unlock(&inode->i_lock);
 809                        dput(dentry);
 810                        goto restart;
 811                }
 812                spin_unlock(&dentry->d_lock);
 813        }
 814        spin_unlock(&inode->i_lock);
 815}
 816EXPORT_SYMBOL(d_prune_aliases);
 817
 818/*
 819 * Try to throw away a dentry - free the inode, dput the parent.
 820 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 821 * Releases dentry->d_lock.
 822 *
  823 * This may fail if locks cannot be acquired; no problem, just try again.
 824 */
 825static struct dentry * try_prune_one_dentry(struct dentry *dentry)
 826        __releases(dentry->d_lock)
 827{
 828        struct dentry *parent;
 829
 830        parent = dentry_kill(dentry, 0);
 831        /*
 832         * If dentry_kill returns NULL, we have nothing more to do.
 833         * if it returns the same dentry, trylocks failed. In either
 834         * case, just loop again.
 835         *
 836         * Otherwise, we need to prune ancestors too. This is necessary
 837         * to prevent quadratic behavior of shrink_dcache_parent(), but
 838         * is also expected to be beneficial in reducing dentry cache
 839         * fragmentation.
 840         */
 841        if (!parent)
 842                return NULL;
 843        if (parent == dentry)
 844                return dentry;
 845
 846        /* Prune ancestors. */
 847        dentry = parent;
 848        while (dentry) {
 849                if (lockref_put_or_lock(&dentry->d_lockref))
 850                        return NULL;
 851                dentry = dentry_kill(dentry, 1);
 852        }
 853        return NULL;
 854}
 855
 856static void shrink_dentry_list(struct list_head *list)
 857{
 858        struct dentry *dentry;
 859
 860        rcu_read_lock();
 861        for (;;) {
 862                dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
 863                if (&dentry->d_lru == list)
 864                        break; /* empty */
 865
 866                /*
 867                 * Get the dentry lock, and re-verify that the dentry is
  868                 * still on the shrink list. If it is, we know that
 869                 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
 870                 */
 871                spin_lock(&dentry->d_lock);
 872                if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
 873                        spin_unlock(&dentry->d_lock);
 874                        continue;
 875                }
 876
 877                /*
 878                 * The dispose list is isolated and dentries are not accounted
  879                 * to the LRU here, so we can simply remove the dentry from
  880                 * the list regardless of whether it is referenced or not.
 881                 */
 882                d_shrink_del(dentry);
 883
 884                /*
 885                 * We found an inuse dentry which was not removed from
 886                 * the LRU because of laziness during lookup. Do not free it.
 887                 */
 888                if (dentry->d_lockref.count) {
 889                        spin_unlock(&dentry->d_lock);
 890                        continue;
 891                }
 892                rcu_read_unlock();
 893
 894                /*
  895                 * If try_prune_one_dentry() returns a dentry, it will
 896                 * be the same one we passed in, and d_lock will
 897                 * have been held the whole time, so it will not
 898                 * have been added to any other lists. We failed
 899                 * to get the inode lock.
 900                 *
 901                 * We just add it back to the shrink list.
 902                 */
 903                dentry = try_prune_one_dentry(dentry);
 904
 905                rcu_read_lock();
 906                if (dentry) {
 907                        d_shrink_add(dentry, list);
 908                        spin_unlock(&dentry->d_lock);
 909                }
 910        }
 911        rcu_read_unlock();
 912}
 913
 914static enum lru_status
 915dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 916{
 917        struct list_head *freeable = arg;
 918        struct dentry   *dentry = container_of(item, struct dentry, d_lru);
 919
 920
 921        /*
 922         * we are inverting the lru lock/dentry->d_lock here,
 923         * so use a trylock. If we fail to get the lock, just skip
 924         * it
 925         */
 926        if (!spin_trylock(&dentry->d_lock))
 927                return LRU_SKIP;
 928
 929        /*
 930         * Referenced dentries are still in use. If they have active
 931         * counts, just remove them from the LRU. Otherwise give them
 932         * another pass through the LRU.
 933         */
 934        if (dentry->d_lockref.count) {
 935                d_lru_isolate(dentry);
 936                spin_unlock(&dentry->d_lock);
 937                return LRU_REMOVED;
 938        }
 939
 940        if (dentry->d_flags & DCACHE_REFERENCED) {
 941                dentry->d_flags &= ~DCACHE_REFERENCED;
 942                spin_unlock(&dentry->d_lock);
 943
 944                /*
 945                 * The list move itself will be made by the common LRU code. At
 946                 * this point, we've dropped the dentry->d_lock but keep the
 947                 * lru lock. This is safe to do, since every list movement is
 948                 * protected by the lru lock even if both locks are held.
 949                 *
 950                 * This is guaranteed by the fact that all LRU management
 951                 * functions are intermediated by the LRU API calls like
 952                 * list_lru_add and list_lru_del. List movement in this file
 953                 * only ever occur through this functions or through callbacks
  954                 * only ever occurs through these functions or through callbacks
 955                 *
 956                 * The only exceptions to this are functions like
 957                 * shrink_dentry_list, and code that first checks for the
 958                 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
  959                 * operating only with stack-provided lists after they are
  960                 * properly isolated from the main list, so access is always
  961                 * local.
 962                 */
 963                return LRU_ROTATE;
 964        }
 965
 966        d_lru_shrink_move(dentry, freeable);
 967        spin_unlock(&dentry->d_lock);
 968
 969        return LRU_REMOVED;
 970}
 971
 972/**
 973 * prune_dcache_sb - shrink the dcache
 974 * @sb: superblock
 975 * @nr_to_scan : number of entries to try to free
 976 * @nid: which node to scan for freeable entities
 977 *
 978 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
  979 * done when we need more memory and is called from the superblock shrinker
 980 * function.
 981 *
 982 * This function may fail to free any resources if all the dentries are in
 983 * use.
 984 */
 985long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
 986                     int nid)
 987{
 988        LIST_HEAD(dispose);
 989        long freed;
 990
 991        freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
 992                                       &dispose, &nr_to_scan);
 993        shrink_dentry_list(&dispose);
 994        return freed;
 995}
 996
 997static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
 998                                                spinlock_t *lru_lock, void *arg)
 999{
1000        struct list_head *freeable = arg;
1001        struct dentry   *dentry = container_of(item, struct dentry, d_lru);
1002
1003        /*
1004         * we are inverting the lru lock/dentry->d_lock here,
1005         * so use a trylock. If we fail to get the lock, just skip
1006         * it
1007         */
1008        if (!spin_trylock(&dentry->d_lock))
1009                return LRU_SKIP;
1010
1011        d_lru_shrink_move(dentry, freeable);
1012        spin_unlock(&dentry->d_lock);
1013
1014        return LRU_REMOVED;
1015}
1016
1017
1018/**
1019 * shrink_dcache_sb - shrink dcache for a superblock
1020 * @sb: superblock
1021 *
1022 * Shrink the dcache for the specified super block. This is used to free
1023 * the dcache before unmounting a file system.
1024 */
1025void shrink_dcache_sb(struct super_block *sb)
1026{
1027        long freed;
1028
1029        do {
1030                LIST_HEAD(dispose);
1031
1032                freed = list_lru_walk(&sb->s_dentry_lru,
1033                        dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1034
1035                this_cpu_sub(nr_dentry_unused, freed);
1036                shrink_dentry_list(&dispose);
1037        } while (freed > 0);
1038}
1039EXPORT_SYMBOL(shrink_dcache_sb);
1040
1041/**
 1042 * enum d_walk_ret - action to take during tree walk
 1043 * @D_WALK_CONTINUE:    continue walk
1044 * @D_WALK_QUIT:        quit walk
1045 * @D_WALK_NORETRY:     quit when retry is needed
1046 * @D_WALK_SKIP:        skip this dentry and its children
1047 */
1048enum d_walk_ret {
1049        D_WALK_CONTINUE,
1050        D_WALK_QUIT,
1051        D_WALK_NORETRY,
1052        D_WALK_SKIP,
1053};
1054
1055/**
1056 * d_walk - walk the dentry tree
1057 * @parent:     start of walk
1058 * @data:       data passed to @enter() and @finish()
1059 * @enter:      callback when first entering the dentry
1060 * @finish:     callback when successfully finished the walk
1061 *
1062 * The @enter() and @finish() callbacks are called with d_lock held.
1063 */
1064static void d_walk(struct dentry *parent, void *data,
1065                   enum d_walk_ret (*enter)(void *, struct dentry *),
1066                   void (*finish)(void *))
1067{
1068        struct dentry *this_parent;
1069        struct list_head *next;
1070        unsigned seq = 0;
1071        enum d_walk_ret ret;
1072        bool retry = true;
1073
1074again:
1075        read_seqbegin_or_lock(&rename_lock, &seq);
1076        this_parent = parent;
1077        spin_lock(&this_parent->d_lock);
1078
1079        ret = enter(data, this_parent);
1080        switch (ret) {
1081        case D_WALK_CONTINUE:
1082                break;
1083        case D_WALK_QUIT:
1084        case D_WALK_SKIP:
1085                goto out_unlock;
1086        case D_WALK_NORETRY:
1087                retry = false;
1088                break;
1089        }
1090repeat:
1091        next = this_parent->d_subdirs.next;
1092resume:
1093        while (next != &this_parent->d_subdirs) {
1094                struct list_head *tmp = next;
1095                struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1096                next = tmp->next;
1097
1098                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1099
1100                ret = enter(data, dentry);
1101                switch (ret) {
1102                case D_WALK_CONTINUE:
1103                        break;
1104                case D_WALK_QUIT:
1105                        spin_unlock(&dentry->d_lock);
1106                        goto out_unlock;
1107                case D_WALK_NORETRY:
1108                        retry = false;
1109                        break;
1110                case D_WALK_SKIP:
1111                        spin_unlock(&dentry->d_lock);
1112                        continue;
1113                }
1114
1115                if (!list_empty(&dentry->d_subdirs)) {
1116                        spin_unlock(&this_parent->d_lock);
1117                        spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1118                        this_parent = dentry;
1119                        spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1120                        goto repeat;
1121                }
1122                spin_unlock(&dentry->d_lock);
1123        }
1124        /*
1125         * All done at this level ... ascend and resume the search.
1126         */
1127        if (this_parent != parent) {
1128                struct dentry *child = this_parent;
1129                this_parent = child->d_parent;
1130
1131                rcu_read_lock();
1132                spin_unlock(&child->d_lock);
1133                spin_lock(&this_parent->d_lock);
1134
1135                /*
1136                 * might go back up the wrong parent if we have had a rename
1137                 * or deletion
1138                 */
1139                if (this_parent != child->d_parent ||
1140                         (child->d_flags & DCACHE_DENTRY_KILLED) ||
1141                         need_seqretry(&rename_lock, seq)) {
1142                        spin_unlock(&this_parent->d_lock);
1143                        rcu_read_unlock();
1144                        goto rename_retry;
1145                }
1146                rcu_read_unlock();
1147                next = child->d_u.d_child.next;
1148                goto resume;
1149        }
1150        if (need_seqretry(&rename_lock, seq)) {
1151                spin_unlock(&this_parent->d_lock);
1152                goto rename_retry;
1153        }
1154        if (finish)
1155                finish(data);
1156
1157out_unlock:
1158        spin_unlock(&this_parent->d_lock);
1159        done_seqretry(&rename_lock, seq);
1160        return;
1161
1162rename_retry:
1163        if (!retry)
1164                return;
1165        seq = 1;
1166        goto again;
1167}
1168
1169/*
1170 * Search for at least 1 mount point in the dentry's subdirs.
1171 * We descend to the next level whenever the d_subdirs
1172 * list is non-empty and continue searching.
1173 */
1174
1175static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1176{
1177        int *ret = data;
1178        if (d_mountpoint(dentry)) {
1179                *ret = 1;
1180                return D_WALK_QUIT;
1181        }
1182        return D_WALK_CONTINUE;
1183}
1184
1185/**
1186 * have_submounts - check for mounts over a dentry
1187 * @parent: dentry to check.
1188 *
1189 * Return true if the parent or its subdirectories contain
1190 * a mount point
1191 */
1192int have_submounts(struct dentry *parent)
1193{
1194        int ret = 0;
1195
1196        d_walk(parent, &ret, check_mount, NULL);
1197
1198        return ret;
1199}
1200EXPORT_SYMBOL(have_submounts);
1201
1202/*
1203 * Called by mount code to set a mountpoint and check if the mountpoint is
1204 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1205 * subtree can become unreachable).
1206 *
 1207 * Only one of check_submounts_and_drop() and d_set_mounted() may succeed.  To
 1208 * ensure that, take rename_lock and d_lock on the dentry and its ancestors.
1209 */
1210int d_set_mounted(struct dentry *dentry)
1211{
1212        struct dentry *p;
1213        int ret = -ENOENT;
1214        write_seqlock(&rename_lock);
1215        for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1216                /* Need exclusion wrt. check_submounts_and_drop() */
1217                spin_lock(&p->d_lock);
1218                if (unlikely(d_unhashed(p))) {
1219                        spin_unlock(&p->d_lock);
1220                        goto out;
1221                }
1222                spin_unlock(&p->d_lock);
1223        }
1224        spin_lock(&dentry->d_lock);
1225        if (!d_unlinked(dentry)) {
1226                dentry->d_flags |= DCACHE_MOUNTED;
1227                ret = 0;
1228        }
1229        spin_unlock(&dentry->d_lock);
1230out:
1231        write_sequnlock(&rename_lock);
1232        return ret;
1233}
1234
1235/*
1236 * Search the dentry child list of the specified parent,
1237 * and move any unused dentries to the end of the unused
1238 * list for prune_dcache(). We descend to the next level
1239 * whenever the d_subdirs list is non-empty and continue
1240 * searching.
1241 *
1242 * It returns zero iff there are no unused children,
1243 * otherwise  it returns the number of children moved to
1244 * the end of the unused list. This may not be the total
 1245 * number of unused children, because the walk can
1246 * drop the lock and return early due to latency
1247 * constraints.
1248 */
1249
1250struct select_data {
1251        struct dentry *start;
1252        struct list_head dispose;
1253        int found;
1254};
1255
1256static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1257{
1258        struct select_data *data = _data;
1259        enum d_walk_ret ret = D_WALK_CONTINUE;
1260
1261        if (data->start == dentry)
1262                goto out;
1263
1264        /*
1265         * move only zero ref count dentries to the dispose list.
1266         *
1267         * Those which are presently on the shrink list, being processed
1268         * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
1269         * loop in shrink_dcache_parent() might not make any progress
1270         * and loop forever.
1271         */
1272        if (dentry->d_lockref.count) {
1273                dentry_lru_del(dentry);
1274        } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1275                /*
1276                 * We can't use d_lru_shrink_move() because we
1277                 * need to get the global LRU lock and do the
1278                 * LRU accounting.
1279                 */
1280                d_lru_del(dentry);
1281                d_shrink_add(dentry, &data->dispose);
1282                data->found++;
1283                ret = D_WALK_NORETRY;
1284        }
1285        /*
1286         * We can return to the caller if we have found some (this
1287         * ensures forward progress). We'll be coming back to find
1288         * the rest.
1289         */
1290        if (data->found && need_resched())
1291                ret = D_WALK_QUIT;
1292out:
1293        return ret;
1294}
1295
1296/**
1297 * shrink_dcache_parent - prune dcache
1298 * @parent: parent of entries to prune
1299 *
1300 * Prune the dcache to remove unused children of the parent dentry.
1301 */
1302void shrink_dcache_parent(struct dentry *parent)
1303{
1304        for (;;) {
1305                struct select_data data;
1306
1307                INIT_LIST_HEAD(&data.dispose);
1308                data.start = parent;
1309                data.found = 0;
1310
1311                d_walk(parent, &data, select_collect, NULL);
1312                if (!data.found)
1313                        break;
1314
1315                shrink_dentry_list(&data.dispose);
1316                cond_resched();
1317        }
1318}
1319EXPORT_SYMBOL(shrink_dcache_parent);
1320
1321static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
1322{
1323        struct select_data *data = _data;
1324        enum d_walk_ret ret = D_WALK_CONTINUE;
1325
1326        if (dentry->d_lockref.count) {
1327                dentry_lru_del(dentry);
1328                if (likely(!list_empty(&dentry->d_subdirs)))
1329                        goto out;
1330                if (dentry == data->start && dentry->d_lockref.count == 1)
1331                        goto out;
1332                printk(KERN_ERR
1333                       "BUG: Dentry %p{i=%lx,n=%s}"
1334                       " still in use (%d)"
1335                       " [unmount of %s %s]\n",
1336                       dentry,
1337                       dentry->d_inode ?
1338                       dentry->d_inode->i_ino : 0UL,
1339                       dentry->d_name.name,
1340                       dentry->d_lockref.count,
1341                       dentry->d_sb->s_type->name,
1342                       dentry->d_sb->s_id);
1343                BUG();
1344        } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1345                /*
1346                 * We can't use d_lru_shrink_move() because we
1347                 * need to get the global LRU lock and do the
1348                 * LRU accounting.
1349                 */
1350                if (dentry->d_flags & DCACHE_LRU_LIST)
1351                        d_lru_del(dentry);
1352                d_shrink_add(dentry, &data->dispose);
1353                data->found++;
1354                ret = D_WALK_NORETRY;
1355        }
1356out:
1357        if (data->found && need_resched())
1358                ret = D_WALK_QUIT;
1359        return ret;
1360}
1361
1362/*
1363 * destroy the dentries attached to a superblock on unmounting
1364 */
1365void shrink_dcache_for_umount(struct super_block *sb)
1366{
1367        struct dentry *dentry;
1368
1369        if (down_read_trylock(&sb->s_umount))
1370                BUG();
1371
1372        dentry = sb->s_root;
1373        sb->s_root = NULL;
1374        for (;;) {
1375                struct select_data data;
1376
1377                INIT_LIST_HEAD(&data.dispose);
1378                data.start = dentry;
1379                data.found = 0;
1380
1381                d_walk(dentry, &data, umount_collect, NULL);
1382                if (!data.found)
1383                        break;
1384
1385                shrink_dentry_list(&data.dispose);
1386                cond_resched();
1387        }
1388        d_drop(dentry);
1389        dput(dentry);
1390
1391        while (!hlist_bl_empty(&sb->s_anon)) {
1392                struct select_data data;
1393                dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
1394
1395                INIT_LIST_HEAD(&data.dispose);
1396                data.start = NULL;
1397                data.found = 0;
1398
1399                d_walk(dentry, &data, umount_collect, NULL);
1400                if (data.found)
1401                        shrink_dentry_list(&data.dispose);
1402                cond_resched();
1403        }
1404}
1405
1406static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
1407{
1408        struct select_data *data = _data;
1409
1410        if (d_mountpoint(dentry)) {
1411                data->found = -EBUSY;
1412                return D_WALK_QUIT;
1413        }
1414
1415        return select_collect(_data, dentry);
1416}
1417
1418static void check_and_drop(void *_data)
1419{
1420        struct select_data *data = _data;
1421
1422        if (d_mountpoint(data->start))
1423                data->found = -EBUSY;
1424        if (!data->found)
1425                __d_drop(data->start);
1426}
1427
1428/**
1429 * check_submounts_and_drop - prune dcache, check for submounts and drop
1430 *
1431 * All done as a single atomic operation relative to has_unlinked_ancestor().
 1432 * Returns 0 if @dentry was successfully unhashed.  If there were submounts then
1433 * return -EBUSY.
1434 *
1435 * @dentry: dentry to prune and drop
1436 */
1437int check_submounts_and_drop(struct dentry *dentry)
1438{
1439        int ret = 0;
1440
1441        /* Negative dentries can be dropped without further checks */
1442        if (!dentry->d_inode) {
1443                d_drop(dentry);
1444                goto out;
1445        }
1446
1447        for (;;) {
1448                struct select_data data;
1449
1450                INIT_LIST_HEAD(&data.dispose);
1451                data.start = dentry;
1452                data.found = 0;
1453
1454                d_walk(dentry, &data, check_and_collect, check_and_drop);
1455                ret = data.found;
1456
1457                if (!list_empty(&data.dispose))
1458                        shrink_dentry_list(&data.dispose);
1459
1460                if (ret <= 0)
1461                        break;
1462
1463                cond_resched();
1464        }
1465
1466out:
1467        return ret;
1468}
1469EXPORT_SYMBOL(check_submounts_and_drop);
1470
1471/**
1472 * __d_alloc    -       allocate a dcache entry
1473 * @sb: filesystem it will belong to
1474 * @name: qstr of the name
1475 *
1476 * Allocates a dentry. It returns %NULL if there is insufficient memory
1477 * available. On a success the dentry is returned. The name passed in is
 1478 * copied, so the buffer passed in may be reused after this call.
1479 */
1480 
1481struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1482{
1483        struct dentry *dentry;
1484        char *dname;
1485
1486        dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1487        if (!dentry)
1488                return NULL;
1489
1490        /*
1491         * We guarantee that the inline name is always NUL-terminated.
1492         * This way the memcpy() done by the name switching in rename
1493         * will still always have a NUL at the end, even if we might
1494         * be overwriting an internal NUL character
1495         */
1496        dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1497        if (name->len > DNAME_INLINE_LEN-1) {
1498                dname = kmalloc(name->len + 1, GFP_KERNEL);
1499                if (!dname) {
1500                        kmem_cache_free(dentry_cache, dentry); 
1501                        return NULL;
1502                }
1503        } else  {
1504                dname = dentry->d_iname;
1505        }       
1506
1507        dentry->d_name.len = name->len;
1508        dentry->d_name.hash = name->hash;
1509        memcpy(dname, name->name, name->len);
1510        dname[name->len] = 0;
1511
1512        /* Make sure we always see the terminating NUL character */
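        /* (pairs with ACCESS_ONCE() + smp_read_barrier_depends() in dentry_cmp()) */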
1513        smp_wmb();
1514        dentry->d_name.name = dname;
1515
1516        dentry->d_lockref.count = 1;
1517        dentry->d_flags = 0;
1518        spin_lock_init(&dentry->d_lock);
1519        seqcount_init(&dentry->d_seq);
1520        dentry->d_inode = NULL;
1521        dentry->d_parent = dentry;
1522        dentry->d_sb = sb;
1523        dentry->d_op = NULL;
1524        dentry->d_fsdata = NULL;
1525        INIT_HLIST_BL_NODE(&dentry->d_hash);
1526        INIT_LIST_HEAD(&dentry->d_lru);
1527        INIT_LIST_HEAD(&dentry->d_subdirs);
1528        INIT_HLIST_NODE(&dentry->d_alias);
1529        INIT_LIST_HEAD(&dentry->d_u.d_child);
1530        d_set_d_op(dentry, dentry->d_sb->s_d_op);
1531
1532        this_cpu_inc(nr_dentry);
1533
1534        return dentry;
1535}
1536
1537/**
1538 * d_alloc      -       allocate a dcache entry
1539 * @parent: parent of entry to allocate
1540 * @name: qstr of the name
1541 *
1542 * Allocates a dentry. It returns %NULL if there is insufficient memory
1543 * available. On a success the dentry is returned. The name passed in is
 1544 * copied, so the buffer passed in may be reused after this call.
1545 */
1546struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1547{
1548        struct dentry *dentry = __d_alloc(parent->d_sb, name);
1549        if (!dentry)
1550                return NULL;
1551
1552        spin_lock(&parent->d_lock);
1553        /*
1554         * don't need child lock because it is not subject
1555         * to concurrency here
1556         */
1557        __dget_dlock(parent);
1558        dentry->d_parent = parent;
1559        list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1560        spin_unlock(&parent->d_lock);
1561
1562        return dentry;
1563}
1564EXPORT_SYMBOL(d_alloc);
1565
1566/**
1567 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1568 * @sb: the superblock
1569 * @name: qstr of the name
1570 *
1571 * For a filesystem that just pins its dentries in memory and never
1572 * performs lookups at all, return an unhashed IS_ROOT dentry.
1573 */
1574struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1575{
1576        return __d_alloc(sb, name);
1577}
1578EXPORT_SYMBOL(d_alloc_pseudo);
1579
1580struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1581{
1582        struct qstr q;
1583
1584        q.name = name;
1585        q.len = strlen(name);
1586        q.hash = full_name_hash(q.name, q.len);
1587        return d_alloc(parent, &q);
1588}
1589EXPORT_SYMBOL(d_alloc_name);
1590
1591void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1592{
1593        WARN_ON_ONCE(dentry->d_op);
1594        WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH  |
1595                                DCACHE_OP_COMPARE       |
1596                                DCACHE_OP_REVALIDATE    |
1597                                DCACHE_OP_WEAK_REVALIDATE       |
1598                                DCACHE_OP_DELETE ));
1599        dentry->d_op = op;
1600        if (!op)
1601                return;
1602        if (op->d_hash)
1603                dentry->d_flags |= DCACHE_OP_HASH;
1604        if (op->d_compare)
1605                dentry->d_flags |= DCACHE_OP_COMPARE;
1606        if (op->d_revalidate)
1607                dentry->d_flags |= DCACHE_OP_REVALIDATE;
1608        if (op->d_weak_revalidate)
1609                dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1610        if (op->d_delete)
1611                dentry->d_flags |= DCACHE_OP_DELETE;
1612        if (op->d_prune)
1613                dentry->d_flags |= DCACHE_OP_PRUNE;
1614
1615}
1616EXPORT_SYMBOL(d_set_d_op);
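
/*
 * Illustrative sketch (not part of the original source): how a filesystem
 * typically provides dentry_operations.  The "examplefs" names are
 * hypothetical; the usual pattern is to set sb->s_d_op once at mount time
 * so that __d_alloc() applies it to every new dentry via d_set_d_op().
 */
static int examplefs_d_delete(const struct dentry *dentry)
{
        /* never keep unused dentries cached for this filesystem */
        return 1;
}

static const struct dentry_operations examplefs_dentry_ops = {
        .d_delete       = examplefs_d_delete,
};
/* in examplefs fill_super():  sb->s_d_op = &examplefs_dentry_ops;  */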
1617
1618static unsigned d_flags_for_inode(struct inode *inode)
1619{
1620        unsigned add_flags = DCACHE_FILE_TYPE;
1621
1622        if (!inode)
1623                return DCACHE_MISS_TYPE;
1624
1625        if (S_ISDIR(inode->i_mode)) {
1626                add_flags = DCACHE_DIRECTORY_TYPE;
1627                if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1628                        if (unlikely(!inode->i_op->lookup))
1629                                add_flags = DCACHE_AUTODIR_TYPE;
1630                        else
1631                                inode->i_opflags |= IOP_LOOKUP;
1632                }
1633        } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1634                if (unlikely(inode->i_op->follow_link))
1635                        add_flags = DCACHE_SYMLINK_TYPE;
1636                else
1637                        inode->i_opflags |= IOP_NOFOLLOW;
1638        }
1639
1640        if (unlikely(IS_AUTOMOUNT(inode)))
1641                add_flags |= DCACHE_NEED_AUTOMOUNT;
1642        return add_flags;
1643}
1644
1645static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1646{
1647        unsigned add_flags = d_flags_for_inode(inode);
1648
1649        spin_lock(&dentry->d_lock);
1650        dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
1651        dentry->d_flags |= add_flags;
1652        if (inode)
1653                hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1654        dentry->d_inode = inode;
1655        dentry_rcuwalk_barrier(dentry);
1656        spin_unlock(&dentry->d_lock);
1657        fsnotify_d_instantiate(dentry, inode);
1658}
1659
1660/**
1661 * d_instantiate - fill in inode information for a dentry
1662 * @entry: dentry to complete
1663 * @inode: inode to attach to this dentry
1664 *
1665 * Fill in inode information in the entry.
1666 *
1667 * This turns negative dentries into productive full members
1668 * of society.
1669 *
1670 * NOTE! This assumes that the inode count has been incremented
1671 * (or otherwise set) by the caller to indicate that it is now
1672 * in use by the dcache.
1673 */
1674 
1675void d_instantiate(struct dentry *entry, struct inode * inode)
1676{
1677        BUG_ON(!hlist_unhashed(&entry->d_alias));
1678        if (inode)
1679                spin_lock(&inode->i_lock);
1680        __d_instantiate(entry, inode);
1681        if (inode)
1682                spin_unlock(&inode->i_lock);
1683        security_d_instantiate(entry, inode);
1684}
1685EXPORT_SYMBOL(d_instantiate);
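
/*
 * Illustrative sketch (not part of the original source): a minimal ->create()
 * for an in-memory filesystem, showing the d_instantiate() contract.  The
 * "examplefs" name is hypothetical; the pattern follows ramfs.
 */
static int examplefs_create(struct inode *dir, struct dentry *dentry,
                            umode_t mode, bool excl)
{
        struct inode *inode = new_inode(dir->i_sb);

        if (!inode)
                return -ENOSPC;
        inode->i_ino = get_next_ino();
        inode_init_owner(inode, dir, mode);
        /*
         * The reference returned by new_inode() is the one the dcache will
         * own once d_instantiate() turns the negative dentry positive.
         */
        d_instantiate(dentry, inode);
        dget(dentry);   /* extra pin so the entry stays around, ramfs-style */
        return 0;
}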
1686
1687/**
1688 * d_instantiate_unique - instantiate a non-aliased dentry
1689 * @entry: dentry to instantiate
1690 * @inode: inode to attach to this dentry
1691 *
1692 * Fill in inode information in the entry. On success, it returns NULL.
1693 * If an unhashed alias of "entry" already exists, then we return the
1694 * aliased dentry instead and drop one reference to inode.
1695 *
1696 * Note that in order to avoid conflicts with rename() etc, the caller
1697 * had better be holding the parent directory semaphore.
1698 *
1699 * This also assumes that the inode count has been incremented
1700 * (or otherwise set) by the caller to indicate that it is now
1701 * in use by the dcache.
1702 */
1703static struct dentry *__d_instantiate_unique(struct dentry *entry,
1704                                             struct inode *inode)
1705{
1706        struct dentry *alias;
1707        int len = entry->d_name.len;
1708        const char *name = entry->d_name.name;
1709        unsigned int hash = entry->d_name.hash;
1710
1711        if (!inode) {
1712                __d_instantiate(entry, NULL);
1713                return NULL;
1714        }
1715
1716        hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1717                /*
1718                 * Don't need alias->d_lock here, because aliases with
1719                 * d_parent == entry->d_parent are not subject to name or
1720                 * parent changes, because the parent inode i_mutex is held.
1721                 */
1722                if (alias->d_name.hash != hash)
1723                        continue;
1724                if (alias->d_parent != entry->d_parent)
1725                        continue;
1726                if (alias->d_name.len != len)
1727                        continue;
1728                if (dentry_cmp(alias, name, len))
1729                        continue;
1730                __dget(alias);
1731                return alias;
1732        }
1733
1734        __d_instantiate(entry, inode);
1735        return NULL;
1736}
1737
1738struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1739{
1740        struct dentry *result;
1741
1742        BUG_ON(!hlist_unhashed(&entry->d_alias));
1743
1744        if (inode)
1745                spin_lock(&inode->i_lock);
1746        result = __d_instantiate_unique(entry, inode);
1747        if (inode)
1748                spin_unlock(&inode->i_lock);
1749
1750        if (!result) {
1751                security_d_instantiate(entry, inode);
1752                return NULL;
1753        }
1754
1755        BUG_ON(!d_unhashed(result));
1756        iput(inode);
1757        return result;
1758}
1759
1760EXPORT_SYMBOL(d_instantiate_unique);
1761
1762/**
1763 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1764 * @entry: dentry to complete
1765 * @inode: inode to attach to this dentry
1766 *
1767 * Fill in inode information in the entry.  If a directory alias is found, then
1768 * return an error (and drop inode).  Together with d_materialise_unique() this
1769 * guarantees that a directory inode may never have more than one alias.
1770 */
1771int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1772{
1773        BUG_ON(!hlist_unhashed(&entry->d_alias));
1774
1775        spin_lock(&inode->i_lock);
1776        if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1777                spin_unlock(&inode->i_lock);
1778                iput(inode);
1779                return -EBUSY;
1780        }
1781        __d_instantiate(entry, inode);
1782        spin_unlock(&inode->i_lock);
1783        security_d_instantiate(entry, inode);
1784
1785        return 0;
1786}
1787EXPORT_SYMBOL(d_instantiate_no_diralias);
1788
1789struct dentry *d_make_root(struct inode *root_inode)
1790{
1791        struct dentry *res = NULL;
1792
1793        if (root_inode) {
1794                static const struct qstr name = QSTR_INIT("/", 1);
1795
1796                res = __d_alloc(root_inode->i_sb, &name);
1797                if (res)
1798                        d_instantiate(res, root_inode);
1799                else
1800                        iput(root_inode);
1801        }
1802        return res;
1803}
1804EXPORT_SYMBOL(d_make_root);
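
/*
 * Illustrative sketch (not part of the original source): d_make_root() in a
 * hypothetical examplefs fill_super().  Note that d_make_root() consumes the
 * inode reference even when the dentry allocation fails.
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *root = new_inode(sb);

        if (!root)
                return -ENOMEM;
        root->i_ino = 1;
        root->i_mode = S_IFDIR | 0755;
        root->i_op = &simple_dir_inode_operations;
        root->i_fop = &simple_dir_operations;

        sb->s_root = d_make_root(root);
        if (!sb->s_root)
                return -ENOMEM;         /* root inode already released */
        return 0;
}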
1805
1806static struct dentry * __d_find_any_alias(struct inode *inode)
1807{
1808        struct dentry *alias;
1809
1810        if (hlist_empty(&inode->i_dentry))
1811                return NULL;
1812        alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1813        __dget(alias);
1814        return alias;
1815}
1816
1817/**
1818 * d_find_any_alias - find any alias for a given inode
1819 * @inode: inode to find an alias for
1820 *
1821 * If any aliases exist for the given inode, take and return a
1822 * reference for one of them.  If no aliases exist, return %NULL.
1823 */
1824struct dentry *d_find_any_alias(struct inode *inode)
1825{
1826        struct dentry *de;
1827
1828        spin_lock(&inode->i_lock);
1829        de = __d_find_any_alias(inode);
1830        spin_unlock(&inode->i_lock);
1831        return de;
1832}
1833EXPORT_SYMBOL(d_find_any_alias);
1834
1835/**
1836 * d_obtain_alias - find or allocate a dentry for a given inode
1837 * @inode: inode to allocate the dentry for
1838 *
1839 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1840 * similar open by handle operations.  The returned dentry may be anonymous,
1841 * or may have a full name (if the inode was already in the cache).
1842 *
1843 * When called on a directory inode, we must ensure that the inode only ever
1844 * has one dentry.  If a dentry is found, that is returned instead of
1845 * allocating a new one.
1846 *
1847 * On successful return, the reference to the inode has been transferred
1848 * to the dentry.  In case of an error the reference on the inode is released.
1849 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1850 * be passed in and the error will be propagated to the return value,
1851 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1852 */
1853struct dentry *d_obtain_alias(struct inode *inode)
1854{
1855        static const struct qstr anonstring = QSTR_INIT("/", 1);
1856        struct dentry *tmp;
1857        struct dentry *res;
1858        unsigned add_flags;
1859
1860        if (!inode)
1861                return ERR_PTR(-ESTALE);
1862        if (IS_ERR(inode))
1863                return ERR_CAST(inode);
1864
1865        res = d_find_any_alias(inode);
1866        if (res)
1867                goto out_iput;
1868
1869        tmp = __d_alloc(inode->i_sb, &anonstring);
1870        if (!tmp) {
1871                res = ERR_PTR(-ENOMEM);
1872                goto out_iput;
1873        }
1874
1875        spin_lock(&inode->i_lock);
1876        res = __d_find_any_alias(inode);
1877        if (res) {
1878                spin_unlock(&inode->i_lock);
1879                dput(tmp);
1880                goto out_iput;
1881        }
1882
1883        /* attach a disconnected dentry */
1884        add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED;
1885
1886        spin_lock(&tmp->d_lock);
1887        tmp->d_inode = inode;
1888        tmp->d_flags |= add_flags;
1889        hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1890        hlist_bl_lock(&tmp->d_sb->s_anon);
1891        hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1892        hlist_bl_unlock(&tmp->d_sb->s_anon);
1893        spin_unlock(&tmp->d_lock);
1894        spin_unlock(&inode->i_lock);
1895        security_d_instantiate(tmp, inode);
1896
1897        return tmp;
1898
1899 out_iput:
1900        if (res && !IS_ERR(res))
1901                security_d_instantiate(res, inode);
1902        iput(inode);
1903        return res;
1904}
1905EXPORT_SYMBOL(d_obtain_alias);
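
/*
 * Illustrative sketch (not part of the original source): d_obtain_alias() in
 * an export_operations ->fh_to_dentry() handler.  examplefs_iget() is a
 * hypothetical helper that reads the inode named by the filehandle.
 */
static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
                                             struct fid *fid,
                                             int fh_len, int fh_type)
{
        struct inode *inode;

        if (fh_len < 2)
                return NULL;
        inode = examplefs_iget(sb, fid->i32.ino);       /* hypothetical */
        /* NULL and ERR_PTR inodes are handled by d_obtain_alias() itself */
        return d_obtain_alias(inode);
}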
1906
1907/**
1908 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1909 * @inode:  the inode which may have a disconnected dentry
1910 * @dentry: a negative dentry which we want to point to the inode.
1911 *
1912 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1913 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1914 * and return it, else simply d_add the inode to the dentry and return NULL.
1915 *
1916 * This is needed in the lookup routine of any filesystem that is exportable
1917 * (via knfsd) so that we can build dcache paths to directories effectively.
1918 *
1919 * If a dentry was found and moved, then it is returned.  Otherwise NULL
1920 * is returned.  This matches the expected return value of ->lookup.
1921 *
1922 * Cluster filesystems may call this function with a negative, hashed dentry.
1923 * In that case, we know that the inode will be a regular file, and also this
1924 * will only occur during atomic_open. So we need to check for the dentry
1925 * being already hashed only in the final case.
1926 */
1927struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1928{
1929        struct dentry *new = NULL;
1930
1931        if (IS_ERR(inode))
1932                return ERR_CAST(inode);
1933
1934        if (inode && S_ISDIR(inode->i_mode)) {
1935                spin_lock(&inode->i_lock);
1936                new = __d_find_alias(inode, 1);
1937                if (new) {
1938                        BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1939                        spin_unlock(&inode->i_lock);
1940                        security_d_instantiate(new, inode);
1941                        d_move(new, dentry);
1942                        iput(inode);
1943                } else {
1944                        /* already taking inode->i_lock, so d_add() by hand */
1945                        __d_instantiate(dentry, inode);
1946                        spin_unlock(&inode->i_lock);
1947                        security_d_instantiate(dentry, inode);
1948                        d_rehash(dentry);
1949                }
1950        } else {
1951                d_instantiate(dentry, inode);
1952                if (d_unhashed(dentry))
1953                        d_rehash(dentry);
1954        }
1955        return new;
1956}
1957EXPORT_SYMBOL(d_splice_alias);
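
/*
 * Illustrative sketch (not part of the original source): the common way an
 * exportable filesystem's ->lookup() uses d_splice_alias().  The
 * "examplefs_*" helpers are hypothetical.
 */
static struct dentry *examplefs_lookup(struct inode *dir, struct dentry *dentry,
                                       unsigned int flags)
{
        struct inode *inode = NULL;
        u64 ino;

        ino = examplefs_find_entry(dir, &dentry->d_name);      /* hypothetical */
        if (ino)
                inode = examplefs_iget(dir->i_sb, ino);         /* hypothetical */

        /*
         * With a NULL inode this behaves like d_add() on a negative dentry;
         * with a directory inode an existing disconnected alias is moved
         * into place and returned instead of "dentry".
         */
        return d_splice_alias(inode, dentry);
}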
1958
1959/**
1960 * d_add_ci - lookup or allocate new dentry with case-exact name
1961 * @inode:  the inode case-insensitive lookup has found
1962 * @dentry: the negative dentry that was passed to the parent's lookup func
1963 * @name:   the case-exact name to be associated with the returned dentry
1964 *
1965 * This is to avoid filling the dcache with case-insensitive names for the
1966 * same inode; only the actual correct case is stored in the dcache for
1967 * case-insensitive filesystems.
1968 *
1969 * For a case-insensitive lookup match, if the case-exact dentry already
1970 * exists in the dcache, use it and return it.
1971 *
1972 * If no entry exists with the exact case name, allocate new dentry with
1973 * the exact case, and return the spliced entry.
1974 */
1975struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1976                        struct qstr *name)
1977{
1978        struct dentry *found;
1979        struct dentry *new;
1980
1981        /*
1982         * First check if a dentry matching the name already exists,
1983         * if not go ahead and create it now.
1984         */
1985        found = d_hash_and_lookup(dentry->d_parent, name);
1986        if (unlikely(IS_ERR(found)))
1987                goto err_out;
1988        if (!found) {
1989                new = d_alloc(dentry->d_parent, name);
1990                if (!new) {
1991                        found = ERR_PTR(-ENOMEM);
1992                        goto err_out;
1993                }
1994
1995                found = d_splice_alias(inode, new);
1996                if (found) {
1997                        dput(new);
1998                        return found;
1999                }
2000                return new;
2001        }
2002
2003        /*
2004         * If a matching dentry exists and it's not negative, use it.
2005         *
2006         * Decrement the reference count to balance the iget() done
2007         * earlier on.
2008         */
2009        if (found->d_inode) {
2010                if (unlikely(found->d_inode != inode)) {
2011                        /* This can't happen because bad inodes are unhashed. */
2012                        BUG_ON(!is_bad_inode(inode));
2013                        BUG_ON(!is_bad_inode(found->d_inode));
2014                }
2015                iput(inode);
2016                return found;
2017        }
2018
2019        /*
2020         * Negative dentry: instantiate it unless the inode is a directory and
2021         * already has a dentry.
2022         */
2023        new = d_splice_alias(inode, found);
2024        if (new) {
2025                dput(found);
2026                found = new;
2027        }
2028        return found;
2029
2030err_out:
2031        iput(inode);
2032        return found;
2033}
2034EXPORT_SYMBOL(d_add_ci);
2035
2036/*
2037 * Do the slow-case of the dentry name compare.
2038 *
2039 * Unlike the dentry_cmp() function, we need to atomically
2040 * load the name and length information, so that the
2041 * filesystem can rely on them, and can use the 'name' and
2042 * 'len' information without worrying about walking off the
2043 * end of memory etc.
2044 *
2045 * Thus the read_seqcount_retry() and the "duplicate" info
2046 * in arguments (the low-level filesystem should not look
2047 * at the dentry inode or name contents directly, since
2048 * rename can change them while we're in RCU mode).
2049 */
2050enum slow_d_compare {
2051        D_COMP_OK,
2052        D_COMP_NOMATCH,
2053        D_COMP_SEQRETRY,
2054};
2055
2056static noinline enum slow_d_compare slow_dentry_cmp(
2057                const struct dentry *parent,
2058                struct dentry *dentry,
2059                unsigned int seq,
2060                const struct qstr *name)
2061{
2062        int tlen = dentry->d_name.len;
2063        const char *tname = dentry->d_name.name;
2064
2065        if (read_seqcount_retry(&dentry->d_seq, seq)) {
2066                cpu_relax();
2067                return D_COMP_SEQRETRY;
2068        }
2069        if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2070                return D_COMP_NOMATCH;
2071        return D_COMP_OK;
2072}
2073
2074/**
2075 * __d_lookup_rcu - search for a dentry (racy, store-free)
2076 * @parent: parent dentry
2077 * @name: qstr of name we wish to find
2078 * @seqp: returns d_seq value at the point where the dentry was found
2079 * Returns: dentry, or NULL
2080 *
2081 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2082 * resolution (store-free path walking) design described in
2083 * Documentation/filesystems/path-lookup.txt.
2084 *
2085 * This is not to be used outside core vfs.
2086 *
2087 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2088 * held, and rcu_read_lock held. The returned dentry must not be stored into
2089 * without taking d_lock and checking d_seq sequence count against @seq
2090 * returned here.
2091 *
2092 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2093 * function.
2094 *
2095 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2096 * the returned dentry, so long as its parent's seqlock is checked after the
2097 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2098 * is formed, giving integrity down the path walk.
2099 *
2100 * NOTE! The caller *has* to check the resulting dentry against the sequence
2101 * number we've returned before using any of the resulting dentry state!
2102 */
2103struct dentry *__d_lookup_rcu(const struct dentry *parent,
2104                                const struct qstr *name,
2105                                unsigned *seqp)
2106{
2107        u64 hashlen = name->hash_len;
2108        const unsigned char *str = name->name;
2109        struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2110        struct hlist_bl_node *node;
2111        struct dentry *dentry;
2112
2113        /*
2114         * Note: There is significant duplication with __d_lookup which is
2115         * required to prevent single threaded performance regressions
2116         * especially on architectures where smp_rmb (in seqcounts) are costly.
2117         * Keep the two functions in sync.
2118         */
2119
2120        /*
2121         * The hash list is protected using RCU.
2122         *
2123         * Carefully use d_seq when comparing a candidate dentry, to avoid
2124         * races with d_move().
2125         *
2126         * It is possible that concurrent renames can mess up our list
2127         * walk here and result in missing our dentry, resulting in the
2128         * false-negative result. d_lookup() protects against concurrent
2129         * renames using rename_lock seqlock.
2130         *
2131         * See Documentation/filesystems/path-lookup.txt for more details.
2132         */
2133        hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2134                unsigned seq;
2135
2136seqretry:
2137                /*
2138                 * The dentry sequence count protects us from concurrent
2139                 * renames, and thus protects parent and name fields.
2140                 *
2141                 * The caller must perform a seqcount check in order
2142                 * to do anything useful with the returned dentry.
2143                 *
2144                 * NOTE! We do a "raw" seqcount_begin here. That means that
2145                 * we don't wait for the sequence count to stabilize if it
2146                 * is in the middle of a sequence change. If we do the slow
2147                 * dentry compare, we will do seqretries until it is stable,
2148                 * and if we end up with a successful lookup, we actually
2149                 * want to exit RCU lookup anyway.
2150                 */
2151                seq = raw_seqcount_begin(&dentry->d_seq);
2152                if (dentry->d_parent != parent)
2153                        continue;
2154                if (d_unhashed(dentry))
2155                        continue;
2156
2157                if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2158                        if (dentry->d_name.hash != hashlen_hash(hashlen))
2159                                continue;
2160                        *seqp = seq;
2161                        switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2162                        case D_COMP_OK:
2163                                return dentry;
2164                        case D_COMP_NOMATCH:
2165                                continue;
2166                        default:
2167                                goto seqretry;
2168                        }
2169                }
2170
2171                if (dentry->d_name.hash_len != hashlen)
2172                        continue;
2173                *seqp = seq;
2174                if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2175                        return dentry;
2176        }
2177        return NULL;
2178}
2179
2180/**
2181 * d_lookup - search for a dentry
2182 * @parent: parent dentry
2183 * @name: qstr of name we wish to find
2184 * Returns: dentry, or NULL
2185 *
2186 * d_lookup searches the children of the parent dentry for the name in
2187 * question. If the dentry is found its reference count is incremented and the
2188 * dentry is returned. The caller must use dput to free the entry when it has
2189 * finished using it. %NULL is returned if the dentry does not exist.
2190 */
2191struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2192{
2193        struct dentry *dentry;
2194        unsigned seq;
2195
2196        do {
2197                seq = read_seqbegin(&rename_lock);
2198                dentry = __d_lookup(parent, name);
2199                if (dentry)
2200                        break;
2201        } while (read_seqretry(&rename_lock, seq));
2202        return dentry;
2203}
2204EXPORT_SYMBOL(d_lookup);
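
/*
 * Illustrative sketch (not part of the original source): hashing a raw name
 * and probing the dcache with d_lookup().  On success the returned dentry
 * holds a reference that the caller must drop with dput().  Filesystems with
 * a custom ->d_hash() should use d_hash_and_lookup() (below) instead.
 */
static struct dentry *example_cached_child(struct dentry *parent,
                                           const char *name)
{
        struct qstr q;

        q.name = name;
        q.len = strlen(name);
        q.hash = full_name_hash(q.name, q.len);
        return d_lookup(parent, &q);
}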
2205
2206/**
2207 * __d_lookup - search for a dentry (racy)
2208 * @parent: parent dentry
2209 * @name: qstr of name we wish to find
2210 * Returns: dentry, or NULL
2211 *
2212 * __d_lookup is like d_lookup, however it may (rarely) return a
2213 * false-negative result due to unrelated rename activity.
2214 *
2215 * __d_lookup is slightly faster by avoiding the rename_lock read seqlock;
2216 * however, it must be used carefully, e.g. with a following d_lookup in
2217 * the case of failure.
2218 *
2219 * __d_lookup callers must be commented.
2220 */
2221struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2222{
2223        unsigned int len = name->len;
2224        unsigned int hash = name->hash;
2225        const unsigned char *str = name->name;
2226        struct hlist_bl_head *b = d_hash(parent, hash);
2227        struct hlist_bl_node *node;
2228        struct dentry *found = NULL;
2229        struct dentry *dentry;
2230
2231        /*
2232         * Note: There is significant duplication with __d_lookup_rcu which is
2233         * required to prevent single threaded performance regressions
2234         * especially on architectures where smp_rmb (in seqcounts) are costly.
2235         * Keep the two functions in sync.
2236         */
2237
2238        /*
2239         * The hash list is protected using RCU.
2240         *
2241         * Take d_lock when comparing a candidate dentry, to avoid races
2242         * with d_move().
2243         *
2244         * It is possible that concurrent renames can mess up our list
2245         * walk here and result in missing our dentry, resulting in the
2246         * false-negative result. d_lookup() protects against concurrent
2247         * renames using rename_lock seqlock.
2248         *
2249         * See Documentation/filesystems/path-lookup.txt for more details.
2250         */
2251        rcu_read_lock();
2252        
2253        hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2254
2255                if (dentry->d_name.hash != hash)
2256                        continue;
2257
2258                spin_lock(&dentry->d_lock);
2259                if (dentry->d_parent != parent)
2260                        goto next;
2261                if (d_unhashed(dentry))
2262                        goto next;
2263
2264                /*
2265                 * It is safe to compare names since d_move() cannot
2266                 * change the qstr (protected by d_lock).
2267                 */
2268                if (parent->d_flags & DCACHE_OP_COMPARE) {
2269                        int tlen = dentry->d_name.len;
2270                        const char *tname = dentry->d_name.name;
2271                        if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2272                                goto next;
2273                } else {
2274                        if (dentry->d_name.len != len)
2275                                goto next;
2276                        if (dentry_cmp(dentry, str, len))
2277                                goto next;
2278                }
2279
2280                dentry->d_lockref.count++;
2281                found = dentry;
2282                spin_unlock(&dentry->d_lock);
2283                break;
2284next:
2285                spin_unlock(&dentry->d_lock);
2286        }
2287        rcu_read_unlock();
2288
2289        return found;
2290}
2291
2292/**
2293 * d_hash_and_lookup - hash the qstr then search for a dentry
2294 * @dir: Directory to search in
2295 * @name: qstr of name we wish to find
2296 *
2297 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2298 */
2299struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2300{
2301        /*
2302         * Check for a fs-specific hash function. Note that we must
2303         * calculate the standard hash first, as the d_op->d_hash()
2304         * routine may choose to leave the hash value unchanged.
2305         */
2306        name->hash = full_name_hash(name->name, name->len);
2307        if (dir->d_flags & DCACHE_OP_HASH) {
2308                int err = dir->d_op->d_hash(dir, name);
2309                if (unlikely(err < 0))
2310                        return ERR_PTR(err);
2311        }
2312        return d_lookup(dir, name);
2313}
2314EXPORT_SYMBOL(d_hash_and_lookup);
2315
2316/**
2317 * d_validate - verify dentry provided from insecure source (deprecated)
2318 * @dentry: The dentry alleged to be valid child of @dparent
2319 * @dparent: The parent dentry (known to be valid)
2320 *
2321 * An insecure source has sent us a dentry, here we verify it and dget() it.
2322 * This is used by ncpfs in its readdir implementation.
2323 * Zero is returned if the dentry is invalid.
2324 *
2325 * This function is slow for big directories and is deprecated; do not use it.
2326 */
2327int d_validate(struct dentry *dentry, struct dentry *dparent)
2328{
2329        struct dentry *child;
2330
2331        spin_lock(&dparent->d_lock);
2332        list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2333                if (dentry == child) {
2334                        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2335                        __dget_dlock(dentry);
2336                        spin_unlock(&dentry->d_lock);
2337                        spin_unlock(&dparent->d_lock);
2338                        return 1;
2339                }
2340        }
2341        spin_unlock(&dparent->d_lock);
2342
2343        return 0;
2344}
2345EXPORT_SYMBOL(d_validate);
2346
2347/*
2348 * When a file is deleted, we have two options:
2349 * - turn this dentry into a negative dentry
2350 * - unhash this dentry and free it.
2351 *
2352 * Usually, we want to just turn this into
2353 * a negative dentry, but if anybody else is
2354 * currently using the dentry or the inode
2355 * we can't do that and we fall back on removing
2356 * it from the hash queues and waiting for
2357 * it to be deleted later when it has no users
2358 */
2359 
2360/**
2361 * d_delete - delete a dentry
2362 * @dentry: The dentry to delete
2363 *
2364 * Turn the dentry into a negative dentry if possible, otherwise
2365 * remove it from the hash queues so it can be deleted later
2366 */
2367 
2368void d_delete(struct dentry * dentry)
2369{
2370        struct inode *inode;
2371        int isdir = 0;
2372        /*
2373         * Are we the only user?
2374         */
2375again:
2376        spin_lock(&dentry->d_lock);
2377        inode = dentry->d_inode;
2378        isdir = S_ISDIR(inode->i_mode);
2379        if (dentry->d_lockref.count == 1) {
2380                if (!spin_trylock(&inode->i_lock)) {
2381                        spin_unlock(&dentry->d_lock);
2382                        cpu_relax();
2383                        goto again;
2384                }
2385                dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2386                dentry_unlink_inode(dentry);
2387                fsnotify_nameremove(dentry, isdir);
2388                return;
2389        }
2390
2391        if (!d_unhashed(dentry))
2392                __d_drop(dentry);
2393
2394        spin_unlock(&dentry->d_lock);
2395
2396        fsnotify_nameremove(dentry, isdir);
2397}
2398EXPORT_SYMBOL(d_delete);
2399
2400static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2401{
2402        BUG_ON(!d_unhashed(entry));
2403        hlist_bl_lock(b);
2404        entry->d_flags |= DCACHE_RCUACCESS;
2405        hlist_bl_add_head_rcu(&entry->d_hash, b);
2406        hlist_bl_unlock(b);
2407}
2408
2409static void _d_rehash(struct dentry * entry)
2410{
2411        __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2412}
2413
2414/**
2415 * d_rehash     - add an entry back to the hash
2416 * @entry: dentry to add to the hash
2417 *
2418 * Adds a dentry to the hash according to its name.
2419 */
2420 
2421void d_rehash(struct dentry * entry)
2422{
2423        spin_lock(&entry->d_lock);
2424        _d_rehash(entry);
2425        spin_unlock(&entry->d_lock);
2426}
2427EXPORT_SYMBOL(d_rehash);
2428
2429/**
2430 * dentry_update_name_case - update case insensitive dentry with a new name
2431 * @dentry: dentry to be updated
2432 * @name: new name
2433 *
2434 * Update a case insensitive dentry with new case of name.
2435 *
2436 * dentry must have been returned by d_lookup with name @name. Old and new
2437 * name lengths must match (ie. no d_compare which allows mismatched name
2438 * lengths).
2439 *
2440 * Parent inode i_mutex must be held over d_lookup and into this call (to
2441 * keep renames and concurrent inserts, and readdir(2) away).
2442 */
2443void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2444{
2445        BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2446        BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2447
2448        spin_lock(&dentry->d_lock);
2449        write_seqcount_begin(&dentry->d_seq);
2450        memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2451        write_seqcount_end(&dentry->d_seq);
2452        spin_unlock(&dentry->d_lock);
2453}
2454EXPORT_SYMBOL(dentry_update_name_case);
2455
2456static void switch_names(struct dentry *dentry, struct dentry *target)
2457{
2458        if (dname_external(target)) {
2459                if (dname_external(dentry)) {
2460                        /*
2461                         * Both external: swap the pointers
2462                         */
2463                        swap(target->d_name.name, dentry->d_name.name);
2464                } else {
2465                        /*
2466                         * dentry:internal, target:external.  Steal target's
2467                         * storage and make target internal.
2468                         */
2469                        memcpy(target->d_iname, dentry->d_name.name,
2470                                        dentry->d_name.len + 1);
2471                        dentry->d_name.name = target->d_name.name;
2472                        target->d_name.name = target->d_iname;
2473                }
2474        } else {
2475                if (dname_external(dentry)) {
2476                        /*
2477                         * dentry:external, target:internal.  Give dentry's
2478                         * storage to target and make dentry internal
2479                         */
2480                        memcpy(dentry->d_iname, target->d_name.name,
2481                                        target->d_name.len + 1);
2482                        target->d_name.name = dentry->d_name.name;
2483                        dentry->d_name.name = dentry->d_iname;
2484                } else {
2485                        /*
2486                         * Both are internal.  Just copy target to dentry
2487                         */
2488                        memcpy(dentry->d_iname, target->d_name.name,
2489                                        target->d_name.len + 1);
2490                        dentry->d_name.len = target->d_name.len;
2491                        return;
2492                }
2493        }
2494        swap(dentry->d_name.len, target->d_name.len);
2495}
2496
2497static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2498{
2499        /*
2500         * XXXX: do we really need to take target->d_lock?
2501         */
2502        if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2503                spin_lock(&target->d_parent->d_lock);
2504        else {
2505                if (d_ancestor(dentry->d_parent, target->d_parent)) {
2506                        spin_lock(&dentry->d_parent->d_lock);
2507                        spin_lock_nested(&target->d_parent->d_lock,
2508                                                DENTRY_D_LOCK_NESTED);
2509                } else {
2510                        spin_lock(&target->d_parent->d_lock);
2511                        spin_lock_nested(&dentry->d_parent->d_lock,
2512                                                DENTRY_D_LOCK_NESTED);
2513                }
2514        }
2515        if (target < dentry) {
2516                spin_lock_nested(&target->d_lock, 2);
2517                spin_lock_nested(&dentry->d_lock, 3);
2518        } else {
2519                spin_lock_nested(&dentry->d_lock, 2);
2520                spin_lock_nested(&target->d_lock, 3);
2521        }
2522}
2523
2524static void dentry_unlock_parents_for_move(struct dentry *dentry,
2525                                        struct dentry *target)
2526{
2527        if (target->d_parent != dentry->d_parent)
2528                spin_unlock(&dentry->d_parent->d_lock);
2529        if (target->d_parent != target)
2530                spin_unlock(&target->d_parent->d_lock);
2531}
2532
2533/*
2534 * When switching names, the actual string doesn't strictly have to
2535 * be preserved in the target - because we're dropping the target
2536 * anyway. As such, we can just do a simple memcpy() to copy over
2537 * the new name before we switch.
2538 *
2539 * Note that we have to be a lot more careful about getting the hash
2540 * switched - we have to switch the hash value properly even if it
2541 * then no longer matches the actual (corrupted) string of the target.
2542 * The hash value has to match the hash queue that the dentry is on.
2543 */
2544/*
2545 * __d_move - move a dentry
2546 * @dentry: entry to move
2547 * @target: new dentry
2548 *
2549 * Update the dcache to reflect the move of a file name. Negative
2550 * dcache entries should not be moved in this way. Caller must hold
2551 * rename_lock, the i_mutex of the source and target directories,
2552 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2553 */
2554static void __d_move(struct dentry * dentry, struct dentry * target)
2555{
2556        if (!dentry->d_inode)
2557                printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2558
2559        BUG_ON(d_ancestor(dentry, target));
2560        BUG_ON(d_ancestor(target, dentry));
2561
2562        dentry_lock_for_move(dentry, target);
2563
2564        write_seqcount_begin(&dentry->d_seq);
2565        write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2566
2567        /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2568
2569        /*
2570         * Move the dentry to the target hash queue. Don't bother checking
2571         * for the same hash queue because of how unlikely it is.
2572         */
2573        __d_drop(dentry);
2574        __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2575
2576        /* Unhash the target: dput() will then get rid of it */
2577        __d_drop(target);
2578
2579        list_del(&dentry->d_u.d_child);
2580        list_del(&target->d_u.d_child);
2581
2582        /* Switch the names.. */
2583        switch_names(dentry, target);
2584        swap(dentry->d_name.hash, target->d_name.hash);
2585
2586        /* ... and switch the parents */
2587        if (IS_ROOT(dentry)) {
2588                dentry->d_parent = target->d_parent;
2589                target->d_parent = target;
2590                INIT_LIST_HEAD(&target->d_u.d_child);
2591        } else {
2592                swap(dentry->d_parent, target->d_parent);
2593
2594                /* And add them back to the (new) parent lists */
2595                list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2596        }
2597
2598        list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2599
2600        write_seqcount_end(&target->d_seq);
2601        write_seqcount_end(&dentry->d_seq);
2602
2603        dentry_unlock_parents_for_move(dentry, target);
2604        spin_unlock(&target->d_lock);
2605        fsnotify_d_move(dentry);
2606        spin_unlock(&dentry->d_lock);
2607}
2608
2609/*
2610 * d_move - move a dentry
2611 * @dentry: entry to move
2612 * @target: new dentry
2613 *
2614 * Update the dcache to reflect the move of a file name. Negative
2615 * dcache entries should not be moved in this way. See the locking
2616 * requirements for __d_move.
2617 */
2618void d_move(struct dentry *dentry, struct dentry *target)
2619{
2620        write_seqlock(&rename_lock);
2621        __d_move(dentry, target);
2622        write_sequnlock(&rename_lock);
2623}
2624EXPORT_SYMBOL(d_move);
2625
2626/**
2627 * d_ancestor - search for an ancestor
2628 * @p1: ancestor dentry
2629 * @p2: child dentry
2630 *
2631 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2632 * an ancestor of p2, else NULL.
2633 */
2634struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2635{
2636        struct dentry *p;
2637
2638        for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2639                if (p->d_parent == p1)
2640                        return p;
2641        }
2642        return NULL;
2643}
2644
2645/*
2646 * This helper attempts to cope with remotely renamed directories
2647 *
2648 * It assumes that the caller is already holding
2649 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2650 *
2651 * Note: If ever the locking in lock_rename() changes, then please
2652 * remember to update this too...
2653 */
2654static struct dentry *__d_unalias(struct inode *inode,
2655                struct dentry *dentry, struct dentry *alias)
2656{
2657        struct mutex *m1 = NULL, *m2 = NULL;
2658        struct dentry *ret = ERR_PTR(-EBUSY);
2659
2660        /* If alias and dentry share a parent, then no extra locks required */
2661        if (alias->d_parent == dentry->d_parent)
2662                goto out_unalias;
2663
2664        /* See lock_rename() */
2665        if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2666                goto out_err;
2667        m1 = &dentry->d_sb->s_vfs_rename_mutex;
2668        if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2669                goto out_err;
2670        m2 = &alias->d_parent->d_inode->i_mutex;
2671out_unalias:
2672        if (likely(!d_mountpoint(alias))) {
2673                __d_move(alias, dentry);
2674                ret = alias;
2675        }
2676out_err:
2677        spin_unlock(&inode->i_lock);
2678        if (m2)
2679                mutex_unlock(m2);
2680        if (m1)
2681                mutex_unlock(m1);
2682        return ret;
2683}
2684
2685/*
2686 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2687 * named dentry in place of the dentry to be replaced.
2688 * Returns with anon->d_lock held!
2689 */
2690static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2691{
2692        struct dentry *dparent;
2693
2694        dentry_lock_for_move(anon, dentry);
2695
2696        write_seqcount_begin(&dentry->d_seq);
2697        write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
2698
2699        dparent = dentry->d_parent;
2700
2701        switch_names(dentry, anon);
2702        swap(dentry->d_name.hash, anon->d_name.hash);
2703
2704        dentry->d_parent = dentry;
2705        list_del_init(&dentry->d_u.d_child);
2706        anon->d_parent = dparent;
2707        list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2708
2709        write_seqcount_end(&dentry->d_seq);
2710        write_seqcount_end(&anon->d_seq);
2711
2712        dentry_unlock_parents_for_move(anon, dentry);
2713        spin_unlock(&dentry->d_lock);
2714
2715        /* anon->d_lock still locked, returns locked */
2716}
2717
2718/**
2719 * d_materialise_unique - introduce an inode into the tree
2720 * @dentry: candidate dentry
2721 * @inode: inode to bind to the dentry, to which aliases may be attached
2722 *
2723 * Introduces a dentry into the tree, substituting an extant disconnected
2724 * root directory alias in its place if there is one. Caller must hold the
2725 * i_mutex of the parent directory.
2726 */
2727struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2728{
2729        struct dentry *actual;
2730
2731        BUG_ON(!d_unhashed(dentry));
2732
2733        if (!inode) {
2734                actual = dentry;
2735                __d_instantiate(dentry, NULL);
2736                d_rehash(actual);
2737                goto out_nolock;
2738        }
2739
2740        spin_lock(&inode->i_lock);
2741
2742        if (S_ISDIR(inode->i_mode)) {
2743                struct dentry *alias;
2744
2745                /* Does an aliased dentry already exist? */
2746                alias = __d_find_alias(inode, 0);
2747                if (alias) {
2748                        actual = alias;
2749                        write_seqlock(&rename_lock);
2750
2751                        if (d_ancestor(alias, dentry)) {
2752                                /* Check for loops */
2753                                actual = ERR_PTR(-ELOOP);
2754                                spin_unlock(&inode->i_lock);
2755                        } else if (IS_ROOT(alias)) {
2756                                /* Is this an anonymous mountpoint that we
2757                                 * could splice into our tree? */
2758                                __d_materialise_dentry(dentry, alias);
2759                                write_sequnlock(&rename_lock);
2760                                __d_drop(alias);
2761                                goto found;
2762                        } else {
2763                                /* Nope, but we must(!) avoid directory
2764                                 * aliasing. This drops inode->i_lock */
2765                                actual = __d_unalias(inode, dentry, alias);
2766                        }
2767                        write_sequnlock(&rename_lock);
2768                        if (IS_ERR(actual)) {
2769                                if (PTR_ERR(actual) == -ELOOP)
2770                                        pr_warn_ratelimited(
2771                                                "VFS: Lookup of '%s' in %s %s"
2772                                                " would have caused loop\n",
2773                                                dentry->d_name.name,
2774                                                inode->i_sb->s_type->name,
2775                                                inode->i_sb->s_id);
2776                                dput(alias);
2777                        }
2778                        goto out_nolock;
2779                }
2780        }
2781
2782        /* Add a unique reference */
2783        actual = __d_instantiate_unique(dentry, inode);
2784        if (!actual)
2785                actual = dentry;
2786        else
2787                BUG_ON(!d_unhashed(actual));
2788
2789        spin_lock(&actual->d_lock);
2790found:
2791        _d_rehash(actual);
2792        spin_unlock(&actual->d_lock);
2793        spin_unlock(&inode->i_lock);
2794out_nolock:
2795        if (actual == dentry) {
2796                security_d_instantiate(dentry, inode);
2797                return NULL;
2798        }
2799
2800        iput(inode);
2801        return actual;
2802}
2803EXPORT_SYMBOL_GPL(d_materialise_unique);
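
/*
 * Illustrative sketch (not part of the original source): a network
 * filesystem's ->lookup() handing the result to d_materialise_unique(), so
 * that a disconnected directory alias (e.g. one created earlier by
 * d_obtain_alias()) is spliced into place instead of growing a second alias.
 * examplefs_remote_iget() is a hypothetical helper.
 */
static struct dentry *examplefs_remote_lookup(struct inode *dir,
                                              struct dentry *dentry,
                                              unsigned int flags)
{
        struct inode *inode;

        inode = examplefs_remote_iget(dir, &dentry->d_name);   /* hypothetical */
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        /* returns NULL when "dentry" itself was used, or the spliced alias */
        return d_materialise_unique(dentry, inode);
}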
2804
2805static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2806{
2807        *buflen -= namelen;
2808        if (*buflen < 0)
2809                return -ENAMETOOLONG;
2810        *buffer -= namelen;
2811        memcpy(*buffer, str, namelen);
2812        return 0;
2813}
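
/*
 * Illustrative sketch (not part of the original source): the path helpers
 * below fill the caller's buffer from the end towards the front, which is
 * why users must work with the returned pointer rather than the start of
 * the buffer.  A hypothetical helper showing the prepend() pattern:
 */
static char *example_join_backwards(char *buf, int buflen)
{
        char *p = buf + buflen;
        int len = buflen;

        if (prepend(&p, &len, "\0", 1) ||       /* terminating NUL first */
            prepend(&p, &len, "file", 4) ||
            prepend(&p, &len, "/", 1) ||
            prepend(&p, &len, "dir", 3))
                return ERR_PTR(-ENAMETOOLONG);
        return p;                               /* points at "dir/file" */
}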
2814
2815/**
2816 * prepend_name - prepend a pathname in front of current buffer pointer
2817 * @buffer: buffer pointer
2818 * @buflen: allocated length of the buffer
2819 * @name:   name string and length qstr structure
2820 *
2821 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2822 * make sure that either the old or the new name pointer and length are
2823 * fetched. However, there may be a mismatch between length and pointer.
2824 * The length cannot be trusted; we need to copy the name byte by byte until
2825 * the length is reached or a null byte is found. It also prepends "/" at
2826 * the beginning of the name. The sequence number check at the caller will
2827 * retry it again when a d_move() does happen. So any garbage in the buffer
2828 * due to mismatched pointer and length will be discarded.
2829 */
2830static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2831{
2832        const char *dname = ACCESS_ONCE(name->name);
2833        u32 dlen = ACCESS_ONCE(name->len);
2834        char *p;
2835
2836        if (*buflen < dlen + 1)
2837                return -ENAMETOOLONG;
2838        *buflen -= dlen + 1;
2839        p = *buffer -= dlen + 1;
2840        *p++ = '/';
2841        while (dlen--) {
2842                char c = *dname++;
2843                if (!c)
2844                        break;
2845                *p++ = c;
2846        }
2847        return 0;
2848}
2849
2850/**
2851 * prepend_path - Prepend path string to a buffer
2852 * @path: the dentry/vfsmount to report
2853 * @root: root vfsmnt/dentry
2854 * @buffer: pointer to the end of the buffer
2855 * @buflen: pointer to buffer length
2856 *
2857 * The function will first try to write out the pathname without taking any
2858 * lock other than the RCU read lock to make sure that dentries won't go away.
2859 * It only checks the sequence number of the global rename_lock as any change
2860 * in the dentry's d_seq will be preceded by changes in the rename_lock
2861 * sequence number. If the sequence number has changed, it will restart
2862 * the whole pathname back-tracing sequence by taking the rename_lock.
2863 * In this case, there is no need to take the RCU read lock as the recursive
2864 * parent pointer references will keep the dentry chain alive as long as no
2865 * rename operation is performed.
2866 */
2867static int prepend_path(const struct path *path,
2868                        const struct path *root,
2869                        char **buffer, int *buflen)
2870{
2871        struct dentry *dentry;
2872        struct vfsmount *vfsmnt;
2873        struct mount *mnt;
2874        int error = 0;
2875        unsigned seq, m_seq = 0;
2876        char *bptr;
2877        int blen;
2878
2879        rcu_read_lock();
2880restart_mnt:
2881        read_seqbegin_or_lock(&mount_lock, &m_seq);
2882        seq = 0;
2883        rcu_read_lock();
2884restart:
2885        bptr = *buffer;
2886        blen = *buflen;
2887        error = 0;
2888        dentry = path->dentry;
2889        vfsmnt = path->mnt;
2890        mnt = real_mount(vfsmnt);
2891        read_seqbegin_or_lock(&rename_lock, &seq);
2892        while (dentry != root->dentry || vfsmnt != root->mnt) {
2893                struct dentry * parent;
2894
2895                if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2896                        struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2897                        /* Global root? */
2898                        if (mnt != parent) {
2899                                dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2900                                mnt = parent;
2901                                vfsmnt = &mnt->mnt;
2902                                continue;
2903                        }
2904                        /*
2905                         * Filesystems needing to implement special "root names"
2906                         * should do so with ->d_dname()
2907                         */
2908                        if (IS_ROOT(dentry) &&
2909                           (dentry->d_name.len != 1 ||
2910                            dentry->d_name.name[0] != '/')) {
2911                                WARN(1, "Root dentry has weird name <%.*s>\n",
2912                                     (int) dentry->d_name.len,
2913                                     dentry->d_name.name);
2914                        }
2915                        if (!error)
2916                                error = is_mounted(vfsmnt) ? 1 : 2;
2917                        break;
2918                }
2919                parent = dentry->d_parent;
2920                prefetch(parent);
2921                error = prepend_name(&bptr, &blen, &dentry->d_name);
2922                if (error)
2923                        break;
2924
2925                dentry = parent;
2926        }
2927        if (!(seq & 1))
2928                rcu_read_unlock();
2929        if (need_seqretry(&rename_lock, seq)) {
2930                seq = 1;
2931                goto restart;
2932        }
2933        done_seqretry(&rename_lock, seq);
2934
2935        if (!(m_seq & 1))
2936                rcu_read_unlock();
2937        if (need_seqretry(&mount_lock, m_seq)) {
2938                m_seq = 1;
2939                goto restart_mnt;
2940        }
2941        done_seqretry(&mount_lock, m_seq);
2942
2943        if (error >= 0 && bptr == *buffer) {
2944                if (--blen < 0)
2945                        error = -ENAMETOOLONG;
2946                else
2947                        *--bptr = '/';
2948        }
2949        *buffer = bptr;
2950        *buflen = blen;
2951        return error;
2952}
2953
2954/**
2955 * __d_path - return the path of a dentry
2956 * @path: the dentry/vfsmount to report
2957 * @root: root vfsmnt/dentry
2958 * @buf: buffer to return value in
2959 * @buflen: buffer length
2960 *
2961 * Convert a dentry into an ASCII path name.
2962 *
2963 * Returns a pointer into the buffer or an error code if the
2964 * path was too long.
2965 *
2966 * "buflen" should be positive.
2967 *
2968 * If the path is not reachable from the supplied root, return %NULL.
2969 */
2970char *__d_path(const struct path *path,
2971               const struct path *root,
2972               char *buf, int buflen)
2973{
2974        char *res = buf + buflen;
2975        int error;
2976
2977        prepend(&res, &buflen, "\0", 1);
2978        error = prepend_path(path, root, &res, &buflen);
2979
2980        if (error < 0)
2981                return ERR_PTR(error);
2982        if (error > 0)
2983                return NULL;
2984        return res;
2985}
2986
2987char *d_absolute_path(const struct path *path,
2988               char *buf, int buflen)
2989{
2990        struct path root = {};
2991        char *res = buf + buflen;
2992        int error;
2993
2994        prepend(&res, &buflen, "\0", 1);
2995        error = prepend_path(path, &root, &res, &buflen);
2996
2997        if (error > 1)
2998                error = -EINVAL;
2999        if (error < 0)
3000                return ERR_PTR(error);
3001        return res;
3002}
3003
3004/*
3005 * same as __d_path but appends "(deleted)" for unlinked files.
3006 */
3007static int path_with_deleted(const struct path *path,
3008                             const struct path *root,
3009                             char **buf, int *buflen)
3010{
3011        prepend(buf, buflen, "\0", 1);
3012        if (d_unlinked(path->dentry)) {
3013                int error = prepend(buf, buflen, " (deleted)", 10);
3014                if (error)
3015                        return error;
3016        }
3017
3018        return prepend_path(path, root, buf, buflen);
3019}
3020
3021static int prepend_unreachable(char **buffer, int *buflen)
3022{
3023        return prepend(buffer, buflen, "(unreachable)", 13);
3024}
3025
3026static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3027{
3028        unsigned seq;
3029
3030        do {
3031                seq = read_seqcount_begin(&fs->seq);
3032                *root = fs->root;
3033        } while (read_seqcount_retry(&fs->seq, seq));
3034}
3035
3036/**
3037 * d_path - return the path of a dentry
3038 * @path: path to report
3039 * @buf: buffer to return value in
3040 * @buflen: buffer length
3041 *
3042 * Convert a dentry into an ASCII path name. If the entry has been deleted
3043 * the string " (deleted)" is appended. Note that this is ambiguous.
3044 *
3045 * Returns a pointer into the buffer or an error code if the path was
3046 * too long. Note: Callers should use the returned pointer, not the passed
3047 * in buffer, to use the name! The implementation often starts at an offset
3048 * into the buffer, and may leave 0 bytes at the start.
3049 *
3050 * "buflen" should be positive.
3051 */
3052char *d_path(const struct path *path, char *buf, int buflen)
3053{
3054        char *res = buf + buflen;
3055        struct path root;
3056        int error;
3057
3058        /*
3059         * We have various synthetic filesystems that never get mounted.  On
3060         * these filesystems dentries are never used for lookup purposes, and
3061         * thus don't need to be hashed.  They also don't need a name until a
3062         * user wants to identify the object in /proc/pid/fd/.  The little hack
3063         * below allows us to generate a name for these objects on demand:
3064         *
3065         * Some pseudo inodes are mountable.  When they are mounted
3066         * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
3067         * and instead have d_path return the mounted path.
3068         */
3069        if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3070            (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3071                return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3072
3073        rcu_read_lock();
3074        get_fs_root_rcu(current->fs, &root);
3075        error = path_with_deleted(path, &root, &res, &buflen);
3076        rcu_read_unlock();
3077
3078        if (error < 0)
3079                res = ERR_PTR(error);
3080        return res;
3081}
3082EXPORT_SYMBOL(d_path);
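
/*
 * Illustrative sketch (not part of the original source): typical d_path()
 * usage.  A whole page is a convenient worst-case buffer, and the returned
 * pointer (not the buffer start) is what must be printed.
 */
static void example_log_path(struct file *file)
{
        char *buf = (char *)__get_free_page(GFP_KERNEL);
        char *p;

        if (!buf)
                return;
        p = d_path(&file->f_path, buf, PAGE_SIZE);
        if (!IS_ERR(p))
                pr_info("path: %s\n", p);
        free_page((unsigned long)buf);
}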
3083
3084/*
3085 * Helper function for dentry_operations.d_dname() members
3086 */
3087char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3088                        const char *fmt, ...)
3089{
3090        va_list args;
3091        char temp[64];
3092        int sz;
3093
3094        va_start(args, fmt);
3095        sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3096        va_end(args);
3097
3098        if (sz > sizeof(temp) || sz > buflen)
3099                return ERR_PTR(-ENAMETOOLONG);
3100
3101        buffer += buflen - sz;
3102        return memcpy(buffer, temp, sz);
3103}
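/*
 * Illustrative d_dname instance (a sketch modeled on pipefs; the name
 * "examplefs_dname" is hypothetical): pseudo filesystems typically just
 * forward to dynamic_dname() with a printf-style template:
 *
 *	static char *examplefs_dname(struct dentry *dentry, char *buffer,
 *				     int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
 *					dentry->d_inode->i_ino);
 *	}
 */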
3104
3105char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3106{
3107        char *end = buffer + buflen;
3108        /* these dentries are never renamed, so d_lock is not needed */
3109        if (prepend(&end, &buflen, " (deleted)", 11) ||
3110            prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3111            prepend(&end, &buflen, "/", 1))
3112                end = ERR_PTR(-ENAMETOOLONG);
3113        return end;
3114}
3115
3116/*
3117 * Write full pathname from the root of the filesystem into the buffer.
3118 */
3119static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
3120{
3121        char *end, *retval;
3122        int len, seq = 0;
3123        int error = 0;
3124
3125        rcu_read_lock();
3126restart:
3127        end = buf + buflen;
3128        len = buflen;
3129        prepend(&end, &len, "\0", 1);
3130        if (buflen < 1)
3131                goto Elong;
3132        /* Get '/' right */
3133        retval = end-1;
3134        *retval = '/';
3135        read_seqbegin_or_lock(&rename_lock, &seq);
3136        while (!IS_ROOT(dentry)) {
3137                struct dentry *parent = dentry->d_parent;
3138
3139                prefetch(parent);
3140                error = prepend_name(&end, &len, &dentry->d_name);
3141                if (error)
3142                        break;
3143
3144                retval = end;
3145                dentry = parent;
3146        }
3147        if (!(seq & 1))
3148                rcu_read_unlock();
3149        if (need_seqretry(&rename_lock, seq)) {
3150                seq = 1;
3151                goto restart;
3152        }
3153        done_seqretry(&rename_lock, seq);
3154        if (error)
3155                goto Elong;
3156        return retval;
3157Elong:
3158        return ERR_PTR(-ENAMETOOLONG);
3159}
3160
3161char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3162{
3163        return __dentry_path(dentry, buf, buflen);
3164}
3165EXPORT_SYMBOL(dentry_path_raw);
3166
3167char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3168{
3169        char *p = NULL;
3170        char *retval;
3171
3172        if (d_unlinked(dentry)) {
3173                p = buf + buflen;
3174                if (prepend(&p, &buflen, "//deleted", 10) != 0)
3175                        goto Elong;
3176                buflen++;
3177        }
3178        retval = __dentry_path(dentry, buf, buflen);
3179        if (!IS_ERR(retval) && p)
3180        *p = '/';       /* restore '/' overridden with '\0' */
3181        return retval;
3182Elong:
3183        return ERR_PTR(-ENAMETOOLONG);
3184}
3185
3186static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3187                                    struct path *pwd)
3188{
3189        unsigned seq;
3190
3191        do {
3192                seq = read_seqcount_begin(&fs->seq);
3193                *root = fs->root;
3194                *pwd = fs->pwd;
3195        } while (read_seqcount_retry(&fs->seq, seq));
3196}
3197
3198/*
3199 * NOTE! The user-level library version returns a
3200 * character pointer. The kernel system call just
3201 * returns the length of the buffer filled (which
3202 * includes the ending '\0' character), or a negative
3203 * error value. So libc would do something like
3204 *
3205 *      char *getcwd(char * buf, size_t size)
3206 *      {
3207 *              int retval;
3208 *
3209 *              retval = sys_getcwd(buf, size);
3210 *              if (retval >= 0)
3211 *                      return buf;
3212 *              errno = -retval;
3213 *              return NULL;
3214 *      }
3215 */
3216SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3217{
3218        int error;
3219        struct path pwd, root;
3220        char *page = __getname();
3221
3222        if (!page)
3223                return -ENOMEM;
3224
3225        rcu_read_lock();
3226        get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3227
3228        error = -ENOENT;
3229        if (!d_unlinked(pwd.dentry)) {
3230                unsigned long len;
3231                char *cwd = page + PATH_MAX;
3232                int buflen = PATH_MAX;
3233
3234                prepend(&cwd, &buflen, "\0", 1);
3235                error = prepend_path(&pwd, &root, &cwd, &buflen);
3236                rcu_read_unlock();
3237
3238                if (error < 0)
3239                        goto out;
3240
3241                /* Unreachable from current root */
3242                if (error > 0) {
3243                        error = prepend_unreachable(&cwd, &buflen);
3244                        if (error)
3245                                goto out;
3246                }
3247
3248                error = -ERANGE;
3249                len = PATH_MAX + page - cwd;
3250                if (len <= size) {
3251                        error = len;
3252                        if (copy_to_user(buf, cwd, len))
3253                                error = -EFAULT;
3254                }
3255        } else {
3256                rcu_read_unlock();
3257        }
3258
3259out:
3260        __putname(page);
3261        return error;
3262}
3263
3264/*
3265 * Test whether new_dentry is a subdirectory of old_dentry.
3266 *
3267 * Trivially implemented using the dcache structure
3268 */
3269
3270/**
3271 * is_subdir - is new_dentry a subdirectory of old_dentry
3272 * @new_dentry: new dentry
3273 * @old_dentry: old dentry
3274 *
3275 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth),
3276 * or if the two dentries are the same. Returns 0 otherwise.
3277 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3278 */
3279
3280int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3281{
3282        int result;
3283        unsigned seq;
3284
3285        if (new_dentry == old_dentry)
3286                return 1;
3287
3288        do {
3289                /* for restarting inner loop in case of seq retry */
3290                seq = read_seqbegin(&rename_lock);
3291                /*
3292                 * Need rcu_read_lock() to protect against d_parent being
3293                 * changed under us by d_move()
3294                 */
3295                rcu_read_lock();
3296                if (d_ancestor(old_dentry, new_dentry))
3297                        result = 1;
3298                else
3299                        result = 0;
3300                rcu_read_unlock();
3301        } while (read_seqretry(&rename_lock, seq));
3302
3303        return result;
3304}
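/*
 * Illustrative use (hypothetical "victim" and "root" dentries, both pinned
 * by the caller): refuse to act on anything that does not live below a
 * given subtree:
 *
 *	if (!is_subdir(victim, root))
 *		return -EXDEV;
 */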
3305
3306static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3307{
3308        struct dentry *root = data;
3309        if (dentry != root) {
3310                if (d_unhashed(dentry) || !dentry->d_inode)
3311                        return D_WALK_SKIP;
3312
3313                if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3314                        dentry->d_flags |= DCACHE_GENOCIDE;
3315                        dentry->d_lockref.count--;
3316                }
3317        }
3318        return D_WALK_CONTINUE;
3319}
3320
3321void d_genocide(struct dentry *parent)
3322{
3323        d_walk(parent, parent, d_genocide_kill, NULL);
3324}
3325
3326void d_tmpfile(struct dentry *dentry, struct inode *inode)
3327{
3328        inode_dec_link_count(inode);
3329        BUG_ON(dentry->d_name.name != dentry->d_iname ||
3330                !hlist_unhashed(&dentry->d_alias) ||
3331                !d_unlinked(dentry));
3332        spin_lock(&dentry->d_parent->d_lock);
3333        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3334        dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3335                                (unsigned long long)inode->i_ino);
3336        spin_unlock(&dentry->d_lock);
3337        spin_unlock(&dentry->d_parent->d_lock);
3338        d_instantiate(dentry, inode);
3339}
3340EXPORT_SYMBOL(d_tmpfile);
3341
3342static __initdata unsigned long dhash_entries;
3343static int __init set_dhash_entries(char *str)
3344{
3345        if (!str)
3346                return 0;
3347        dhash_entries = simple_strtoul(str, &str, 0);
3348        return 1;
3349}
3350__setup("dhash_entries=", set_dhash_entries);
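/*
 * "dhash_entries=" is a kernel boot parameter; booting with e.g.
 * "dhash_entries=1048576" (hypothetical value) asks alloc_large_system_hash()
 * below for roughly 2^20 buckets instead of sizing the table from available
 * memory.
 */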
3351
3352static void __init dcache_init_early(void)
3353{
3354        unsigned int loop;
3355
3356        /* If hashes are distributed across NUMA nodes, defer
3357         * hash allocation until vmalloc space is available.
3358         */
3359        if (hashdist)
3360                return;
3361
3362        dentry_hashtable =
3363                alloc_large_system_hash("Dentry cache",
3364                                        sizeof(struct hlist_bl_head),
3365                                        dhash_entries,
3366                                        13,
3367                                        HASH_EARLY,
3368                                        &d_hash_shift,
3369                                        &d_hash_mask,
3370                                        0,
3371                                        0);
3372
3373        for (loop = 0; loop < (1U << d_hash_shift); loop++)
3374                INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3375}
3376
3377static void __init dcache_init(void)
3378{
3379        unsigned int loop;
3380
3381        /* 
3382         * A constructor could be added for stable state like the lists,
3383         * but it is probably not worth it because of the cache nature
3384         * of the dcache. 
3385         */
3386        dentry_cache = KMEM_CACHE(dentry,
3387                SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3388
3389        /* Hash may have been set up in dcache_init_early */
3390        if (!hashdist)
3391                return;
3392
3393        dentry_hashtable =
3394                alloc_large_system_hash("Dentry cache",
3395                                        sizeof(struct hlist_bl_head),
3396                                        dhash_entries,
3397                                        13,
3398                                        0,
3399                                        &d_hash_shift,
3400                                        &d_hash_mask,
3401                                        0,
3402                                        0);
3403
3404        for (loop = 0; loop < (1U << d_hash_shift); loop++)
3405                INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3406}
3407
3408/* SLAB cache for __getname() consumers */
3409struct kmem_cache *names_cachep __read_mostly;
3410EXPORT_SYMBOL(names_cachep);
3411
3412EXPORT_SYMBOL(d_genocide);
3413
3414void __init vfs_caches_init_early(void)
3415{
3416        dcache_init_early();
3417        inode_init_early();
3418}
3419
3420void __init vfs_caches_init(unsigned long mempages)
3421{
3422        unsigned long reserve;
3423
3424        /* Base hash sizes on available memory, with a reserve equal to
3425           150% of current kernel size */
3426
3427        reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3428        mempages -= reserve;
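        /*
         * Worked example with hypothetical numbers: if mempages is 1000000
         * and nr_free_pages() returns 900000, the kernel currently uses
         * 100000 pages, so reserve = min(150000, 999999) = 150000 and the
         * cache sizing below sees 850000 pages.
         */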
3429
3430        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3431                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3432
3433        dcache_init();
3434        inode_init();
3435        files_init(mempages);
3436        mnt_init();
3437        bdev_cache_init();
3438        chrdev_init();
3439}
3440