/* linux/fs/dcache.c */
   1/*
   2 * fs/dcache.c
   3 *
   4 * Complete reimplementation
   5 * (C) 1997 Thomas Schoebel-Theuer,
   6 * with heavy changes by Linus Torvalds
   7 */
   8
   9/*
  10 * Notes on the allocation strategy:
  11 *
  12 * The dcache is a master of the icache - whenever a dcache entry
  13 * exists, the inode will always exist. "iput()" is done either when
  14 * the dcache entry is deleted or garbage collected.
  15 */
  16
  17#include <linux/syscalls.h>
  18#include <linux/string.h>
  19#include <linux/mm.h>
  20#include <linux/fdtable.h>
  21#include <linux/fs.h>
  22#include <linux/fsnotify.h>
  23#include <linux/slab.h>
  24#include <linux/init.h>
  25#include <linux/hash.h>
  26#include <linux/cache.h>
  27#include <linux/module.h>
  28#include <linux/mount.h>
  29#include <linux/file.h>
  30#include <asm/uaccess.h>
  31#include <linux/security.h>
  32#include <linux/seqlock.h>
  33#include <linux/swap.h>
  34#include <linux/bootmem.h>
  35#include "internal.h"
  36
  37
/*
 * Scales the dcache size reported to the memory shrinker (see
 * shrink_dcache_memory()); 100 is neutral.  Presumably exposed as the
 * vfs_cache_pressure sysctl -- the sysctl table is not in this chunk.
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  40
/*
 * dcache_lock is the single global lock protecting the hash chains, the
 * per-sb LRU lists, d_count transitions and dentry_stat in this file.
 */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
/*
 * rename_lock: seqlock, presumably taken across rename/d_move so lockless
 * path walks can detect concurrent renames -- no user in this chunk.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);
  45
/* Slab cache from which all struct dentry objects are allocated/freed. */
static struct kmem_cache *dentry_cache __read_mostly;

/* Bytes of name storage embedded in the dentry itself (the d_iname tail). */
#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
  49
  50/*
  51 * This is the single most critical data structure when it comes
  52 * to the dcache: the hashtable for lookups. Somebody should try
  53 * to make this good - I've just made it work.
  54 *
  55 * This hash-function tries to avoid losing too many bits of hash
  56 * information, yet avoid using a prime hash-size or similar.
  57 */
#define D_HASHBITS     d_hash_shift
#define D_HASHMASK     d_hash_mask

/* Lookup hashtable parameters; initialised by boot code outside this chunk. */
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};
  69
/*
 * Final teardown of a dentry: free an externally-allocated name (one that
 * did not fit in d_iname) and return the object to the slab cache.  The
 * dentry must already be detached from its inode (d_alias empty).
 */
static void __d_free(struct dentry *dentry)
{
	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}
  77
  78static void d_callback(struct rcu_head *head)
  79{
  80        struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
  81        __d_free(dentry);
  82}
  83
  84/*
  85 * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
  86 * inside dcache_lock.
  87 */
static void d_free(struct dentry *dentry)
{
	/* Give the filesystem a last chance to release private state. */
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(dentry);
	else
		/* Was hashed: RCU walkers may still see it, defer the free. */
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}
  98
  99/*
 100 * Release the dentry's inode, using the filesystem
 101 * d_iput() operation if defined.
 102 */
/*
 * Detach the inode from the dentry and put it, using ->d_iput() if the
 * filesystem provides one.  Enters with both locks held, always exits
 * with both dropped (they must not be held across iput()).
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		/* Drop the locks before calling back into the fs / iput(). */
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}
 124
 125/*
 126 * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
 127 */
/* Put an unreferenced dentry at the head of its sb's LRU (reclaimed last,
 * since __shrink_dcache_sb() takes victims from the tail). */
static void dentry_lru_add(struct dentry *dentry)
{
	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}
 134
/* Put an unreferenced dentry at the tail of its sb's LRU, i.e. among the
 * first to be reclaimed (used by select_parent() to queue prune targets). */
static void dentry_lru_add_tail(struct dentry *dentry)
{
	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}
 141
 142static void dentry_lru_del(struct dentry *dentry)
 143{
 144        if (!list_empty(&dentry->d_lru)) {
 145                list_del(&dentry->d_lru);
 146                dentry->d_sb->s_nr_dentry_unused--;
 147                dentry_stat.nr_unused--;
 148        }
 149}
 150
/* As dentry_lru_del(), but reinitialise d_lru so list_empty() holds after. */
static void dentry_lru_del_init(struct dentry *dentry)
{
	if (likely(!list_empty(&dentry->d_lru))) {
		list_del_init(&dentry->d_lru);
		dentry->d_sb->s_nr_dentry_unused--;
		dentry_stat.nr_unused--;
	}
}
 159
 160/**
 161 * d_kill - kill dentry and return parent
 162 * @dentry: dentry to kill
 163 *
 164 * The dentry must already be unhashed and removed from the LRU.
 165 *
 166 * If this is the root of the dentry tree, return NULL.
 167 */
static struct dentry *d_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct dentry *parent;

	/* Unlink from the parent's d_subdirs list. */
	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/*drops the locks, at that point nobody can reach this dentry */
	dentry_iput(dentry);
	/* Record the parent before the dentry's memory goes away; reading
	 * it lockless is fine since the dentry is now unreachable. */
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	d_free(dentry);
	return parent;
}
 185
 186/* 
 187 * This is dput
 188 *
 189 * This is complicated by the fact that we do not want to put
 190 * dentries that are no longer on any hash chain on the unused
 191 * list: we'd much rather just get rid of them immediately.
 192 *
 193 * However, that implies that we have to traverse the dentry
 194 * tree upwards to the parents which might _also_ now be
 195 * scheduled for deletion (it may have been only waiting for
 196 * its last child to go away).
 197 *
 198 * This tail recursion is done by hand as we don't want to depend
 199 * on the compiler to always get this right (gcc generally doesn't).
 200 * Real recursion would eat up our stack space.
 201 */
 202
 203/*
 204 * dput - release a dentry
 205 * @dentry: dentry to release 
 206 *
 207 * Release a dentry. This will drop the usage count and if appropriate
 208 * call the dentry unlink method as well as removing it from the queues and
 209 * releasing its resources. If the parent dentries were scheduled for release
 210 * they too may now get deleted.
 211 *
 212 * no dcache lock, please.
 213 */
 214
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	/* Dropping the last reference may end up in iput(), which can
	 * sleep -- warn early if called from atomic context. */
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	/* Fast path: not the last reference, nothing else to do. */
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	/* Somebody re-grabbed it between the decrement and the lock. */
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	/* Still hashed and unreferenced: keep it cached on the LRU. */
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		dentry_lru_add(dentry);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	/* d_kill() drops both locks and returns the parent (if any),
	 * whose reference we now own -- hand-rolled tail recursion. */
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}
 260
 261/**
 262 * d_invalidate - invalidate a dentry
 263 * @dentry: dentry to invalidate
 264 *
 265 * Try to invalidate the dentry if it turns out to be
 266 * possible. If there are other dentries that can be
 267 * reached through this one we can't delete it and we
 268 * return -EBUSY. On success we return 0.
 269 *
 270 * no dcache lock.
 271 */
 272 
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		/* shrink_dcache_parent() takes dcache_lock itself. */
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	/* Busy non-directories are simply unhashed. */
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}
 317
 318/* This should be called _only_ with dcache_lock held */
 319
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	/* A referenced dentry must not sit on the unused LRU. */
	dentry_lru_del_init(dentry);
	return dentry;
}
 326
/* Out-of-line wrapper for __dget_locked(); caller holds dcache_lock. */
struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
 331
 332/**
 333 * d_find_alias - grab a hashed alias of inode
 334 * @inode: inode in question
 335 * @want_discon:  flag, used by d_splice_alias, to request
 336 *          that only a DISCONNECTED alias be returned.
 337 *
 338 * If inode has a hashed alias, or is a directory and has any alias,
 339 * acquire the reference to alias and return it. Otherwise return NULL.
 340 * Notice that if inode is a directory there can be only one alias and
 341 * it can be unhashed only if it has no children, or if it is the root
 342 * of a filesystem.
 343 *
 344 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 345 * any other hashed alias over that one unless @want_discon is set,
 346 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 347 */
 348
 349static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
 350{
 351        struct list_head *head, *next, *tmp;
 352        struct dentry *alias, *discon_alias=NULL;
 353
 354        head = &inode->i_dentry;
 355        next = inode->i_dentry.next;
 356        while (next != head) {
 357                tmp = next;
 358                next = tmp->next;
 359                prefetch(next);
 360                alias = list_entry(tmp, struct dentry, d_alias);
 361                if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 362                        if (IS_ROOT(alias) &&
 363                            (alias->d_flags & DCACHE_DISCONNECTED))
 364                                discon_alias = alias;
 365                        else if (!want_discon) {
 366                                __dget_locked(alias);
 367                                return alias;
 368                        }
 369                }
 370        }
 371        if (discon_alias)
 372                __dget_locked(discon_alias);
 373        return discon_alias;
 374}
 375
 376struct dentry * d_find_alias(struct inode *inode)
 377{
 378        struct dentry *de = NULL;
 379
 380        if (!list_empty(&inode->i_dentry)) {
 381                spin_lock(&dcache_lock);
 382                de = __d_find_alias(inode, 0);
 383                spin_unlock(&dcache_lock);
 384        }
 385        return de;
 386}
 387
 388/*
 389 *      Try to kill dentries associated with this inode.
 390 * WARNING: you must own a reference to inode.
 391 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			/* Take a reference so we can dput() after unlocking;
			 * the dput() then actually kills the dentry. */
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			/* The list may have changed while unlocked: rescan. */
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}
 411
 412/*
 413 * Throw away a dentry - free the inode, dput the parent.  This requires that
 414 * the LRU list has already been removed.
 415 *
 416 * Try to prune ancestors as well.  This is necessary to prevent
 417 * quadratic behavior of shrink_dcache_parent(), but is also expected
 418 * to be beneficial in reducing dentry cache fragmentation.
 419 */
static void prune_one_dentry(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
	__acquires(dcache_lock)
{
	__d_drop(dentry);
	/* d_kill() drops dentry->d_lock and dcache_lock. */
	dentry = d_kill(dentry);

	/*
	 * Prune ancestors.  Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		/* Ancestor still referenced: stop here.  Note we return
		 * holding dcache_lock, as the annotation above promises. */
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		dentry_lru_del_init(dentry);
		__d_drop(dentry);
		/* d_kill() drops both locks again; re-take dcache_lock. */
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}
 445
 446/*
 447 * Shrink the dentry LRU on a given superblock.
 448 * @sb   : superblock to shrink dentry LRU.
 449 * @count: If count is NULL, we prune all dentries on superblock.
 450 * @flags: If flags is non-zero, we need to do special processing based on
 451 * which flags are set. This means we don't need to maintain multiple
 452 * similar copies of this loop.
 453 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	LIST_HEAD(referenced);	/* recently-used entries to return to the LRU */
	LIST_HEAD(tmp);		/* victims selected for pruning */
	struct dentry *dentry;
	int cnt = 0;

	BUG_ON(!sb);
	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
	spin_lock(&dcache_lock);
	if (count != NULL)
		/* called from prune_dcache() and shrink_dcache_parent() */
		cnt = *count;
restart:
	if (count == NULL)
		/* No budget: take the entire LRU in one splice. */
		list_splice_init(&sb->s_dentry_lru, &tmp);
	else {
		/* Budgeted: pick up to cnt victims from the cold (tail) end. */
		while (!list_empty(&sb->s_dentry_lru)) {
			dentry = list_entry(sb->s_dentry_lru.prev,
					struct dentry, d_lru);
			BUG_ON(dentry->d_sb != sb);

			spin_lock(&dentry->d_lock);
			/*
			 * If we are honouring the DCACHE_REFERENCED flag and
			 * the dentry has this flag set, don't free it. Clear
			 * the flag and put it back on the LRU.
			 */
			if ((flags & DCACHE_REFERENCED)
				&& (dentry->d_flags & DCACHE_REFERENCED)) {
				dentry->d_flags &= ~DCACHE_REFERENCED;
				list_move_tail(&dentry->d_lru, &referenced);
				spin_unlock(&dentry->d_lock);
			} else {
				list_move_tail(&dentry->d_lru, &tmp);
				spin_unlock(&dentry->d_lock);
				cnt--;
				if (!cnt)
					break;
			}
			cond_resched_lock(&dcache_lock);
		}
	}
	/* Dispose of the victims collected on tmp. */
	while (!list_empty(&tmp)) {
		dentry = list_entry(tmp.prev, struct dentry, d_lru);
		dentry_lru_del_init(dentry);
		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup.  Do not free
		 * it - just keep it off the LRU list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		/* dentry->d_lock was dropped in prune_one_dentry() */
		cond_resched_lock(&dcache_lock);
	}
	/* Entries may have been re-added while the lock was dropped. */
	if (count == NULL && !list_empty(&sb->s_dentry_lru))
		goto restart;
	if (count != NULL)
		*count = cnt;	/* hand the unused budget back to the caller */
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lock);
}
 522
 523/**
 524 * prune_dcache - shrink the dcache
 525 * @count: number of entries to try to free
 526 *
 527 * Shrink the dcache. This is done when we need more memory, or simply when we
 528 * need to unmount something (at which point we need to unuse all dentries).
 529 *
 530 * This function may fail to free any resources if all the dentries are in use.
 531 */
static void prune_dcache(int count)
{
	struct super_block *sb;
	int w_count;		/* per-sb scan budget */
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;		/* entries actually pruned on this sb */

	if (unused == 0 || count == 0)
		return;
	spin_lock(&dcache_lock);
restart:
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;	/* pin the sb while we drop sb_lock */
		/* Now, we reclaim unused dentries with fairness.
		 * We reclaim them same percentage from each superblock.
		 * We calculate number of dentries to scan on this sb
		 * as follows, but the implementation is arranged to avoid
		 * overflows:
		 * number of dentries to scan on this sb =
		 * count * (number of dentries on this sb /
		 * number of dentries in the machine)
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * We need to be sure this filesystem isn't being unmounted,
		 * otherwise we could race with generic_shutdown_super(), and
		 * end up holding a reference to an inode while the filesystem
		 * is unmounted.  So we try to get s_umount, and make sure
		 * s_root isn't NULL.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				spin_unlock(&dcache_lock);
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				/* w_count now holds the unpruned remainder. */
				pruned -= w_count;
				spin_lock(&dcache_lock);
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		count -= pruned;
		/*
		 * restart only when sb is no longer on the list and
		 * we have more work to do.
		 */
		if (__put_super_and_need_restart(sb) && count > 0) {
			spin_unlock(&sb_lock);
			goto restart;
		}
	}
	spin_unlock(&sb_lock);
	spin_unlock(&dcache_lock);
}
 600
 601/**
 602 * shrink_dcache_sb - shrink dcache for a superblock
 603 * @sb: superblock
 604 *
 605 * Shrink the dcache for the specified super block. This
 606 * is used to free the dcache before unmounting a file
 607 * system
 608 */
void shrink_dcache_sb(struct super_block * sb)
{
	/* count == NULL means "prune every unused dentry on this sb". */
	__shrink_dcache_sb(sb, NULL, 0);
}
 613
 614/*
 615 * destroy a single subtree of dentries for unmount
 616 * - see the comments on shrink_dcache_for_umount() for a description of the
 617 *   locking
 618 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;	/* freed dentries; nr_dentry fixed up at end */

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	dentry_lru_del_init(dentry);
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				dentry_lru_del_init(loop);
				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			/* Anything still referenced at umount time is a
			 * refcount leak somewhere -- report and BUG. */
			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry))
				parent = NULL;
			else {
				parent = dentry->d_parent;
				/* drop the child's reference on its parent */
				atomic_dec(&parent->d_count);
			}

			list_del(&dentry->d_u.d_child);
			detached++;

			/* Detach and put the inode by hand -- no locks needed
			 * here, see the comment above shrink_dcache_for_umount()
			 * for why that is safe during umount. */
			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}
 714
 715/*
 716 * destroy the dentries attached to a superblock on unmounting
 717 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 718 *   removing the dentry from the system lists and hashes because:
 719 *   - the superblock is detached from all mountings and open files, so the
 720 *     dentry trees will not be rearranged by the VFS
 721 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 722 *     any dentries belonging to this superblock that it comes across
 723 *   - the filesystem itself is no longer permitted to rearrange the dentries
 724 *     in this superblock
 725 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	/* Caller must hold s_umount exclusively: the trylock must fail. */
	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	/* Drop the reference that s_root held on the root dentry. */
	atomic_dec(&dentry->d_count);
	shrink_dcache_for_umount_subtree(dentry);

	/* Also destroy any anonymous (disconnected) subtrees on s_anon. */
	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
 743
 744/*
 745 * Search for at least 1 mount point in the dentry's subdirs.
 746 * We descend to the next level whenever the d_subdirs
 747 * list is non-empty and continue searching.
 748 */
 749 
 750/**
 751 * have_submounts - check for mounts over a dentry
 752 * @parent: dentry to check.
 753 *
 754 * Return true if the parent or its subdirectories contain
 755 * a mount point
 756 */
 757 
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	/* Walk the children of this_parent. */
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		/* Descend into non-empty subdirectories. */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}
 795
 796/*
 797 * Search the dentry child list for the specified parent,
 798 * and move any unused dentries to the end of the unused
 799 * list for prune_dcache(). We descend to the next level
 800 * whenever the d_subdirs list is non-empty and continue
 801 * searching.
 802 *
 803 * It returns zero iff there are no unused children,
 804 * otherwise  it returns the number of children moved to
 805 * the end of the unused list. This may not be the total
 806 * number of unused children, because select_parent can
 807 * drop the lock and return early due to latency
 808 * constraints.
 809 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;		/* zero-refcount children queued for pruning */

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	/* Walk the children of this_parent. */
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		dentry_lru_del_init(dentry);
		/* 
		 * move only zero ref count dentries to the end 
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			dentry_lru_add_tail(dentry);
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}
 863
 864/**
 865 * shrink_dcache_parent - prune dcache
 866 * @parent: parent of entries to prune
 867 *
 868 * Prune the dcache to remove unused children of the parent dentry.
 869 */
 870 
 871void shrink_dcache_parent(struct dentry * parent)
 872{
 873        struct super_block *sb = parent->d_sb;
 874        int found;
 875
 876        while ((found = select_parent(parent)) != 0)
 877                __shrink_dcache_sb(sb, &found, 0);
 878}
 879
 880/*
 881 * Scan `nr' dentries and return the number which remain.
 882 *
 883 * We need to avoid reentering the filesystem if the caller is performing a
 884 * GFP_NOFS allocation attempt.  One example deadlock is:
 885 *
 886 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 887 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 888 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 889 *
 890 * In this case we return -1 to tell the caller that we baled.
 891 */
 892static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
 893{
 894        if (nr) {
 895                if (!(gfp_mask & __GFP_FS))
 896                        return -1;
 897                prune_dcache(nr);
 898        }
 899        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 900}
 901
/*
 * Hook the dcache into the generic memory shrinker so that VM pressure
 * can reclaim unused dentries via shrink_dcache_memory().
 */
static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};
 906
 907/**
 908 * d_alloc      -       allocate a dcache entry
 909 * @parent: parent of entry to allocate
 910 * @name: qstr of the name
 911 *
 912 * Allocates a dentry. It returns %NULL if there is insufficient memory
 913 * available. On a success the dentry is returned. The name passed in is
 914 * copied and the copy passed in may be reused after this call.
 915 */
 916 
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * Short names live in the embedded d_iname buffer; longer ones
	 * get a separately kmalloc'd buffer.
	 */
	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else  {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;	/* keep the private copy NUL-terminated */

	/* The caller receives the single initial reference. */
	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;	/* not visible to lookups yet */
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;			/* starts out negative */
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
#ifdef CONFIG_PROFILING
	dentry->d_cookie = NULL;
#endif
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		/* Pin the parent and inherit its superblock. */
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else {
		/* Root-like dentry: d_u.d_child is unused but must be sane. */
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	/* Publish on the parent's child list under dcache_lock. */
	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}
 974
 975struct dentry *d_alloc_name(struct dentry *parent, const char *name)
 976{
 977        struct qstr q;
 978
 979        q.name = name;
 980        q.len = strlen(name);
 981        q.hash = full_name_hash(q.name, q.len);
 982        return d_alloc(parent, &q);
 983}
 984
/*
 * Attach @inode to @dentry.  The caller must hold dcache_lock.
 * A NULL @inode leaves the dentry negative.
 */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	if (inode)
		list_add(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	/* Let fsnotify update its state for the newly instantiated dentry. */
	fsnotify_d_instantiate(dentry, inode);
}
 993
 994/**
 995 * d_instantiate - fill in inode information for a dentry
 996 * @entry: dentry to complete
 997 * @inode: inode to attach to this dentry
 998 *
 999 * Fill in inode information in the entry.
1000 *
1001 * This turns negative dentries into productive full members
1002 * of society.
1003 *
1004 * NOTE! This assumes that the inode count has been incremented
1005 * (or otherwise set) by the caller to indicate that it is now
1006 * in use by the dcache.
1007 */
1008 
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	/* The dentry must not already be attached to any inode. */
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	__d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	/* Security hook is deliberately called outside dcache_lock. */
	security_d_instantiate(entry, inode);
}
1017
1018/**
1019 * d_instantiate_unique - instantiate a non-aliased dentry
1020 * @entry: dentry to instantiate
1021 * @inode: inode to attach to this dentry
1022 *
1023 * Fill in inode information in the entry. On success, it returns NULL.
1024 * If an unhashed alias of "entry" already exists, then we return the
1025 * aliased dentry instead and drop one reference to inode.
1026 *
1027 * Note that in order to avoid conflicts with rename() etc, the caller
1028 * had better be holding the parent directory semaphore.
1029 *
1030 * This also assumes that the inode count has been incremented
1031 * (or otherwise set) by the caller to indicate that it is now
1032 * in use by the dcache.
1033 */
1034static struct dentry *__d_instantiate_unique(struct dentry *entry,
1035                                             struct inode *inode)
1036{
1037        struct dentry *alias;
1038        int len = entry->d_name.len;
1039        const char *name = entry->d_name.name;
1040        unsigned int hash = entry->d_name.hash;
1041
1042        if (!inode) {
1043                __d_instantiate(entry, NULL);
1044                return NULL;
1045        }
1046
1047        list_for_each_entry(alias, &inode->i_dentry, d_alias) {
1048                struct qstr *qstr = &alias->d_name;
1049
1050                if (qstr->hash != hash)
1051                        continue;
1052                if (alias->d_parent != entry->d_parent)
1053                        continue;
1054                if (qstr->len != len)
1055                        continue;
1056                if (memcmp(qstr->name, name, len))
1057                        continue;
1058                dget_locked(alias);
1059                return alias;
1060        }
1061
1062        __d_instantiate(entry, inode);
1063        return NULL;
1064}
1065
struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	/* The dentry must not already be attached to an inode. */
	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		/* @entry was instantiated and consumed the inode reference. */
		security_d_instantiate(entry, inode);
		return NULL;
	}

	/*
	 * An existing alias was found (already dget'ed by
	 * __d_instantiate_unique); drop the inode reference the caller
	 * passed in, since @entry did not consume it.
	 */
	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
1085
1086EXPORT_SYMBOL(d_instantiate_unique);
1087
1088/**
1089 * d_alloc_root - allocate root dentry
1090 * @root_inode: inode to allocate the root for
1091 *
1092 * Allocate a root ("/") dentry for the inode given. The inode is
1093 * instantiated and returned. %NULL is returned if there is insufficient
1094 * memory or the inode passed is %NULL.
1095 */
1096 
1097struct dentry * d_alloc_root(struct inode * root_inode)
1098{
1099        struct dentry *res = NULL;
1100
1101        if (root_inode) {
1102                static const struct qstr name = { .name = "/", .len = 1 };
1103
1104                res = d_alloc(NULL, &name);
1105                if (res) {
1106                        res->d_sb = root_inode->i_sb;
1107                        res->d_parent = res;
1108                        d_instantiate(res, root_inode);
1109                }
1110        }
1111        return res;
1112}
1113
/*
 * Map a (parent, name-hash) pair to its bucket in dentry_hashtable.
 * The parent pointer is mixed in so identical name hashes under
 * different directories land in different buckets.
 */
static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
1121
1122/**
1123 * d_obtain_alias - find or allocate a dentry for a given inode
1124 * @inode: inode to allocate the dentry for
1125 *
1126 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1127 * similar open by handle operations.  The returned dentry may be anonymous,
1128 * or may have a full name (if the inode was already in the cache).
1129 *
1130 * When called on a directory inode, we must ensure that the inode only ever
1131 * has one dentry.  If a dentry is found, that is returned instead of
1132 * allocating a new one.
1133 *
1134 * On successful return, the reference to the inode has been transferred
1135 * to the dentry.  In case of an error the reference on the inode is released.
1136 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
1138 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1139 */
1140struct dentry *d_obtain_alias(struct inode *inode)
1141{
1142        static const struct qstr anonstring = { .name = "" };
1143        struct dentry *tmp;
1144        struct dentry *res;
1145
1146        if (!inode)
1147                return ERR_PTR(-ESTALE);
1148        if (IS_ERR(inode))
1149                return ERR_CAST(inode);
1150
1151        res = d_find_alias(inode);
1152        if (res)
1153                goto out_iput;
1154
1155        tmp = d_alloc(NULL, &anonstring);
1156        if (!tmp) {
1157                res = ERR_PTR(-ENOMEM);
1158                goto out_iput;
1159        }
1160        tmp->d_parent = tmp; /* make sure dput doesn't croak */
1161
1162        spin_lock(&dcache_lock);
1163        res = __d_find_alias(inode, 0);
1164        if (res) {
1165                spin_unlock(&dcache_lock);
1166                dput(tmp);
1167                goto out_iput;
1168        }
1169
1170        /* attach a disconnected dentry */
1171        spin_lock(&tmp->d_lock);
1172        tmp->d_sb = inode->i_sb;
1173        tmp->d_inode = inode;
1174        tmp->d_flags |= DCACHE_DISCONNECTED;
1175        tmp->d_flags &= ~DCACHE_UNHASHED;
1176        list_add(&tmp->d_alias, &inode->i_dentry);
1177        hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
1178        spin_unlock(&tmp->d_lock);
1179
1180        spin_unlock(&dcache_lock);
1181        return tmp;
1182
1183 out_iput:
1184        iput(inode);
1185        return res;
1186}
1187EXPORT_SYMBOL_GPL(d_obtain_alias);
1188
1189/**
1190 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1191 * @inode:  the inode which may have a disconnected dentry
1192 * @dentry: a negative dentry which we want to point to the inode.
1193 *
1194 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1195 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1196 * and return it, else simply d_add the inode to the dentry and return NULL.
1197 *
1198 * This is needed in the lookup routine of any filesystem that is exportable
1199 * (via knfsd) so that we can build dcache paths to directories effectively.
1200 *
1201 * If a dentry was found and moved, then it is returned.  Otherwise NULL
1202 * is returned.  This matches the expected return value of ->lookup.
1203 *
1204 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		/* A directory must have at most one alias; look for it. */
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			/* Hash the negative dentry, then move the existing
			 * alias into its place (name and parent). */
			d_rehash(dentry);
			d_move(new, dentry);
			/* Drop the inode reference the caller passed in. */
			iput(inode);
		} else {
			/* already taking dcache_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		/* Non-directory (or negative): plain d_add() does it all. */
		d_add(dentry, inode);
	return new;
}
1230
1231/**
1232 * d_add_ci - lookup or allocate new dentry with case-exact name
1233 * @inode:  the inode case-insensitive lookup has found
1234 * @dentry: the negative dentry that was passed to the parent's lookup func
1235 * @name:   the case-exact name to be associated with the returned dentry
1236 *
1237 * This is to avoid filling the dcache with case-insensitive names to the
1238 * same inode, only the actual correct case is stored in the dcache for
1239 * case-insensitive filesystems.
1240 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
1243 *
1244 * If no entry exists with the exact case name, allocate new dentry with
1245 * the exact case, and return the spliced entry.
1246 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/* Does a dentry matching the name exist already? */
	found = d_hash_and_lookup(dentry->d_parent, name);
	/* If not, create it now and return */
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}
		/* Splice in any pre-existing alias (NFS-style directories). */
		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}
	/* Matching dentry exists, check if it is negative. */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		/*
		 * Already have the inode and the dentry attached, decrement
		 * the reference count to balance the iget() done
		 * earlier on.  We found the dentry using d_lookup() so it
		 * cannot be disconnected and thus we do not need to worry
		 * about any NFS/disconnectedness issues here.
		 */
		iput(inode);
		return found;
	}
	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED),
	 * in which case d_move() that in place of the found dentry.
	 */
	if (!S_ISDIR(inode->i_mode)) {
		/* Not a directory; everything is easy. */
		d_instantiate(found, inode);
		return found;
	}
	spin_lock(&dcache_lock);
	if (list_empty(&inode->i_dentry)) {
		/*
		 * Directory without a 'disconnected' dentry; we need to do
		 * d_instantiate() by hand because it takes dcache_lock which
		 * we already hold.
		 */
		__d_instantiate(found, inode);
		spin_unlock(&dcache_lock);
		security_d_instantiate(found, inode);
		return found;
	}
	/*
	 * Directory with a 'disconnected' dentry; get a reference to the
	 * 'disconnected' dentry.
	 */
	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
	dget_locked(new);
	spin_unlock(&dcache_lock);
	/* Do security voodoo. */
	security_d_instantiate(found, inode);
	/* Move new in place of found. */
	d_move(new, found);
	/* Balance the iget() we did above. */
	iput(inode);
	/* Throw away found. */
	dput(found);
	/* Use new as the actual dentry. */
	return new;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
1331
1332/**
1333 * d_lookup - search for a dentry
1334 * @parent: parent dentry
1335 * @name: qstr of name we wish to find
1336 *
1337 * Searches the children of the parent dentry for the name in question. If
1338 * the dentry is found its reference count is incremented and the dentry
1339 * is returned. The caller must use d_put to free the entry when it has
1340 * finished using it. %NULL is returned on failure.
1341 *
1342 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
1343 * Memory barriers are used while updating and doing lockless traversal. 
1344 * To avoid races with d_move while rename is happening, d_lock is used.
1345 *
1346 * Overflows in memcmp(), while d_move, are avoided by keeping the length
1347 * and name pointer in one structure pointed by d_qstr.
1348 *
1349 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
1350 * lookup is going on.
1351 *
1352 * The dentry unused LRU is not updated even if lookup finds the required dentry
1353 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
1354 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
1355 * acquisition.
1356 *
1357 * d_lookup() is protected against the concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
1359 */
1360
1361struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1362{
1363        struct dentry * dentry = NULL;
1364        unsigned long seq;
1365
1366        do {
1367                seq = read_seqbegin(&rename_lock);
1368                dentry = __d_lookup(parent, name);
1369                if (dentry)
1370                        break;
1371        } while (read_seqretry(&rename_lock, seq));
1372        return dentry;
1373}
1374
/*
 * Lockless core of d_lookup(): walk the hash chain under RCU, taking
 * only the candidate's d_lock to re-validate against concurrent d_move.
 * May miss during a rename; the caller rechecks via rename_lock.
 */
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		/* Cheap lockless filters before taking d_lock. */
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things.  Don't bother checking the hash because we're
		 * about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/* non-existing due to RCU? */
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			/* Filesystem-specific comparison (case folding etc). */
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		/* Match: take a reference while still holding d_lock. */
		atomic_inc(&dentry->d_count);
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
 	}
 	rcu_read_unlock();

 	return found;
}
1435
1436/**
1437 * d_hash_and_lookup - hash the qstr then search for a dentry
1438 * @dir: Directory to search in
1439 * @name: qstr of name we wish to find
1440 *
1441 * On hash failure or on lookup failure NULL is returned.
1442 */
1443struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1444{
1445        struct dentry *dentry = NULL;
1446
1447        /*
1448         * Check for a fs-specific hash function. Note that we must
1449         * calculate the standard hash first, as the d_op->d_hash()
1450         * routine may choose to leave the hash value unchanged.
1451         */
1452        name->hash = full_name_hash(name->name, name->len);
1453        if (dir->d_op && dir->d_op->d_hash) {
1454                if (dir->d_op->d_hash(dir, name) < 0)
1455                        goto out;
1456        }
1457        dentry = d_lookup(dir, name);
1458out:
1459        return dentry;
1460}
1461
1462/**
1463 * d_validate - verify dentry provided from insecure source
1464 * @dentry: The dentry alleged to be valid child of @dparent
1465 * @dparent: The parent dentry (known to be valid)
1466 *
1467 * An insecure source has sent us a dentry, here we verify it and dget() it.
1468 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
1470 */
1471 
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	/*
	 * NOTE(review): d_parent is read from a not-yet-verified object;
	 * kmem_ptr_validate() only shows the pointer lies within the slab,
	 * so the object may have been freed/reused.  Best-effort only.
	 */
	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	/* Confirm the dentry really sits on the hash chain it claims. */
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp,base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			/* Valid: return it pinned. */
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}
1500
1501/*
1502 * When a file is deleted, we have two options:
1503 * - turn this dentry into a negative dentry
1504 * - unhash this dentry and free it.
1505 *
1506 * Usually, we want to just turn this into
1507 * a negative dentry, but if anybody else is
1508 * currently using the dentry or the inode
1509 * we can't do that and we fall back on removing
1510 * it from the hash queues and waiting for
1511 * it to be deleted later when it has no users
1512 */
1513 
1514/**
1515 * d_delete - delete a dentry
1516 * @dentry: The dentry to delete
1517 *
1518 * Turn the dentry into a negative dentry if possible, otherwise
1519 * remove it from the hash queues so it can be deleted later
1520 */
1521 
void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		/*
		 * Sole user: turn the dentry negative by detaching the
		 * inode.  NOTE(review): no unlock on this path - this
		 * relies on dentry_iput() dropping both d_lock and
		 * dcache_lock; confirm against its definition.
		 */
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	/* Others hold references: unhash so it dies once they let go. */
	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}
1545
/*
 * Insert @entry on hash chain @list and mark it hashed.
 * Callers (d_rehash, d_move_locked) hold dcache_lock and entry->d_lock.
 */
static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{

	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}
1552
/* Hash @entry on the chain determined by its parent and name hash. */
static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}
1557
1558/**
1559 * d_rehash     - add an entry back to the hash
1560 * @entry: dentry to add to the hash
1561 *
1562 * Adds a dentry to the hash according to its name.
1563 */
1564 
void d_rehash(struct dentry * entry)
{
	/* Both dcache_lock and d_lock guard the hash chain and flag. */
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}
1573
/*
 * Swap two same-typed lvalues in place.  Note: the arguments are
 * evaluated more than once, so they must not have side effects.
 */
#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)
1577
1578/*
1579 * When switching names, the actual string doesn't strictly have to
1580 * be preserved in the target - because we're dropping the target
1581 * anyway. As such, we can just do a simple memcpy() to copy over
1582 * the new name before we switch.
1583 *
1584 * Note that we have to be a lot more careful about getting the hash
1585 * switched - we have to switch the hash value properly even if it
1586 * then no longer matches the actual (corrupted) string of the target.
1587 * The hash value has to match the hash queue that the dentry is on..
1588 */
/*
 * Exchange the names of @dentry and @target; called from d_move_locked()
 * with both dentries' d_lock held.  Short names live in the embedded
 * d_iname buffer, long ones in external storage - four cases in all.
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			do_switch(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Just copy target to dentry;
			 * target keeps its own buffer and len - it is being
			 * cannibalized anyway (see the caller) - so skip
			 * the len swap below.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	do_switch(dentry->d_name.len, target->d_name.len);
}
1629
1630/*
1631 * We cannibalize "target" when moving dentry on top of it,
1632 * because it's going to be thrown away anyway. We could be more
1633 * polite about it, though.
1634 *
1635 * This forceful removal will result in ugly /proc output if
1636 * somebody holds a file open that got deleted due to a rename.
1637 * We could be nicer about the deleted file, and let it show
1638 * up under the name it had before it was deleted rather than
1639 * under the original name of the file that was moved on top of it.
1640 */
1641 
1642/*
1643 * d_move_locked - move a dentry
1644 * @dentry: entry to move
1645 * @target: new dentry
1646 *
1647 * Update the dcache to reflect the move of a file name. Negative
1648 * dcache entries should not be moved in this way.
1649 */
static void d_move_locked(struct dentry * dentry, struct dentry * target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	/* Invalidate lockless __d_lookup() walkers for the duration. */
	write_seqlock(&rename_lock);
	/*
	 * XXXX: do we really need to take target->d_lock?
	 *
	 * Take both d_locks in address order so concurrent movers cannot
	 * deadlock ABBA-style.
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	/* Rehash dentry under the name/parent it is about to take over. */
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	/* Detach both from their parents' child lists before relinking. */
	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		/* dentry was a root: it takes target's parent, while the
		 * doomed target becomes its own parent, off all lists. */
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		do_switch(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	/* dentry->d_lock is still held across the fsnotify event. */
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}
1707
1708/**
1709 * d_move - move a dentry
1710 * @dentry: entry to move
1711 * @target: new dentry
1712 *
1713 * Update the dcache to reflect the move of a file name. Negative
1714 * dcache entries should not be moved in this way.
1715 */
1716
void d_move(struct dentry * dentry, struct dentry * target)
{
        /* d_move_locked() requires dcache_lock; take it around the move. */
        spin_lock(&dcache_lock);
        d_move_locked(dentry, target);
        spin_unlock(&dcache_lock);
}
1723
1724/**
1725 * d_ancestor - search for an ancestor
1726 * @p1: ancestor dentry
1727 * @p2: child dentry
1728 *
1729 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
1730 * an ancestor of p2, else NULL.
1731 */
1732struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
1733{
1734        struct dentry *p;
1735
1736        for (p = p2; !IS_ROOT(p); p = p->d_parent) {
1737                if (p->d_parent == p1)
1738                        return p;
1739        }
1740        return NULL;
1741}
1742
1743/*
1744 * This helper attempts to cope with remotely renamed directories
1745 *
1746 * It assumes that the caller is already holding
1747 * dentry->d_parent->d_inode->i_mutex and the dcache_lock
1748 *
1749 * Note: If ever the locking in lock_rename() changes, then please
1750 * remember to update this too...
1751 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
	__releases(dcache_lock)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_ancestor(alias, dentry))
		goto out_err;

	/* See lock_rename() */
	/*
	 * trylock only: we already hold i_mutex and dcache_lock, so
	 * blocking here could deadlock against a concurrent rename.
	 * -EBUSY lets the caller deal with the contended case.
	 */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	/* Splice the alias into dentry's place in the tree. */
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	/* All paths drop dcache_lock (see __releases annotation above). */
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
1786
1787/*
1788 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
1789 * named dentry in place of the dentry to be replaced.
1790 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	/* Exchange names and hashes between the two dentries. */
	switch_names(dentry, anon);
	do_switch(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	/*
	 * Exchange parents. A dentry that was its own parent (IS_ROOT)
	 * must map to the *other* dentry, not to its old self.
	 */
	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	/* anon is now connected into the tree proper. */
	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
1817
1818/**
1819 * d_materialise_unique - introduce an inode into the tree
1820 * @dentry: candidate dentry
1821 * @inode: inode to bind to the dentry, to which aliases may be attached
1822 *
1823 * Introduces an dentry into the tree, substituting an extant disconnected
1824 * root directory alias in its place if there is one
1825 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	/* Negative dentry: instantiate as NULL and just hash it. */
	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this an anonymous mountpoint that we could splice
			 * into our tree? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!) avoid directory aliasing */
			/* __d_unalias() drops dcache_lock on all paths. */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		/* Caller's dentry was used; inode reference is consumed. */
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	/* An existing alias was used instead; drop the surplus inode ref. */
	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
}
1889
/*
 * Copy namelen bytes of str in front of *buffer, growing the path
 * string backwards. *buflen is consumed even when the copy does not
 * fit (callers bail out on the error anyway), and *buffer is only
 * moved on success.
 */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	int remaining = *buflen - namelen;

	*buflen = remaining;
	if (remaining < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
1899
/*
 * Prepend a qstr's name to the path being built in *buffer.
 * Same contract as prepend(): *buflen is consumed even on failure.
 */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}
1904
1905/**
1906 * __d_path - return the path of a dentry
1907 * @path: the dentry/vfsmount to report
1908 * @root: root vfsmnt/dentry (may be modified by this function)
1909 * @buffer: buffer to return value in
1910 * @buflen: buffer length
1911 *
1912 * Convert a dentry into an ASCII path name. If the entry has been deleted
1913 * the string " (deleted)" is appended. Note that this is ambiguous.
1914 *
1915 * Returns the buffer or an error code if the path was too long.
1916 *
1917 * "buflen" should be positive. Caller holds the dcache_lock.
1918 *
1919 * If path is not reachable from the supplied root, then the value of
1920 * root is changed (without modifying refcounts).
1921 */
char *__d_path(const struct path *path, struct path *root,
	       char *buffer, int buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	char *end = buffer + buflen;
	char *retval;

	/* vfsmount_lock stabilises the mnt_parent/mnt_mountpoint chain. */
	spin_lock(&vfsmount_lock);
	prepend(&end, &buflen, "\0", 1);
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, " (deleted)", 10) != 0))
			goto Elong;

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	/* Walk upwards, prepending "/name" for each component. */
	for (;;) {
		struct dentry * parent;

		if (dentry == root->dentry && vfsmnt == root->mnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt) {
				goto global_root;
			}
			/* Cross the mount point into the parent mount. */
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;
		retval = end;
		dentry = parent;
	}

out:
	spin_unlock(&vfsmount_lock);
	return retval;

global_root:
	/*
	 * Ran off the top without reaching the supplied root (e.g. in a
	 * different namespace): emit the root dentry's name and tell the
	 * caller by rewriting *root. No refcounts are taken.
	 */
	retval += 1;	/* hit the slash */
	if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
		goto Elong;
	root->mnt = vfsmnt;
	root->dentry = dentry;
	goto out;

Elong:
	retval = ERR_PTR(-ENAMETOOLONG);
	goto out;
}
1981
1982/**
1983 * d_path - return the path of a dentry
1984 * @path: path to report
1985 * @buf: buffer to return value in
1986 * @buflen: buffer length
1987 *
1988 * Convert a dentry into an ASCII path name. If the entry has been deleted
1989 * the string " (deleted)" is appended. Note that this is ambiguous.
1990 *
1991 * Returns the buffer or an error code if the path was too long.
1992 *
1993 * "buflen" should be positive.
1994 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res;
	struct path root;
	struct path tmp;

	/*
	 * We have various synthetic filesystems that never get mounted.  On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed.  They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/.  The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	/* Snapshot the caller's root with a reference held. */
	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	/* __d_path() may rewrite its root argument; pass a scratch copy. */
	tmp = root;
	res = __d_path(path, &tmp, buf, buflen);
	spin_unlock(&dcache_lock);
	path_put(&root);
	return res;
}
2022
2023/*
2024 * Helper function for dentry_operations.d_dname() members
2025 */
2026char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2027                        const char *fmt, ...)
2028{
2029        va_list args;
2030        char temp[64];
2031        int sz;
2032
2033        va_start(args, fmt);
2034        sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2035        va_end(args);
2036
2037        if (sz > sizeof(temp) || sz > buflen)
2038                return ERR_PTR(-ENAMETOOLONG);
2039
2040        buffer += buflen - sz;
2041        return memcpy(buffer, temp, sz);
2042}
2043
2044/*
2045 * Write full pathname from the root of the filesystem into the buffer.
2046 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	spin_lock(&dcache_lock);
	prepend(&end, &buflen, "\0", 1);
	/* Unlike __d_path(), deletion is flagged with "//deleted". */
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, "//deleted", 9) != 0))
			goto Elong;
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	/* Walk to the filesystem root, ignoring mounts entirely. */
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;

		retval = end;
		dentry = parent;
	}
	spin_unlock(&dcache_lock);
	return retval;
Elong:
	spin_unlock(&dcache_lock);
	return ERR_PTR(-ENAMETOOLONG);
}
2080
2081/*
2082 * NOTE! The user-level library version returns a
2083 * character pointer. The kernel system call just
2084 * returns the length of the buffer filled (which
2085 * includes the ending '\0' character), or a negative
2086 * error value. So libc would do something like
2087 *
2088 *      char *getcwd(char * buf, size_t size)
2089 *      {
2090 *              int retval;
2091 *
2092 *              retval = sys_getcwd(buf, size);
2093 *              if (retval >= 0)
2094 *                      return buf;
2095 *              errno = -retval;
2096 *              return NULL;
2097 *      }
2098 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	/* Snapshot pwd and root with references held. */
	read_lock(&current->fs->lock);
	pwd = current->fs->pwd;
	path_get(&pwd);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) {
		unsigned long len;
		/* __d_path() may rewrite its root arg; use a scratch copy. */
		struct path tmp = root;
		char * cwd;

		cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = PTR_ERR(cwd);
		if (IS_ERR(cwd))
			goto out;

		error = -ERANGE;
		/* The path was built at the end of the page. */
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}
2146
2147/*
2148 * Test whether new_dentry is a subdirectory of old_dentry.
2149 *
2150 * Trivially implemented using the dcache structure
2151 */
2152
2153/**
2154 * is_subdir - is new dentry a subdirectory of old_dentry
2155 * @new_dentry: new dentry
2156 * @old_dentry: old dentry
2157 *
2158 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
2159 * Returns 0 otherwise.
2160 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
2161 */
2162  
2163int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2164{
2165        int result;
2166        unsigned long seq;
2167
2168        /* FIXME: This is old behavior, needed? Please check callers. */
2169        if (new_dentry == old_dentry)
2170                return 1;
2171
2172        /*
2173         * Need rcu_readlock to protect against the d_parent trashing
2174         * due to d_move
2175         */
2176        rcu_read_lock();
2177        do {
2178                /* for restarting inner loop in case of seq retry */
2179                seq = read_seqbegin(&rename_lock);
2180                if (d_ancestor(old_dentry, new_dentry))
2181                        result = 1;
2182                else
2183                        result = 0;
2184        } while (read_seqretry(&rename_lock, seq));
2185        rcu_read_unlock();
2186
2187        return result;
2188}
2189
/*
 * Drop one reference from every hashed, positive dentry below root.
 * Iterative depth-first walk using goto labels instead of recursion.
 */
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Skip unhashed and negative entries. */
		if (d_unhashed(dentry)||!dentry->d_inode)
			continue;
		/* Descend into non-empty directories first. */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	/* Ascend: drop the parent's ref and continue with its siblings. */
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}
2219
2220/**
2221 * find_inode_number - check for dentry with name
2222 * @dir: directory to check
2223 * @name: Name to find.
2224 *
2225 * Check whether a dentry already exists for the given name,
2226 * and return the inode number if it has an inode. Otherwise
2227 * 0 is returned.
2228 *
2229 * This routine is used to post-process directory listings for
2230 * filesystems using synthetic inode numbers, and is necessary
2231 * to keep getcwd() working.
2232 */
2233 
2234ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2235{
2236        struct dentry * dentry;
2237        ino_t ino = 0;
2238
2239        dentry = d_hash_and_lookup(dir, name);
2240        if (dentry) {
2241                if (dentry->d_inode)
2242                        ino = dentry->d_inode->i_ino;
2243                dput(dentry);
2244        }
2245        return ino;
2246}
2247
2248static __initdata unsigned long dhash_entries;
2249static int __init set_dhash_entries(char *str)
2250{
2251        if (!str)
2252                return 0;
2253        dhash_entries = simple_strtoul(str, &str, 0);
2254        return 1;
2255}
2256__setup("dhash_entries=", set_dhash_entries);
2257
static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	/* 13 = log2 of the minimum table size; scale by dhash_entries
	 * if the user set it, otherwise by available memory. */
	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
2281
static void __init dcache_init(void)
{
	int loop;

	/* 
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache. 
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
	
	/* Let the VM reclaim dentries under memory pressure. */
	register_shrinker(&dcache_shrinker);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	/* NUMA hashdist case: allocate the table now (vmalloc available). */
	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
2313
2314/* SLAB cache for __getname() consumers */
2315struct kmem_cache *names_cachep __read_mostly;
2316
2317/* SLAB cache for file structures */
2318struct kmem_cache *filp_cachep __read_mostly;
2319
2320EXPORT_SYMBOL(d_genocide);
2321
/* Early-boot hash table setup for the dentry and inode caches. */
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
2327
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	/* names_cache objects back __getname() buffers (PATH_MAX each). */
	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Initialise the remaining VFS subsystems in dependency order. */
	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
2351
2352EXPORT_SYMBOL(d_alloc);
2353EXPORT_SYMBOL(d_alloc_root);
2354EXPORT_SYMBOL(d_delete);
2355EXPORT_SYMBOL(d_find_alias);
2356EXPORT_SYMBOL(d_instantiate);
2357EXPORT_SYMBOL(d_invalidate);
2358EXPORT_SYMBOL(d_lookup);
2359EXPORT_SYMBOL(d_move);
2360EXPORT_SYMBOL_GPL(d_materialise_unique);
2361EXPORT_SYMBOL(d_path);
2362EXPORT_SYMBOL(d_prune_aliases);
2363EXPORT_SYMBOL(d_rehash);
2364EXPORT_SYMBOL(d_splice_alias);
2365EXPORT_SYMBOL(d_add_ci);
2366EXPORT_SYMBOL(d_validate);
2367EXPORT_SYMBOL(dget_locked);
2368EXPORT_SYMBOL(dput);
2369EXPORT_SYMBOL(find_inode_number);
2370EXPORT_SYMBOL(have_submounts);
2371EXPORT_SYMBOL(names_cachep);
2372EXPORT_SYMBOL(shrink_dcache_parent);
2373EXPORT_SYMBOL(shrink_dcache_sb);
2374