1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fdtable.h>
21#include <linux/fs.h>
22#include <linux/fsnotify.h>
23#include <linux/slab.h>
24#include <linux/init.h>
25#include <linux/hash.h>
26#include <linux/cache.h>
27#include <linux/module.h>
28#include <linux/mount.h>
29#include <linux/file.h>
30#include <asm/uaccess.h>
31#include <linux/security.h>
32#include <linux/seqlock.h>
33#include <linux/swap.h>
34#include <linux/bootmem.h>
35#include "internal.h"
36
37
/* Tunable controlling how aggressively the VFS caches are reclaimed (percent). */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/* Protects dcache state not covered by the per-dentry d_lock. */
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
/* Write-held by d_move_locked() so lockless lookups can detect renames. */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

/* Slab cache from which all struct dentry objects are allocated. */
static struct kmem_cache *dentry_cache __read_mostly;

/* Longest name (incl. NUL) that fits in the dentry's inline d_iname buffer. */
#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
49
50
51
52
53
54
55
56
57
/* Shift/mask pair describing the size of the global dentry hash table. */
#define D_HASHBITS d_hash_shift
#define D_HASHMASK d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
/* The global dentry hash table itself. */
static struct hlist_head *dentry_hashtable __read_mostly;

/* Dentry cache statistics (nr_dentry, nr_unused, ...). */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};
69
/*
 * Release a dentry's storage: free an externally allocated name, if any,
 * then return the dentry itself to the slab cache.
 */
static void __d_free(struct dentry *dentry)
{
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}
76
/* RCU callback: actually free the dentry once a grace period has elapsed. */
static void d_callback(struct rcu_head *head)
{
	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}
82
83
84
85
86
/*
 * Tear down a dentry with no remaining users.  Gives the filesystem a
 * chance to clean up via ->d_release(), then frees either immediately
 * (if the dentry was never hashed, no RCU walker can be looking at it)
 * or after an RCU grace period.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}
97
98
99
100
101
/*
 * Release the dentry's inode, using the filesystem's ->d_iput() if it
 * provides one, otherwise plain iput().  Called with, and drops, both
 * dentry->d_lock and dcache_lock: the inode is released only after the
 * locks are dropped because the final iput() may sleep.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}
123
124
125
126
/* Put the dentry at the head of its superblock's unused-dentry LRU list. */
static void dentry_lru_add(struct dentry *dentry)
{
	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}
133
/* Put the dentry at the tail of its superblock's unused-dentry LRU list. */
static void dentry_lru_add_tail(struct dentry *dentry)
{
	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}
140
/* Remove the dentry from the LRU list, if it is on one. */
static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		list_del(&dentry->d_lru);
		dentry->d_sb->s_nr_dentry_unused--;
		dentry_stat.nr_unused--;
	}
}
149
/*
 * Remove the dentry from the LRU list, if on one, reinitialising d_lru
 * so a later list_empty(&dentry->d_lru) check sees it as off-list.
 */
static void dentry_lru_del_init(struct dentry *dentry)
{
	if (likely(!list_empty(&dentry->d_lru))) {
		list_del_init(&dentry->d_lru);
		dentry->d_sb->s_nr_dentry_unused--;
		dentry_stat.nr_unused--;
	}
}
158
159
160
161
162
163
164
165
166
/*
 * Unhook a dead dentry from its parent's child list and free it.
 * Called with dcache_lock and dentry->d_lock held; both are dropped
 * inside dentry_iput().  Returns the parent dentry so the caller can
 * continue killing upwards, or NULL if this dentry was its own parent
 * (a root).
 */
static struct dentry *d_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free() below */
	/* drops both locks; at this point nobody else can reach the dentry */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	return dentry == parent ? NULL : parent;
}
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
/**
 * dput - release a dentry
 * @dentry: dentry to release (may be %NULL)
 *
 * Drop one reference.  When the count reaches zero the dentry is either
 * parked on its superblock's unused-dentry LRU for possible reuse, or -
 * if the filesystem's ->d_delete() asks for it, or the dentry is already
 * unhashed - destroyed immediately.  Destroying a dentry releases its
 * parent reference, so the kill may walk up the tree (the "repeat" loop).
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();	/* dropping the last ref may end in iput() */
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		/* somebody re-grabbed it while we acquired the locks */
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * Let the filesystem decide whether this dentry should be
	 * discarded instead of cached.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Already unreachable through the hash?  Get rid of it. */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		dentry_lru_add(dentry);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	dentry = d_kill(dentry);	/* drops both locks, returns parent */
	if (dentry)
		goto repeat;
}
256
257
258
259
260
261
262
263
264
265
266
267
268
/**
 * d_invalidate - try to invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to unhash the dentry so future lookups will miss it.  Returns 0 on
 * success, -EBUSY if the dentry is a directory with extra references
 * (unhashing it would cut off users who reached it through a path).
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * A busy directory cannot simply be unhashed: other users could
	 * still legitimately reach children through it.  Non-directory
	 * dentries with extra references (e.g. open files) are fine to
	 * unhash - the extra holders keep their own references.
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}
313
314
315
/* Grab an extra reference; must be called with dcache_lock held. */
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	dentry_lru_del_init(dentry);	/* in use again - off the unused LRU */
	return dentry;
}
322
/* Exported form of __dget_locked(); caller must hold dcache_lock. */
struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
346{
347 struct list_head *head, *next, *tmp;
348 struct dentry *alias, *discon_alias=NULL;
349
350 head = &inode->i_dentry;
351 next = inode->i_dentry.next;
352 while (next != head) {
353 tmp = next;
354 next = tmp->next;
355 prefetch(next);
356 alias = list_entry(tmp, struct dentry, d_alias);
357 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
358 if (IS_ROOT(alias) &&
359 (alias->d_flags & DCACHE_DISCONNECTED))
360 discon_alias = alias;
361 else if (!want_discon) {
362 __dget_locked(alias);
363 return alias;
364 }
365 }
366 }
367 if (discon_alias)
368 __dget_locked(discon_alias);
369 return discon_alias;
370}
371
372struct dentry * d_find_alias(struct inode *inode)
373{
374 struct dentry *de = NULL;
375
376 if (!list_empty(&inode->i_dentry)) {
377 spin_lock(&dcache_lock);
378 de = __d_find_alias(inode, 0);
379 spin_unlock(&dcache_lock);
380 }
381 return de;
382}
383
384
385
386
387
/*
 * d_prune_aliases - unhash and drop all unused aliases of an inode.
 *
 * For every alias with a zero refcount: take a temporary reference,
 * unhash it, then dput() it - which destroys it.  The scan restarts
 * after each drop because dput() is called without dcache_lock.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}
407
408
409
410
411
412
413
414
415
/*
 * Throw away a dentry - free the inode, dput the parent.  The dentry must
 * already be off the LRU.  After d_kill() drops the locks we re-take
 * dcache_lock and continue pruning ancestors for which we held the last
 * reference, stopping as soon as a parent is still in use.
 *
 * Called with dcache_lock and dentry->d_lock; drops both and then
 * regains dcache_lock.
 */
static void prune_one_dentry(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
	__acquires(dcache_lock)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);	/* drops both locks, returns parent */

	/*
	 * Prune ancestors.  Locking is simpler than in dput()
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;	/* parent still in use - stop pruning */

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		dentry_lru_del_init(dentry);
		__d_drop(dentry);
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}
441
442
443
444
445
446
447
448
449
/*
 * Shrink the dentry LRU of a single superblock.
 *
 * @count == NULL means "free everything" (repeat until the LRU is empty,
 * ignoring @flags).  Otherwise at most *count dentries are pruned and
 * *count is updated with the number still wanted.  If @flags contains
 * DCACHE_REFERENCED, recently referenced dentries get a second chance:
 * the flag is cleared and they are parked on a private list that is
 * spliced back onto the LRU at the end.
 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	struct dentry *dentry;
	int cnt = 0;

	BUG_ON(!sb);
	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
	spin_lock(&dcache_lock);
	if (count != NULL)
		/* called from prune_dcache() and shrink_dcache_parent() */
		cnt = *count;
restart:
	if (count == NULL)
		list_splice_init(&sb->s_dentry_lru, &tmp);
	else {
		while (!list_empty(&sb->s_dentry_lru)) {
			dentry = list_entry(sb->s_dentry_lru.prev,
					struct dentry, d_lru);
			BUG_ON(dentry->d_sb != sb);

			spin_lock(&dentry->d_lock);
			/*
			 * If we are honouring the DCACHE_REFERENCED flag and
			 * the dentry has this flag set, don't free it.  Clear
			 * the flag and put it back on the LRU.
			 */
			if ((flags & DCACHE_REFERENCED)
				&& (dentry->d_flags & DCACHE_REFERENCED)) {
				dentry->d_flags &= ~DCACHE_REFERENCED;
				list_move_tail(&dentry->d_lru, &referenced);
				spin_unlock(&dentry->d_lock);
			} else {
				list_move_tail(&dentry->d_lru, &tmp);
				spin_unlock(&dentry->d_lock);
				cnt--;
				if (!cnt)
					break;
			}
			cond_resched_lock(&dcache_lock);
		}
	}
	while (!list_empty(&tmp)) {
		dentry = list_entry(tmp.prev, struct dentry, d_lru);
		dentry_lru_del_init(dentry);
		spin_lock(&dentry->d_lock);
		/*
		 * The dentry gained a reference while it was on our private
		 * list - do not free it, just keep it off the LRU.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		/* dcache_lock was dropped and re-taken in prune_one_dentry() */
		cond_resched_lock(&dcache_lock);
	}
	if (count == NULL && !list_empty(&sb->s_dentry_lru))
		goto restart;
	if (count != NULL)
		*count = cnt;
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lock);
}
518
519
520
521
522
523
524
525
526
527
/*
 * prune_dcache - shrink the dcache under memory pressure
 * @count: number of unused dentries we should try to free
 *
 * Spreads the work over all superblocks in proportion to each sb's share
 * of the system-wide unused-dentry count, calling __shrink_dcache_sb()
 * on every superblock we can safely pin.
 */
static void prune_dcache(int count)
{
	struct super_block *sb;
	int w_count;
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
	spin_lock(&dcache_lock);
restart:
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;	/* pin the superblock */
		/*
		 * sb_lock is dropped while we work on this sb; the pinned
		 * s_count keeps it alive.  Each sb contributes roughly its
		 * proportional share (s_nr_dentry_unused / prune_ratio).
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * Make sure the filesystem isn't being unmounted under us:
		 * try to get s_umount and check that s_root is still set
		 * before touching its LRU.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				spin_unlock(&dcache_lock);
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;
				spin_lock(&dcache_lock);
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		count -= pruned;
		/*
		 * restart only when sb is no longer on the list and
		 * we have more work to get done.
		 */
		if (__put_super_and_need_restart(sb) && count > 0) {
			spin_unlock(&sb_lock);
			goto restart;
		}
	}
	spin_unlock(&sb_lock);
	spin_unlock(&dcache_lock);
}
596
597
598
599
600
601
602
603
604
/**
 * shrink_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock
 *
 * Free every unused dentry of @sb (NULL count makes __shrink_dcache_sb
 * loop until the LRU is empty, ignoring DCACHE_REFERENCED).
 */
void shrink_dcache_sb(struct super_block * sb)
{
	__shrink_dcache_sb(sb, NULL, 0);
}
609
610
611
612
613
614
/*
 * Destroy a whole subtree of dentries during unmount.  At this point the
 * filesystem can no longer gain references, so every dentry must have a
 * zero refcount apart from the pins its children hold on it - anything
 * else is a bug and we BUG() loudly.
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach the subtree root from the LRU and the hash */
	spin_lock(&dcache_lock);
	dentry_lru_del_init(dentry);
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - unhash all of
			 * them and take them off the LRU in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				dentry_lru_del_init(loop);
				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			parent = dentry->d_parent;
			if (parent == dentry)
				parent = NULL;
			else
				atomic_dec(&parent->d_count);

			list_del(&dentry->d_u.d_child);
			detached++;

			/* detach the inode by hand (no locking needed here) */
			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise ascend to the parent */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}
709
710
711
712
713
714
715
716
717
718
719
720
/*
 * Destroy the dentries attached to a superblock on unmounting.  The
 * caller must hold s_umount exclusively (we BUG() if a read lock can
 * still be taken).  Handles both the tree under s_root and any
 * anonymous roots hanging off s_anon.
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);	/* drop the root's own reference */
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return 1 if the parent or any of its descendants is a mount point,
 * 0 otherwise.  Walks the subtree depth-first under dcache_lock.
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point? */
		if (d_mountpoint(dentry))
			goto positive;
		/* Descend a level if this child has children of its own. */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0;
positive:
	spin_unlock(&dcache_lock);
	return 1;
}
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
/*
 * Walk the subtree below @parent and move every zero-refcount dentry to
 * the tail of its sb's unused list, where __shrink_dcache_sb() will pick
 * it up.  Returns the number moved; may return early (with work left to
 * do) if rescheduling is needed, so callers loop until it returns 0.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		dentry_lru_del_init(dentry);
		/*
		 * move only zero-refcount dentries to the tail
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			dentry_lru_add_tail(dentry);
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress).  We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}
858
859
860
861
862
863
864
865
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry,
 * looping until select_parent() finds no more unused descendants.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, &found, 0);
}
874
875
876
877
878
879
880
881
882
883
884
885
886
/*
 * Shrinker callback: prune @nr dentries and report how many freeable
 * dentries remain (scaled by sysctl_vfs_cache_pressure).  Returns -1 if
 * the allocation context forbids filesystem re-entry (no __GFP_FS).
 */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
896
/* Registered with the VM so memory pressure can reclaim unused dentries. */
static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};
901
902
903
904
905
906
907
908
909
910
911
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate, or %NULL for a root
 * @name: qstr of the name
 *
 * Allocates a dentry.  Returns %NULL if there is insufficient memory
 * available.  The name is copied - into the inline d_iname buffer when
 * it fits, otherwise into a separate kmalloc'd buffer.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/* names too long for the inline buffer get their own allocation */
	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;	/* not on any hash chain yet */
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
#ifdef CONFIG_PROFILING
	dentry->d_cookie = NULL;
#endif
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);	/* child pins parent */
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}
969
970struct dentry *d_alloc_name(struct dentry *parent, const char *name)
971{
972 struct qstr q;
973
974 q.name = name;
975 q.len = strlen(name);
976 q.hash = full_name_hash(q.name, q.len);
977 return d_alloc(parent, &q);
978}
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry (%NULL leaves a negative dentry)
 *
 * Fill in inode information in the entry, turning a negative dentry
 * into a positive one.  The dentry must not already have an alias
 * (BUG otherwise).  The caller is expected to have taken the inode
 * reference that the dcache now holds.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
/*
 * Core of d_instantiate_unique(): with dcache_lock held, look for an
 * existing alias of @inode with the same parent and name as @entry.
 * If one exists, take a reference and return it; otherwise attach
 * @inode to @entry and return NULL.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		/* negative dentry - nothing to compare against */
		entry->d_inode = NULL;
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/* cheap comparisons first, full name compare last */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	return NULL;
}
1056
/*
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.  On success returns NULL.
 * If an unhashed alias of @entry already exists, return the aliased
 * dentry instead and drop one reference to @inode to compensate.
 */
struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);	/* the existing alias already holds a reference */
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088struct dentry * d_alloc_root(struct inode * root_inode)
1089{
1090 struct dentry *res = NULL;
1091
1092 if (root_inode) {
1093 static const struct qstr name = { .name = "/", .len = 1 };
1094
1095 res = d_alloc(NULL, &name);
1096 if (res) {
1097 res->d_sb = root_inode->i_sb;
1098 res->d_parent = res;
1099 d_instantiate(res, root_inode);
1100 }
1101 }
1102 return res;
1103}
1104
/*
 * Pick the hash bucket for a (parent, name-hash) pair.  The parent
 * pointer is mixed in so identical names under different directories
 * land in different chains.
 */
static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for @inode: an existing alias if there is one,
 * otherwise a new disconnected dentry (empty name, DCACHE_DISCONNECTED,
 * hashed on the superblock's s_anon list).  The caller's reference on
 * @inode is consumed on every path except allocation failure, where
 * %NULL is returned and the reference is left with the caller.
 */
struct dentry * d_alloc_anon(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if ((res = d_find_alias(inode))) {
		iput(inode);
		return res;
	}

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp)
		return NULL;

	/* self-parented until (maybe) attached below */
	tmp->d_parent = tmp;

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);
	if (!res) {
		/* attach a disconnected dentry */
		res = tmp;
		tmp = NULL;
		spin_lock(&res->d_lock);
		res->d_sb = inode->i_sb;
		res->d_parent = res;
		res->d_inode = inode;
		res->d_flags |= DCACHE_DISCONNECTED;
		res->d_flags &= ~DCACHE_UNHASHED;
		list_add(&res->d_alias, &inode->i_dentry);
		hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
		spin_unlock(&res->d_lock);

		inode = NULL; /* reference now owned by the new dentry */
	}
	spin_unlock(&dcache_lock);

	/* drop whatever we did not end up using */
	if (inode)
		iput(inode);
	if (tmp)
		dput(tmp);
	return res;
}
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If @inode is a directory that has a disconnected alias (IS_ROOT and
 * DCACHE_DISCONNECTED), d_move that alias into place of @dentry and
 * return it; otherwise attach @inode to @dentry, rehash it, and return
 * NULL.  Non-directories (and NULL inodes) simply go through d_add().
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			fsnotify_d_instantiate(new, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);	/* the alias already holds a ref */
		} else {
			/* already holding dcache_lock, so d_add() by hand */
			list_add(&dentry->d_alias, &inode->i_dentry);
			dentry->d_inode = inode;
			fsnotify_d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * Ensures only the case-exact spelling of a name is cached for
 * case-insensitive filesystems.  Consumes the caller's reference on
 * @inode on every path; returns the dentry to use or an ERR_PTR.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/* Does a dentry matching the name already exist? */
	found = d_hash_and_lookup(dentry->d_parent, name);
	/* If not, create it now and return */
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}
		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}
	/* Matching dentry exists, check if it is negative. */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		/*
		 * Already have the inode and the dentry attached, decrement
		 * the reference count to balance the iget() done
		 * earlier on.
		 */
		iput(inode);
		return found;
	}
	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	if (!S_ISDIR(inode->i_mode)) {
		/* Not a directory; everything is easy. */
		d_instantiate(found, inode);
		return found;
	}
	spin_lock(&dcache_lock);
	if (list_empty(&inode->i_dentry)) {
		/*
		 * Directory without any alias yet; instantiate by hand
		 * because d_instantiate() takes dcache_lock, which we
		 * already hold.
		 */
		list_add(&found->d_alias, &inode->i_dentry);
		found->d_inode = inode;
		spin_unlock(&dcache_lock);
		security_d_instantiate(found, inode);
		return found;
	}
	/*
	 * Directory with an existing alias; get a reference to that
	 * aliased dentry.
	 */
	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
	dget_locked(new);
	spin_unlock(&dcache_lock);
	/* Security hook first, as for the other instantiation paths. */
	security_d_instantiate(found, inode);
	/* Move new in place of found. */
	d_move(new, found);
	/* Balance the iget() done by the caller. */
	iput(inode);
	/* Throw away found. */
	dput(found);
	/* Use new as the actual dentry. */
	return new;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1355{
1356 struct dentry * dentry = NULL;
1357 unsigned long seq;
1358
1359 do {
1360 seq = read_seqbegin(&rename_lock);
1361 dentry = __d_lookup(parent, name);
1362 if (dentry)
1363 break;
1364 } while (read_seqretry(&rename_lock, seq));
1365 return dentry;
1366}
1367
/*
 * __d_lookup - lockless hash lookup.  May give a false negative while a
 * rename is in progress; d_lookup() handles the seqlock retry.
 *
 * Walks the RCU-protected hash chain for (parent, name).  Each candidate
 * is re-validated under its own d_lock, since the chain is traversed
 * without dcache_lock.  On success the dentry is returned with an
 * elevated reference count.
 */
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		/* cheap unlocked filters first */
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the parent under the lock - a concurrent d_move()
		 * may have changed it.  No need to recheck the hash: the
		 * full name is compared below anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/* unhashed entries are invisible to lookup */
		if (d_unhashed(dentry))
			goto next;

		/*
		 * The name is stable while d_lock is held, so it is safe
		 * to compare it here.
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		atomic_inc(&dentry->d_count);
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
1428
1429
1430
1431
1432
1433
1434
1435
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * Returns NULL if the filesystem's ->d_hash() rejects the name or the
 * lookup finds nothing.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function.  The standard hash is
	 * computed first, as d_op->d_hash() may choose to leave the
	 * value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
/**
 * d_validate - verify a dentry provided from an insecure source
 * @dentry: The dentry alleged to be valid (child of @dparent)
 * @dparent: The parent dentry (known to be valid)
 *
 * Verify that @dentry is a live child of @dparent and, if so, take a
 * reference on it.  Returns 1 when valid, 0 otherwise.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the pointer even points into the dentry slab. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	/* confirm it is actually on the expected hash chain */
	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp,base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * If we hold the only reference, drop the inode right away (turning the
 * dentry negative); otherwise just unhash it so it can be destroyed by
 * a later dput().  The dentry must currently be positive (d_inode is
 * dereferenced unconditionally).
 */
void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);	/* drops both locks */
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	/* other users remain: just make the name unreachable */
	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}
1540
/* Clear the 'unhashed' flag and link the dentry onto hash chain @list. */
static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{

	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}
1547
/* Hash the dentry into the bucket computed from its parent and name. */
static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}
1552
1553
1554
1555
1556
1557
1558
1559
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name, taking the
 * required locks.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}
1568
/*
 * Swap the values of x and y (lvalues of the same type).  Both arguments
 * are referenced more than once - don't pass expressions with side
 * effects.
 */
#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
/*
 * Exchange the names of @dentry and @target, coping with all four
 * combinations of inline vs. externally allocated (kmalloc'd) names.
 * The target's name need not be preserved exactly - the caller is
 * about to drop the target anyway - which is why the both-inline case
 * can be a one-way copy.  The lengths are swapped at the end except in
 * that one-way case, which sets dentry's length and returns early.
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			do_switch(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Just copy target to dentry.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	do_switch(dentry->d_name.len, target->d_name.len);
}
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.  Caller holds
 * dcache_lock; the two d_locks are taken in address order to avoid
 * ABBA deadlock, and rename_lock is write-held so lockless lookups
 * that raced with us will retry.
 */
static void d_move_locked(struct dentry * dentry, struct dentry * target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	write_seqlock(&rename_lock);
	/*
	 * Take the two d_locks in pointer order so concurrent moves on
	 * the same pair cannot deadlock.
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		do_switch(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.
 */
void d_move(struct dentry * dentry, struct dentry * target)
{
	spin_lock(&dcache_lock);
	d_move_locked(dentry, target);
	spin_unlock(&dcache_lock);
}
1718
1719
1720
1721
1722static int d_isparent(struct dentry *p1, struct dentry *p2)
1723{
1724 struct dentry *p;
1725
1726 for (p = p2; p->d_parent != p; p = p->d_parent) {
1727 if (p->d_parent == p1)
1728 return 1;
1729 }
1730 return 0;
1731}
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
/*
 * Helper that attempts to cope with remotely renamed directories.
 *
 * Assumes the caller already holds dentry->d_parent->d_inode->i_mutex
 * and dcache_lock (which is dropped before returning).  Mirrors the
 * locking done by lock_rename(), but with trylocks so we can bail out
 * with -EBUSY instead of blocking.
 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
	__releases(dcache_lock)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, no extra locks are needed */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_isparent(alias, dentry))
		goto out_err;

	/* See lock_rename() for the lock ordering being mirrored here */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
1776
1777
1778
1779
1780
/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as
 * a named dentry, in place of the dentry to be replaced: exchange names,
 * hashes and parent linkage between @dentry and @anon, then clear the
 * DCACHE_DISCONNECTED flag on @anon.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	switch_names(dentry, anon);
	do_switch(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	/* swap parent linkage; a self-parented dentry stays a root */
	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
1807
1808
1809
1810
1811
1812
1813
1814
1815
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one.
 *
 * Returns NULL when @dentry itself was hashed (caller keeps its inode
 * reference via the instantiation), otherwise the alias that was used —
 * in which case the caller's reference on @inode is consumed (iput).
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	/* Negative dentry: just hash it in place */
	if (!inode) {
		actual = dentry;
		dentry->d_inode = NULL;
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this a disconnected root we can splice in? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!) avoid directory aliasing */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			/* __d_unalias dropped dcache_lock for us */
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	/* The inode reference now lives with the returned alias */
	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
}
1879
/*
 * Copy @namelen bytes of @str immediately in front of the current
 * position, building a string backwards from the end of a buffer.
 * On success *buffer and *buflen both shrink by @namelen.  On overflow
 * *buflen goes negative (the caller bails out) and -ENAMETOOLONG is
 * returned without touching the buffer contents.
 */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	int room = *buflen - namelen;

	*buflen = room;
	if (room < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
1889
/* Prepend a qstr's text (which is not NUL-terminated) to the buffer. */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.  If the entry has been
 * deleted the string " (deleted)" is appended.  Note that this is
 * ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long.  "buflen" should be positive.  Caller holds dcache_lock.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
char *__d_path(const struct path *path, struct path *root,
	       char *buffer, int buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	char *end = buffer + buflen;
	char *retval;

	spin_lock(&vfsmount_lock);
	/* The name is built backwards from the end of the buffer */
	prepend(&end, &buflen, "\0", 1);
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, " (deleted)", 10) != 0))
		goto Elong;

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry * parent;

		if (dentry == root->dentry && vfsmnt == root->mnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt) {
				goto global_root;
			}
			/* Cross the mountpoint into the parent mount */
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;
		retval = end;
		dentry = parent;
	}

out:
	spin_unlock(&vfsmount_lock);
	return retval;

global_root:
	retval += 1;	/* hit the slash */
	if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
		goto Elong;
	/* Report where we actually ended up, for an unreachable path */
	root->mnt = vfsmnt;
	root->dentry = dentry;
	goto out;

Elong:
	retval = ERR_PTR(-ENAMETOOLONG);
	goto out;
}
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.  If the entry has been
 * deleted the string " (deleted)" is appended (see __d_path()).
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long.  Callers should use the returned pointer, not the passed
 * in buffer: the name is built at the end of the buffer, so bytes at
 * the start may be unused.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res;
	struct path root;
	struct path tmp;

	/*
	 * Objects whose dentry_operations provide d_dname() generate
	 * their name on demand instead of walking the dcache (used by
	 * filesystems whose dentries are never hashed for lookup).
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	/* Snapshot the task's root with a reference held */
	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	/* __d_path may modify tmp; keep root intact for path_put() */
	tmp = root;
	res = __d_path(path, &tmp, buf, buflen);
	spin_unlock(&dcache_lock);
	path_put(&root);
	return res;
}
2012
2013
2014
2015
/*
 * Helper to be used by dentry_operations.d_dname() implementations:
 * format a name into @buffer, placed flush against its end (the way
 * d_path() callers expect), and return a pointer to it.  Returns
 * ERR_PTR(-ENAMETOOLONG) if the formatted name does not fit.
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	char temp[64];
	int needed;
	va_list args;

	/* Format into a small scratch buffer first */
	va_start(args, fmt);
	needed = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	/* Reject names truncated by the scratch buffer or too big for the caller's */
	if (needed > sizeof(temp) || needed > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	/* Copy the name (including its NUL) to the tail of the buffer */
	return memcpy(buffer + buflen - needed, temp, needed);
}
2033
2034
2035
2036
/*
 * Write the full pathname from the root of the filesystem into the buffer.
 * Unlike d_path() this walks only d_parent pointers (it ignores mounts),
 * and a deleted entry gets "//deleted" appended.  Returns a pointer into
 * @buf or ERR_PTR(-ENAMETOOLONG).
 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	spin_lock(&dcache_lock);
	/* The name is built backwards from the end of the buffer */
	prepend(&end, &buflen, "\0", 1);
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, "//deleted", 9) != 0))
		goto Elong;
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;

		retval = end;
		dentry = parent;
	}
	spin_unlock(&dcache_lock);
	return retval;
Elong:
	spin_unlock(&dcache_lock);
	return ERR_PTR(-ENAMETOOLONG);
}
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	/* Snapshot pwd and root with references held */
	read_lock(&current->fs->lock);
	pwd = current->fs->pwd;
	path_get(&pwd);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) {
		unsigned long len;
		struct path tmp = root;
		char * cwd;

		cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = PTR_ERR(cwd);
		if (IS_ERR(cwd))
			goto out;

		error = -ERANGE;
		/* The name was built at the end of the page */
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
/*
 * is_subdir - is new_dentry inside the subtree of old_dentry?
 *
 * Returns 1 if new_dentry equals old_dentry or has it as an ancestor,
 * 0 otherwise.  The whole walk is retried if a rename happened
 * concurrently (detected via rename_lock's sequence count).
 *
 * NOTE(review): caller must keep "new_dentry" pinned for the duration
 * of the call — confirm against callers; nothing here takes a ref.
 */
int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
{
	int result;
	struct dentry * saved = new_dentry;
	unsigned long seq;

	/* rcu_read_lock protects against d_parent changing under us
	 * due to a concurrent d_move
	 */
	rcu_read_lock();
	do {
		/* restart the walk from the original dentry on retry */
		new_dentry = saved;
		result = 0;
		seq = read_seqbegin(&rename_lock);
		for (;;) {
			if (new_dentry != old_dentry) {
				struct dentry * parent = new_dentry->d_parent;
				/* a self-parented dentry is the root: stop */
				if (parent == new_dentry)
					break;
				new_dentry = parent;
				continue;
			}
			result = 1;
			break;
		}
	} while (read_seqretry(&rename_lock, seq));
	rcu_read_unlock();

	return result;
}
2184
/*
 * d_genocide - walk the subtree under @root depth-first and drop one
 * reference (d_count) from every hashed, positive dentry found,
 * including the intermediate directories on the way back up.
 *
 * The walk is iterative: "repeat" descends into a subdirectory,
 * "resume" continues a parent's child list after ascending.
 */
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* skip unhashed and negative dentries */
		if (d_unhashed(dentry)||!dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		/* ascend: drop the parent's ref and resume its sibling list */
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2230{
2231 struct dentry * dentry;
2232 ino_t ino = 0;
2233
2234 dentry = d_hash_and_lookup(dir, name);
2235 if (dentry) {
2236 if (dentry->d_inode)
2237 ino = dentry->d_inode->i_ino;
2238 dput(dentry);
2239 }
2240 return ino;
2241}
2242
2243static __initdata unsigned long dhash_entries;
2244static int __init set_dhash_entries(char *str)
2245{
2246 if (!str)
2247 return 0;
2248 dhash_entries = simple_strtoul(str, &str, 0);
2249 return 1;
2250}
2251__setup("dhash_entries=", set_dhash_entries);
2252
/* Allocate the dentry hash table during early boot, unless it must be
 * deferred (see dcache_init()). */
static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until later (handled by dcache_init()).
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	/* Start with every hash chain empty */
	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
2276
/* Set up the dentry slab cache, the dcache shrinker, and — when the
 * early allocation was skipped (hashdist) — the dentry hash table. */
static void __init dcache_init(void)
{
	int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Let memory pressure reclaim dentries */
	register_shrinker(&dcache_shrinker);

	/* Hash table may already have been set up in dcache_init_early() */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	/* Start with every hash chain empty */
	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
2308
2309
/* SLAB cache for PATH_MAX-sized name buffers (created in vfs_caches_init) */
struct kmem_cache *names_cachep __read_mostly;

/* SLAB cache for struct file (created in vfs_caches_init) */
struct kmem_cache *filp_cachep __read_mostly;

EXPORT_SYMBOL(d_genocide);
2316
/* Early-boot VFS setup: dentry and inode hash tables that can be
 * allocated before the full memory subsystem is up. */
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
2322
/* Main VFS initialisation: create the core slab caches and initialise
 * the dcache, inode, files, mount, bdev and chrdev subsystems.
 * @mempages: total number of memory pages, used to size hashes. */
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */
	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
2346
/* dcache interfaces exported for use by filesystems and other modules */
EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_anon);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL_GPL(d_materialise_unique);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_add_ci);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);
2370