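/*
 * fs/dcache.c
 *
 * Dentry cache handling: allocation, hashing, lookup, aliasing and
 * shrinking of directory entries, plus pathname reconstruction
 * (d_path() and friends).
 */
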
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/module.h>
27#include <linux/mount.h>
28#include <linux/file.h>
29#include <asm/uaccess.h>
30#include <linux/security.h>
31#include <linux/seqlock.h>
32#include <linux/swap.h>
33#include <linux/bootmem.h>
34#include <linux/fs_struct.h>
35#include "internal.h"
36
37int sysctl_vfs_cache_pressure __read_mostly = 100;
38EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
39
40 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
41__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
42
43EXPORT_SYMBOL(dcache_lock);
44
45static struct kmem_cache *dentry_cache __read_mostly;
46
47#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57#define D_HASHBITS d_hash_shift
58#define D_HASHMASK d_hash_mask
59
60static unsigned int d_hash_mask __read_mostly;
61static unsigned int d_hash_shift __read_mostly;
62static struct hlist_head *dentry_hashtable __read_mostly;
63
64
65struct dentry_stat_t dentry_stat = {
66 .age_limit = 45,
67};
68
69static void __d_free(struct dentry *dentry)
70{
71 WARN_ON(!list_empty(&dentry->d_alias));
72 if (dname_external(dentry))
73 kfree(dentry->d_name.name);
74 kmem_cache_free(dentry_cache, dentry);
75}
76
77static void d_callback(struct rcu_head *head)
78{
79 struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
80 __d_free(dentry);
81}
82
87static void d_free(struct dentry *dentry)
88{
89 if (dentry->d_op && dentry->d_op->d_release)
90 dentry->d_op->d_release(dentry);
91
92 if (hlist_unhashed(&dentry->d_hash))
93 __d_free(dentry);
94 else
95 call_rcu(&dentry->d_u.d_rcu, d_callback);
96}
97
102static void dentry_iput(struct dentry * dentry)
103 __releases(dentry->d_lock)
104 __releases(dcache_lock)
105{
106 struct inode *inode = dentry->d_inode;
107 if (inode) {
108 dentry->d_inode = NULL;
109 list_del_init(&dentry->d_alias);
110 spin_unlock(&dentry->d_lock);
111 spin_unlock(&dcache_lock);
112 if (!inode->i_nlink)
113 fsnotify_inoderemove(inode);
114 if (dentry->d_op && dentry->d_op->d_iput)
115 dentry->d_op->d_iput(dentry, inode);
116 else
117 iput(inode);
118 } else {
119 spin_unlock(&dentry->d_lock);
120 spin_unlock(&dcache_lock);
121 }
122}
127static void dentry_lru_add(struct dentry *dentry)
128{
129 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
130 dentry->d_sb->s_nr_dentry_unused++;
131 dentry_stat.nr_unused++;
132}
133
134static void dentry_lru_add_tail(struct dentry *dentry)
135{
136 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
137 dentry->d_sb->s_nr_dentry_unused++;
138 dentry_stat.nr_unused++;
139}
140
141static void dentry_lru_del(struct dentry *dentry)
142{
143 if (!list_empty(&dentry->d_lru)) {
144 list_del(&dentry->d_lru);
145 dentry->d_sb->s_nr_dentry_unused--;
146 dentry_stat.nr_unused--;
147 }
148}
149
150static void dentry_lru_del_init(struct dentry *dentry)
151{
152 if (likely(!list_empty(&dentry->d_lru))) {
153 list_del_init(&dentry->d_lru);
154 dentry->d_sb->s_nr_dentry_unused--;
155 dentry_stat.nr_unused--;
156 }
157}
158
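
/*
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * The dentry must already be unhashed and removed from the LRU. Called
 * with dcache_lock and dentry->d_lock held; both are dropped via
 * dentry_iput(). Returns the parent dentry so the caller can release
 * the reference the freed dentry held on it, or NULL for a root dentry.
 */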
167static struct dentry *d_kill(struct dentry *dentry)
168 __releases(dentry->d_lock)
169 __releases(dcache_lock)
170{
171 struct dentry *parent;
172
173 list_del(&dentry->d_u.d_child);
174 dentry_stat.nr_dentry--;
175
176 dentry_iput(dentry);
177 if (IS_ROOT(dentry))
178 parent = NULL;
179 else
180 parent = dentry->d_parent;
181 d_free(dentry);
182 return parent;
183}
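
/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This drops the usage count and, if appropriate,
 * calls the dentry unlink method as well as removing the dentry from
 * the queues and releasing its resources. If the parent dentries were
 * scheduled for release they too may now get deleted.
 *
 * No locks are required from the caller; dcache_lock and d_lock are
 * taken here as needed.
 */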
214void dput(struct dentry *dentry)
215{
216 if (!dentry)
217 return;
218
219repeat:
220 if (atomic_read(&dentry->d_count) == 1)
221 might_sleep();
222 if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
223 return;
224
225 spin_lock(&dentry->d_lock);
226 if (atomic_read(&dentry->d_count)) {
227 spin_unlock(&dentry->d_lock);
228 spin_unlock(&dcache_lock);
229 return;
230 }
231
232
233
234
235 if (dentry->d_op && dentry->d_op->d_delete) {
236 if (dentry->d_op->d_delete(dentry))
237 goto unhash_it;
238 }
239
240 if (d_unhashed(dentry))
241 goto kill_it;
242 if (list_empty(&dentry->d_lru)) {
243 dentry->d_flags |= DCACHE_REFERENCED;
244 dentry_lru_add(dentry);
245 }
246 spin_unlock(&dentry->d_lock);
247 spin_unlock(&dcache_lock);
248 return;
249
250unhash_it:
251 __d_drop(dentry);
252kill_it:
253
254 dentry_lru_del(dentry);
255 dentry = d_kill(dentry);
256 if (dentry)
257 goto repeat;
258}
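
/*
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to remove the dentry from the hash so that future lookups miss
 * it; unused children are pruned first. A directory that is still in
 * use (d_count above one) is left alone and -EBUSY is returned, since
 * it may be the working directory of some process; anything else is
 * unhashed and 0 is returned.
 *
 * Called without dcache_lock held.
 */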
272int d_invalidate(struct dentry * dentry)
273{
274
275
276
277 spin_lock(&dcache_lock);
278 if (d_unhashed(dentry)) {
279 spin_unlock(&dcache_lock);
280 return 0;
281 }
282
283
284
285
286 if (!list_empty(&dentry->d_subdirs)) {
287 spin_unlock(&dcache_lock);
288 shrink_dcache_parent(dentry);
289 spin_lock(&dcache_lock);
290 }
302 spin_lock(&dentry->d_lock);
303 if (atomic_read(&dentry->d_count) > 1) {
304 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
305 spin_unlock(&dentry->d_lock);
306 spin_unlock(&dcache_lock);
307 return -EBUSY;
308 }
309 }
310
311 __d_drop(dentry);
312 spin_unlock(&dentry->d_lock);
313 spin_unlock(&dcache_lock);
314 return 0;
315}
316
317
318
319static inline struct dentry * __dget_locked(struct dentry *dentry)
320{
321 atomic_inc(&dentry->d_count);
322 dentry_lru_del_init(dentry);
323 return dentry;
324}
325
326struct dentry * dget_locked(struct dentry *dentry)
327{
328 return __dget_locked(dentry);
329}
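
/*
 * __d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: if set, only an IS_ROOT, DCACHE_DISCONNECTED alias
 *               is acceptable (used by d_splice_alias())
 *
 * If the inode has a hashed alias, or is a directory and has any
 * alias, grab a reference to it and return it; otherwise return NULL.
 * A disconnected root alias is only returned when no ordinary alias
 * exists or when @want_discon is set. d_find_alias() is the wrapper
 * that takes dcache_lock; this helper expects the caller to hold it.
 */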
348static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
349{
350 struct list_head *head, *next, *tmp;
351 struct dentry *alias, *discon_alias=NULL;
352
353 head = &inode->i_dentry;
354 next = inode->i_dentry.next;
355 while (next != head) {
356 tmp = next;
357 next = tmp->next;
358 prefetch(next);
359 alias = list_entry(tmp, struct dentry, d_alias);
360 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
361 if (IS_ROOT(alias) &&
362 (alias->d_flags & DCACHE_DISCONNECTED))
363 discon_alias = alias;
364 else if (!want_discon) {
365 __dget_locked(alias);
366 return alias;
367 }
368 }
369 }
370 if (discon_alias)
371 __dget_locked(discon_alias);
372 return discon_alias;
373}
374
375struct dentry * d_find_alias(struct inode *inode)
376{
377 struct dentry *de = NULL;
378
379 if (!list_empty(&inode->i_dentry)) {
380 spin_lock(&dcache_lock);
381 de = __d_find_alias(inode, 0);
382 spin_unlock(&dcache_lock);
383 }
384 return de;
385}
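
/*
 * d_prune_aliases - unhash and drop all unused aliases of an inode
 * @inode: inode whose aliases are to be pruned
 *
 * Walk the inode's alias list and unhash every dentry with a zero
 * reference count, then dput() it. The walk restarts after each drop
 * because dput() is called without dcache_lock held.
 */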
391void d_prune_aliases(struct inode *inode)
392{
393 struct dentry *dentry;
394restart:
395 spin_lock(&dcache_lock);
396 list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
397 spin_lock(&dentry->d_lock);
398 if (!atomic_read(&dentry->d_count)) {
399 __dget_locked(dentry);
400 __d_drop(dentry);
401 spin_unlock(&dentry->d_lock);
402 spin_unlock(&dcache_lock);
403 dput(dentry);
404 goto restart;
405 }
406 spin_unlock(&dentry->d_lock);
407 }
408 spin_unlock(&dcache_lock);
409}
419static void prune_one_dentry(struct dentry * dentry)
420 __releases(dentry->d_lock)
421 __releases(dcache_lock)
422 __acquires(dcache_lock)
423{
424 __d_drop(dentry);
425 dentry = d_kill(dentry);
431 spin_lock(&dcache_lock);
432 while (dentry) {
433 if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
434 return;
435
436 if (dentry->d_op && dentry->d_op->d_delete)
437 dentry->d_op->d_delete(dentry);
438 dentry_lru_del_init(dentry);
439 __d_drop(dentry);
440 dentry = d_kill(dentry);
441 spin_lock(&dcache_lock);
442 }
443}
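
/*
 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
 * @sb: superblock to shrink
 * @count: if non-NULL, the number of dentries to try to free; the
 *         remaining count is written back on return. If NULL, the
 *         whole LRU for this superblock is pruned.
 * @flags: if DCACHE_REFERENCED is set, recently referenced dentries
 *         are spared and only have their referenced bit cleared.
 */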
453static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
454{
455 LIST_HEAD(referenced);
456 LIST_HEAD(tmp);
457 struct dentry *dentry;
458 int cnt = 0;
459
460 BUG_ON(!sb);
461 BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
462 spin_lock(&dcache_lock);
463 if (count != NULL)
464
465 cnt = *count;
466restart:
467 if (count == NULL)
468 list_splice_init(&sb->s_dentry_lru, &tmp);
469 else {
470 while (!list_empty(&sb->s_dentry_lru)) {
471 dentry = list_entry(sb->s_dentry_lru.prev,
472 struct dentry, d_lru);
473 BUG_ON(dentry->d_sb != sb);
474
475 spin_lock(&dentry->d_lock);
481 if ((flags & DCACHE_REFERENCED)
482 && (dentry->d_flags & DCACHE_REFERENCED)) {
483 dentry->d_flags &= ~DCACHE_REFERENCED;
484 list_move(&dentry->d_lru, &referenced);
485 spin_unlock(&dentry->d_lock);
486 } else {
487 list_move_tail(&dentry->d_lru, &tmp);
488 spin_unlock(&dentry->d_lock);
489 cnt--;
490 if (!cnt)
491 break;
492 }
493 cond_resched_lock(&dcache_lock);
494 }
495 }
496 while (!list_empty(&tmp)) {
497 dentry = list_entry(tmp.prev, struct dentry, d_lru);
498 dentry_lru_del_init(dentry);
499 spin_lock(&dentry->d_lock);
505 if (atomic_read(&dentry->d_count)) {
506 spin_unlock(&dentry->d_lock);
507 continue;
508 }
509 prune_one_dentry(dentry);
510
511 cond_resched_lock(&dcache_lock);
512 }
513 if (count == NULL && !list_empty(&sb->s_dentry_lru))
514 goto restart;
515 if (count != NULL)
516 *count = cnt;
517 if (!list_empty(&referenced))
518 list_splice(&referenced, &sb->s_dentry_lru);
519 spin_unlock(&dcache_lock);
520}
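
/*
 * prune_dcache - shrink the global dentry LRU
 * @count: number of unused dentries to try to free
 *
 * Work is spread across all superblocks in proportion to each one's
 * share of the unused dentries. Superblocks whose s_umount cannot be
 * taken for reading (e.g. because an unmount is in progress) are
 * skipped.
 */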
531static void prune_dcache(int count)
532{
533 struct super_block *sb;
534 int w_count;
535 int unused = dentry_stat.nr_unused;
536 int prune_ratio;
537 int pruned;
538
539 if (unused == 0 || count == 0)
540 return;
541 spin_lock(&dcache_lock);
542restart:
543 if (count >= unused)
544 prune_ratio = 1;
545 else
546 prune_ratio = unused / count;
547 spin_lock(&sb_lock);
548 list_for_each_entry(sb, &super_blocks, s_list) {
549 if (sb->s_nr_dentry_unused == 0)
550 continue;
551 sb->s_count++;
561 spin_unlock(&sb_lock);
562 if (prune_ratio != 1)
563 w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
564 else
565 w_count = sb->s_nr_dentry_unused;
566 pruned = w_count;
574 if (down_read_trylock(&sb->s_umount)) {
575 if ((sb->s_root != NULL) &&
576 (!list_empty(&sb->s_dentry_lru))) {
577 spin_unlock(&dcache_lock);
578 __shrink_dcache_sb(sb, &w_count,
579 DCACHE_REFERENCED);
580 pruned -= w_count;
581 spin_lock(&dcache_lock);
582 }
583 up_read(&sb->s_umount);
584 }
585 spin_lock(&sb_lock);
586 count -= pruned;
587
588
589
590
591 if (__put_super_and_need_restart(sb) && count > 0) {
592 spin_unlock(&sb_lock);
593 goto restart;
594 }
595 }
596 spin_unlock(&sb_lock);
597 spin_unlock(&dcache_lock);
598}
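
/*
 * shrink_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to
 * free the dcache before unmounting a file system.
 */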
608void shrink_dcache_sb(struct super_block * sb)
609{
610 __shrink_dcache_sb(sb, NULL, 0);
611}
618static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
619{
620 struct dentry *parent;
621 unsigned detached = 0;
622
623 BUG_ON(!IS_ROOT(dentry));
624
625
626 spin_lock(&dcache_lock);
627 dentry_lru_del_init(dentry);
628 __d_drop(dentry);
629 spin_unlock(&dcache_lock);
630
631 for (;;) {
632
633 while (!list_empty(&dentry->d_subdirs)) {
634 struct dentry *loop;
635
636
637
638 spin_lock(&dcache_lock);
639 list_for_each_entry(loop, &dentry->d_subdirs,
640 d_u.d_child) {
641 dentry_lru_del_init(loop);
642 __d_drop(loop);
643 cond_resched_lock(&dcache_lock);
644 }
645 spin_unlock(&dcache_lock);
646
647
648 dentry = list_entry(dentry->d_subdirs.next,
649 struct dentry, d_u.d_child);
650 }
651
652
653
654 do {
655 struct inode *inode;
656
657 if (atomic_read(&dentry->d_count) != 0) {
658 printk(KERN_ERR
659 "BUG: Dentry %p{i=%lx,n=%s}"
660 " still in use (%d)"
661 " [unmount of %s %s]\n",
662 dentry,
663 dentry->d_inode ?
664 dentry->d_inode->i_ino : 0UL,
665 dentry->d_name.name,
666 atomic_read(&dentry->d_count),
667 dentry->d_sb->s_type->name,
668 dentry->d_sb->s_id);
669 BUG();
670 }
671
672 if (IS_ROOT(dentry))
673 parent = NULL;
674 else {
675 parent = dentry->d_parent;
676 atomic_dec(&parent->d_count);
677 }
678
679 list_del(&dentry->d_u.d_child);
680 detached++;
681
682 inode = dentry->d_inode;
683 if (inode) {
684 dentry->d_inode = NULL;
685 list_del_init(&dentry->d_alias);
686 if (dentry->d_op && dentry->d_op->d_iput)
687 dentry->d_op->d_iput(dentry, inode);
688 else
689 iput(inode);
690 }
691
692 d_free(dentry);
693
694
695
696
697 if (!parent)
698 goto out;
699
700 dentry = parent;
701
702 } while (list_empty(&dentry->d_subdirs));
703
704 dentry = list_entry(dentry->d_subdirs.next,
705 struct dentry, d_u.d_child);
706 }
707out:
708
709 spin_lock(&dcache_lock);
710 dentry_stat.nr_dentry -= detached;
711 spin_unlock(&dcache_lock);
712}
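
/*
 * shrink_dcache_for_umount - destroy all dentries of an unmounting fs
 * @sb: superblock being unmounted
 *
 * Destroy the dentry tree anchored at sb->s_root as well as any
 * anonymous dentries on sb->s_anon. The caller must already hold
 * s_umount exclusively; a dentry that is still in use at this point
 * is reported and triggers a BUG().
 */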
725void shrink_dcache_for_umount(struct super_block *sb)
726{
727 struct dentry *dentry;
728
729 if (down_read_trylock(&sb->s_umount))
730 BUG();
731
732 dentry = sb->s_root;
733 sb->s_root = NULL;
734 atomic_dec(&dentry->d_count);
735 shrink_dcache_for_umount_subtree(dentry);
736
737 while (!hlist_empty(&sb->s_anon)) {
738 dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
739 shrink_dcache_for_umount_subtree(dentry);
740 }
741}
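
/*
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check
 *
 * Return 1 if the parent or any of its descendants is a mountpoint,
 * 0 otherwise.
 */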
757int have_submounts(struct dentry *parent)
758{
759 struct dentry *this_parent = parent;
760 struct list_head *next;
761
762 spin_lock(&dcache_lock);
763 if (d_mountpoint(parent))
764 goto positive;
765repeat:
766 next = this_parent->d_subdirs.next;
767resume:
768 while (next != &this_parent->d_subdirs) {
769 struct list_head *tmp = next;
770 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
771 next = tmp->next;
772
773 if (d_mountpoint(dentry))
774 goto positive;
775 if (!list_empty(&dentry->d_subdirs)) {
776 this_parent = dentry;
777 goto repeat;
778 }
779 }
780
781
782
783 if (this_parent != parent) {
784 next = this_parent->d_u.d_child.next;
785 this_parent = this_parent->d_parent;
786 goto resume;
787 }
788 spin_unlock(&dcache_lock);
789 return 0;
790positive:
791 spin_unlock(&dcache_lock);
792 return 1;
793}
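
/*
 * select_parent - collect unused dentries under a given point
 * @parent: parent dentry
 *
 * Walk the subtree below @parent and move every unused dentry to the
 * tail of its superblock's LRU list so that __shrink_dcache_sb() will
 * prune it first. Returns the number of dentries found; the walk bails
 * out early if rescheduling is needed once something has been found,
 * so callers loop until zero is returned.
 */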
809static int select_parent(struct dentry * parent)
810{
811 struct dentry *this_parent = parent;
812 struct list_head *next;
813 int found = 0;
814
815 spin_lock(&dcache_lock);
816repeat:
817 next = this_parent->d_subdirs.next;
818resume:
819 while (next != &this_parent->d_subdirs) {
820 struct list_head *tmp = next;
821 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
822 next = tmp->next;
823
824 dentry_lru_del_init(dentry);
825
826
827
828
829 if (!atomic_read(&dentry->d_count)) {
830 dentry_lru_add_tail(dentry);
831 found++;
832 }
839 if (found && need_resched())
840 goto out;
841
842
843
844
845 if (!list_empty(&dentry->d_subdirs)) {
846 this_parent = dentry;
847 goto repeat;
848 }
849 }
850
851
852
853 if (this_parent != parent) {
854 next = this_parent->d_u.d_child.next;
855 this_parent = this_parent->d_parent;
856 goto resume;
857 }
858out:
859 spin_unlock(&dcache_lock);
860 return found;
861}
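
/*
 * shrink_dcache_parent - prune the dcache below a given point
 * @parent: dentry to prune below
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */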
870void shrink_dcache_parent(struct dentry * parent)
871{
872 struct super_block *sb = parent->d_sb;
873 int found;
874
875 while ((found = select_parent(parent)) != 0)
876 __shrink_dcache_sb(sb, &found, 0);
877}
891static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
892{
893 if (nr) {
894 if (!(gfp_mask & __GFP_FS))
895 return -1;
896 prune_dcache(nr);
897 }
898 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
899}
900
901static struct shrinker dcache_shrinker = {
902 .shrink = shrink_dcache_memory,
903 .seeks = DEFAULT_SEEKS,
904};
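
/*
 * d_alloc - allocate a dcache entry
 * @parent: parent of the entry to allocate, or NULL for a root entry
 * @name: qstr of the name
 *
 * Allocates a dentry; returns NULL if there is insufficient memory.
 * The name is copied, so the qstr passed in may be reused after this
 * call. A non-NULL parent is pinned with dget() and the new entry is
 * linked into its d_subdirs list.
 */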
916struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
917{
918 struct dentry *dentry;
919 char *dname;
920
921 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
922 if (!dentry)
923 return NULL;
924
925 if (name->len > DNAME_INLINE_LEN-1) {
926 dname = kmalloc(name->len + 1, GFP_KERNEL);
927 if (!dname) {
928 kmem_cache_free(dentry_cache, dentry);
929 return NULL;
930 }
931 } else {
932 dname = dentry->d_iname;
933 }
934 dentry->d_name.name = dname;
935
936 dentry->d_name.len = name->len;
937 dentry->d_name.hash = name->hash;
938 memcpy(dname, name->name, name->len);
939 dname[name->len] = 0;
940
941 atomic_set(&dentry->d_count, 1);
942 dentry->d_flags = DCACHE_UNHASHED;
943 spin_lock_init(&dentry->d_lock);
944 dentry->d_inode = NULL;
945 dentry->d_parent = NULL;
946 dentry->d_sb = NULL;
947 dentry->d_op = NULL;
948 dentry->d_fsdata = NULL;
949 dentry->d_mounted = 0;
950 INIT_HLIST_NODE(&dentry->d_hash);
951 INIT_LIST_HEAD(&dentry->d_lru);
952 INIT_LIST_HEAD(&dentry->d_subdirs);
953 INIT_LIST_HEAD(&dentry->d_alias);
954
955 if (parent) {
956 dentry->d_parent = dget(parent);
957 dentry->d_sb = parent->d_sb;
958 } else {
959 INIT_LIST_HEAD(&dentry->d_u.d_child);
960 }
961
962 spin_lock(&dcache_lock);
963 if (parent)
964 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
965 dentry_stat.nr_dentry++;
966 spin_unlock(&dcache_lock);
967
968 return dentry;
969}
970
971struct dentry *d_alloc_name(struct dentry *parent, const char *name)
972{
973 struct qstr q;
974
975 q.name = name;
976 q.len = strlen(name);
977 q.hash = full_name_hash(q.name, q.len);
978 return d_alloc(parent, &q);
979}
980
981
982static void __d_instantiate(struct dentry *dentry, struct inode *inode)
983{
984 if (inode)
985 list_add(&dentry->d_alias, &inode->i_dentry);
986 dentry->d_inode = inode;
987 fsnotify_d_instantiate(dentry, inode);
988}
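
/*
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach, or NULL to make the entry negative
 *
 * Fill in inode information in the entry and add it to the inode's
 * alias list. The dentry must not already be associated with an
 * inode; the BUG_ON() on a non-empty d_alias list enforces this.
 */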
1005void d_instantiate(struct dentry *entry, struct inode * inode)
1006{
1007 BUG_ON(!list_empty(&entry->d_alias));
1008 spin_lock(&dcache_lock);
1009 __d_instantiate(entry, inode);
1010 spin_unlock(&dcache_lock);
1011 security_d_instantiate(entry, inode);
1012}
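
/*
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Like d_instantiate(), except that if a dentry with the same name and
 * parent is already attached to the inode, a reference to that existing
 * alias is returned instead and the caller's inode reference is
 * dropped. Returns NULL when @entry itself was instantiated.
 */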
1030static struct dentry *__d_instantiate_unique(struct dentry *entry,
1031 struct inode *inode)
1032{
1033 struct dentry *alias;
1034 int len = entry->d_name.len;
1035 const char *name = entry->d_name.name;
1036 unsigned int hash = entry->d_name.hash;
1037
1038 if (!inode) {
1039 __d_instantiate(entry, NULL);
1040 return NULL;
1041 }
1042
1043 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
1044 struct qstr *qstr = &alias->d_name;
1045
1046 if (qstr->hash != hash)
1047 continue;
1048 if (alias->d_parent != entry->d_parent)
1049 continue;
1050 if (qstr->len != len)
1051 continue;
1052 if (memcmp(qstr->name, name, len))
1053 continue;
1054 dget_locked(alias);
1055 return alias;
1056 }
1057
1058 __d_instantiate(entry, inode);
1059 return NULL;
1060}
1061
1062struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1063{
1064 struct dentry *result;
1065
1066 BUG_ON(!list_empty(&entry->d_alias));
1067
1068 spin_lock(&dcache_lock);
1069 result = __d_instantiate_unique(entry, inode);
1070 spin_unlock(&dcache_lock);
1071
1072 if (!result) {
1073 security_d_instantiate(entry, inode);
1074 return NULL;
1075 }
1076
1077 BUG_ON(!d_unhashed(result));
1078 iput(inode);
1079 return result;
1080}
1081
1082EXPORT_SYMBOL(d_instantiate_unique);
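
/*
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given and instantiate it.
 * NULL is returned if there is insufficient memory or if the inode
 * passed in is NULL.
 */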
1093struct dentry * d_alloc_root(struct inode * root_inode)
1094{
1095 struct dentry *res = NULL;
1096
1097 if (root_inode) {
1098 static const struct qstr name = { .name = "/", .len = 1 };
1099
1100 res = d_alloc(NULL, &name);
1101 if (res) {
1102 res->d_sb = root_inode->i_sb;
1103 res->d_parent = res;
1104 d_instantiate(res, root_inode);
1105 }
1106 }
1107 return res;
1108}
1109
1110static inline struct hlist_head *d_hash(struct dentry *parent,
1111 unsigned long hash)
1112{
1113 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
1114 hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
1115 return dentry_hashtable + (hash & D_HASHMASK);
1116}
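
/*
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Used after a non-lookup operation such as NFS filehandle decoding.
 * If the inode already has a suitable dentry it is returned; otherwise
 * an anonymous, DCACHE_DISCONNECTED dentry is created and hashed on
 * the superblock's s_anon list. The reference to @inode is consumed in
 * all cases: it is either transferred to the returned dentry or
 * released on error, so the caller may pass in the result of an
 * inode-returning call without checking it first. A NULL inode yields
 * ERR_PTR(-ESTALE); other errors are returned as ERR_PTR() values.
 */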
1136struct dentry *d_obtain_alias(struct inode *inode)
1137{
1138 static const struct qstr anonstring = { .name = "" };
1139 struct dentry *tmp;
1140 struct dentry *res;
1141
1142 if (!inode)
1143 return ERR_PTR(-ESTALE);
1144 if (IS_ERR(inode))
1145 return ERR_CAST(inode);
1146
1147 res = d_find_alias(inode);
1148 if (res)
1149 goto out_iput;
1150
1151 tmp = d_alloc(NULL, &anonstring);
1152 if (!tmp) {
1153 res = ERR_PTR(-ENOMEM);
1154 goto out_iput;
1155 }
1156 tmp->d_parent = tmp;
1157
1158 spin_lock(&dcache_lock);
1159 res = __d_find_alias(inode, 0);
1160 if (res) {
1161 spin_unlock(&dcache_lock);
1162 dput(tmp);
1163 goto out_iput;
1164 }
1165
1166
1167 spin_lock(&tmp->d_lock);
1168 tmp->d_sb = inode->i_sb;
1169 tmp->d_inode = inode;
1170 tmp->d_flags |= DCACHE_DISCONNECTED;
1171 tmp->d_flags &= ~DCACHE_UNHASHED;
1172 list_add(&tmp->d_alias, &inode->i_dentry);
1173 hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
1174 spin_unlock(&tmp->d_lock);
1175
1176 spin_unlock(&dcache_lock);
1177 return tmp;
1178
1179 out_iput:
1180 iput(inode);
1181 return res;
1182}
1183EXPORT_SYMBOL(d_obtain_alias);
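
/*
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode
 *
 * If @inode is a directory that already has an IS_ROOT,
 * DCACHE_DISCONNECTED alias (as set up by d_obtain_alias()), that
 * alias is moved into @dentry's place and returned; the caller should
 * then use it instead of @dentry. Otherwise the inode is simply
 * attached to @dentry as with d_add() and NULL is returned. The
 * reference to @inode is consumed either way.
 */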
1201struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1202{
1203 struct dentry *new = NULL;
1204
1205 if (inode && S_ISDIR(inode->i_mode)) {
1206 spin_lock(&dcache_lock);
1207 new = __d_find_alias(inode, 1);
1208 if (new) {
1209 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1210 spin_unlock(&dcache_lock);
1211 security_d_instantiate(new, inode);
1212 d_rehash(dentry);
1213 d_move(new, dentry);
1214 iput(inode);
1215 } else {
1216
1217 __d_instantiate(dentry, inode);
1218 spin_unlock(&dcache_lock);
1219 security_d_instantiate(dentry, inode);
1220 d_rehash(dentry);
1221 }
1222 } else
1223 d_add(dentry, inode);
1224 return new;
1225}
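
/*
 * d_add_ci - lookup or allocate a dentry with a case-exact name
 * @dentry: the negative dentry passed to the parent's lookup method
 * @inode:  the inode found by the case-insensitive lookup
 * @name:   the case-exact name to associate with the returned dentry
 *
 * Used by case-insensitive filesystems so that only the case-exact
 * spelling of a name ends up in the dcache. If a dentry with the exact
 * case already exists it is reused; otherwise a new one is allocated
 * and spliced in via d_splice_alias().
 */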
1243struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1244 struct qstr *name)
1245{
1246 int error;
1247 struct dentry *found;
1248 struct dentry *new;
1254 found = d_hash_and_lookup(dentry->d_parent, name);
1255 if (!found) {
1256 new = d_alloc(dentry->d_parent, name);
1257 if (!new) {
1258 error = -ENOMEM;
1259 goto err_out;
1260 }
1261
1262 found = d_splice_alias(inode, new);
1263 if (found) {
1264 dput(new);
1265 return found;
1266 }
1267 return new;
1268 }
1276 if (found->d_inode) {
1277 if (unlikely(found->d_inode != inode)) {
1278
1279 BUG_ON(!is_bad_inode(inode));
1280 BUG_ON(!is_bad_inode(found->d_inode));
1281 }
1282 iput(inode);
1283 return found;
1284 }
1290 spin_lock(&dcache_lock);
1291 if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
1292 __d_instantiate(found, inode);
1293 spin_unlock(&dcache_lock);
1294 security_d_instantiate(found, inode);
1295 return found;
1296 }
1302 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
1303 dget_locked(new);
1304 spin_unlock(&dcache_lock);
1305 security_d_instantiate(found, inode);
1306 d_move(new, found);
1307 iput(inode);
1308 dput(found);
1309 return new;
1310
1311err_out:
1312 iput(inode);
1313 return ERR_PTR(error);
1314}
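
/*
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question.
 * If the dentry is found its reference count is incremented and the
 * dentry is returned; the caller must use dput() to release it. NULL
 * is returned if the dentry does not exist.
 *
 * __d_lookup() below does the actual RCU-protected hash walk; d_lookup()
 * wraps it in the rename_lock seqlock to guard against a concurrent
 * rename moving the dentry between hash chains.
 */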
1345struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1346{
1347 struct dentry * dentry = NULL;
1348 unsigned long seq;
1349
1350 do {
1351 seq = read_seqbegin(&rename_lock);
1352 dentry = __d_lookup(parent, name);
1353 if (dentry)
1354 break;
1355 } while (read_seqretry(&rename_lock, seq));
1356 return dentry;
1357}
1358
1359struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1360{
1361 unsigned int len = name->len;
1362 unsigned int hash = name->hash;
1363 const unsigned char *str = name->name;
1364 struct hlist_head *head = d_hash(parent,hash);
1365 struct dentry *found = NULL;
1366 struct hlist_node *node;
1367 struct dentry *dentry;
1368
1369 rcu_read_lock();
1370
1371 hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
1372 struct qstr *qstr;
1373
1374 if (dentry->d_name.hash != hash)
1375 continue;
1376 if (dentry->d_parent != parent)
1377 continue;
1378
1379 spin_lock(&dentry->d_lock);
1386 if (dentry->d_parent != parent)
1387 goto next;
1388
1389
1390 if (d_unhashed(dentry))
1391 goto next;
1397 qstr = &dentry->d_name;
1398 if (parent->d_op && parent->d_op->d_compare) {
1399 if (parent->d_op->d_compare(parent, qstr, name))
1400 goto next;
1401 } else {
1402 if (qstr->len != len)
1403 goto next;
1404 if (memcmp(qstr->name, str, len))
1405 goto next;
1406 }
1407
1408 atomic_inc(&dentry->d_count);
1409 found = dentry;
1410 spin_unlock(&dentry->d_lock);
1411 break;
1412next:
1413 spin_unlock(&dentry->d_lock);
1414 }
1415 rcu_read_unlock();
1416
1417 return found;
1418}
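
/*
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: directory to search in
 * @name: qstr of name we wish to find
 *
 * The name is hashed with full_name_hash(), or with the directory's
 * d_hash() operation if one is provided; a negative return from that
 * operation aborts the lookup. Returns the dentry found or NULL.
 */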
1427struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1428{
1429 struct dentry *dentry = NULL;
1436 name->hash = full_name_hash(name->name, name->len);
1437 if (dir->d_op && dir->d_op->d_hash) {
1438 if (dir->d_op->d_hash(dir, name) < 0)
1439 goto out;
1440 }
1441 dentry = d_lookup(dir, name);
1442out:
1443 return dentry;
1444}
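
/*
 * d_validate - verify a dentry of dubious provenance and dget() it
 * @dentry: dentry pointer to verify
 * @dparent: supposed parent of the dentry
 *
 * The pointer must refer to a valid dentry_cache object, have the
 * expected parent and still be present on that parent's hash chain.
 * Returns 1 (with a reference taken) if the dentry checks out,
 * 0 otherwise.
 */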
1456int d_validate(struct dentry *dentry, struct dentry *dparent)
1457{
1458 struct hlist_head *base;
1459 struct hlist_node *lhp;
1460
1461
1462 if (!kmem_ptr_validate(dentry_cache, dentry))
1463 goto out;
1464
1465 if (dentry->d_parent != dparent)
1466 goto out;
1467
1468 spin_lock(&dcache_lock);
1469 base = d_hash(dparent, dentry->d_name.hash);
1470 hlist_for_each(lhp,base) {
1471
1472
1473
1474 if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
1475 __dget_locked(dentry);
1476 spin_unlock(&dcache_lock);
1477 return 1;
1478 }
1479 }
1480 spin_unlock(&dcache_lock);
1481out:
1482 return 0;
1483}
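
/*
 * d_delete - delete a dentry
 * @dentry: dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise remove
 * it from the hash queues so it can be deleted later. When we are the
 * only user, the inode is dropped in place via dentry_iput(); in all
 * cases an fsnotify delete event is generated.
 */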
1506void d_delete(struct dentry * dentry)
1507{
1508 int isdir = 0;
1509
1510
1511
1512 spin_lock(&dcache_lock);
1513 spin_lock(&dentry->d_lock);
1514 isdir = S_ISDIR(dentry->d_inode->i_mode);
1515 if (atomic_read(&dentry->d_count) == 1) {
1516 dentry_iput(dentry);
1517 fsnotify_nameremove(dentry, isdir);
1518 return;
1519 }
1520
1521 if (!d_unhashed(dentry))
1522 __d_drop(dentry);
1523
1524 spin_unlock(&dentry->d_lock);
1525 spin_unlock(&dcache_lock);
1526
1527 fsnotify_nameremove(dentry, isdir);
1528}
1529
1530static void __d_rehash(struct dentry * entry, struct hlist_head *list)
1531{
1532
1533 entry->d_flags &= ~DCACHE_UNHASHED;
1534 hlist_add_head_rcu(&entry->d_hash, list);
1535}
1536
1537static void _d_rehash(struct dentry * entry)
1538{
1539 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
1540}
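
/*
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */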
1549void d_rehash(struct dentry * entry)
1550{
1551 spin_lock(&dcache_lock);
1552 spin_lock(&entry->d_lock);
1553 _d_rehash(entry);
1554 spin_unlock(&entry->d_lock);
1555 spin_unlock(&dcache_lock);
1556}
1569static void switch_names(struct dentry *dentry, struct dentry *target)
1570{
1571 if (dname_external(target)) {
1572 if (dname_external(dentry)) {
1573
1574
1575
1576 swap(target->d_name.name, dentry->d_name.name);
1577 } else {
1578
1579
1580
1581
1582 memcpy(target->d_iname, dentry->d_name.name,
1583 dentry->d_name.len + 1);
1584 dentry->d_name.name = target->d_name.name;
1585 target->d_name.name = target->d_iname;
1586 }
1587 } else {
1588 if (dname_external(dentry)) {
1589
1590
1591
1592
1593 memcpy(dentry->d_iname, target->d_name.name,
1594 target->d_name.len + 1);
1595 target->d_name.name = dentry->d_name.name;
1596 dentry->d_name.name = dentry->d_iname;
1597 } else {
1598
1599
1600
1601 memcpy(dentry->d_iname, target->d_name.name,
1602 target->d_name.len + 1);
1603 dentry->d_name.len = target->d_name.len;
1604 return;
1605 }
1606 }
1607 swap(dentry->d_name.len, target->d_name.len);
1608}
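
/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name: @dentry is
 * rehashed under @target's parent and name, @target is unhashed, and
 * the two entries exchange names, hashes and parents. Negative dcache
 * entries should not be moved in this way. The caller must hold
 * dcache_lock; rename_lock and both d_locks (ordered by address) are
 * taken here.
 */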
1630static void d_move_locked(struct dentry * dentry, struct dentry * target)
1631{
1632 struct hlist_head *list;
1633
1634 if (!dentry->d_inode)
1635 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
1636
1637 write_seqlock(&rename_lock);
1638
1639
1640
1641 if (target < dentry) {
1642 spin_lock(&target->d_lock);
1643 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1644 } else {
1645 spin_lock(&dentry->d_lock);
1646 spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
1647 }
1648
1649
1650 if (d_unhashed(dentry))
1651 goto already_unhashed;
1652
1653 hlist_del_rcu(&dentry->d_hash);
1654
1655already_unhashed:
1656 list = d_hash(target->d_parent, target->d_name.hash);
1657 __d_rehash(dentry, list);
1658
1659
1660 __d_drop(target);
1661
1662 list_del(&dentry->d_u.d_child);
1663 list_del(&target->d_u.d_child);
1664
1665
1666 switch_names(dentry, target);
1667 swap(dentry->d_name.hash, target->d_name.hash);
1668
1669
1670 if (IS_ROOT(dentry)) {
1671 dentry->d_parent = target->d_parent;
1672 target->d_parent = target;
1673 INIT_LIST_HEAD(&target->d_u.d_child);
1674 } else {
1675 swap(dentry->d_parent, target->d_parent);
1676
1677
1678 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
1679 }
1680
1681 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
1682 spin_unlock(&target->d_lock);
1683 fsnotify_d_move(dentry);
1684 spin_unlock(&dentry->d_lock);
1685 write_sequnlock(&rename_lock);
1686}
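
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way; this is simply
 * d_move_locked() with dcache_lock taken around it.
 */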
1697void d_move(struct dentry * dentry, struct dentry * target)
1698{
1699 spin_lock(&dcache_lock);
1700 d_move_locked(dentry, target);
1701 spin_unlock(&dcache_lock);
1702}
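
/*
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of @p2 which is a child of @p1, if @p1
 * is an ancestor of @p2, else NULL.
 */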
1712struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
1713{
1714 struct dentry *p;
1715
1716 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
1717 if (p->d_parent == p1)
1718 return p;
1719 }
1720 return NULL;
1721}
1732static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
1733 __releases(dcache_lock)
1734{
1735 struct mutex *m1 = NULL, *m2 = NULL;
1736 struct dentry *ret;
1737
1738
1739 if (alias->d_parent == dentry->d_parent)
1740 goto out_unalias;
1741
1742
1743 ret = ERR_PTR(-ELOOP);
1744 if (d_ancestor(alias, dentry))
1745 goto out_err;
1746
1747
1748 ret = ERR_PTR(-EBUSY);
1749 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
1750 goto out_err;
1751 m1 = &dentry->d_sb->s_vfs_rename_mutex;
1752 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
1753 goto out_err;
1754 m2 = &alias->d_parent->d_inode->i_mutex;
1755out_unalias:
1756 d_move_locked(alias, dentry);
1757 ret = alias;
1758out_err:
1759 spin_unlock(&dcache_lock);
1760 if (m2)
1761 mutex_unlock(m2);
1762 if (m1)
1763 mutex_unlock(m1);
1764 return ret;
1765}
1771static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
1772{
1773 struct dentry *dparent, *aparent;
1774
1775 switch_names(dentry, anon);
1776 swap(dentry->d_name.hash, anon->d_name.hash);
1777
1778 dparent = dentry->d_parent;
1779 aparent = anon->d_parent;
1780
1781 dentry->d_parent = (aparent == anon) ? dentry : aparent;
1782 list_del(&dentry->d_u.d_child);
1783 if (!IS_ROOT(dentry))
1784 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
1785 else
1786 INIT_LIST_HEAD(&dentry->d_u.d_child);
1787
1788 anon->d_parent = (dparent == dentry) ? anon : dparent;
1789 list_del(&anon->d_u.d_child);
1790 if (!IS_ROOT(anon))
1791 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
1792 else
1793 INIT_LIST_HEAD(&anon->d_u.d_child);
1794
1795 anon->d_flags &= ~DCACHE_DISCONNECTED;
1796}
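
/*
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Bind @inode to @dentry. If the inode is a directory that already has
 * an alias, that alias is moved into @dentry's place (a disconnected
 * root alias is materialised directly; a connected one is relocated
 * via __d_unalias()) and returned, with the @inode reference dropped.
 * Otherwise @dentry itself is instantiated and hashed and NULL is
 * returned. May return an ERR_PTR() if an alias cannot be relocated.
 */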
1806struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
1807{
1808 struct dentry *actual;
1809
1810 BUG_ON(!d_unhashed(dentry));
1811
1812 spin_lock(&dcache_lock);
1813
1814 if (!inode) {
1815 actual = dentry;
1816 __d_instantiate(dentry, NULL);
1817 goto found_lock;
1818 }
1819
1820 if (S_ISDIR(inode->i_mode)) {
1821 struct dentry *alias;
1822
1823
1824 alias = __d_find_alias(inode, 0);
1825 if (alias) {
1826 actual = alias;
1827
1828
1829 if (IS_ROOT(alias)) {
1830 spin_lock(&alias->d_lock);
1831 __d_materialise_dentry(dentry, alias);
1832 __d_drop(alias);
1833 goto found;
1834 }
1835
1836 actual = __d_unalias(dentry, alias);
1837 if (IS_ERR(actual))
1838 dput(alias);
1839 goto out_nolock;
1840 }
1841 }
1842
1843
1844 actual = __d_instantiate_unique(dentry, inode);
1845 if (!actual)
1846 actual = dentry;
1847 else if (unlikely(!d_unhashed(actual)))
1848 goto shouldnt_be_hashed;
1849
1850found_lock:
1851 spin_lock(&actual->d_lock);
1852found:
1853 _d_rehash(actual);
1854 spin_unlock(&actual->d_lock);
1855 spin_unlock(&dcache_lock);
1856out_nolock:
1857 if (actual == dentry) {
1858 security_d_instantiate(dentry, inode);
1859 return NULL;
1860 }
1861
1862 iput(inode);
1863 return actual;
1864
1865shouldnt_be_hashed:
1866 spin_unlock(&dcache_lock);
1867 BUG();
1868}
1869
1870static int prepend(char **buffer, int *buflen, const char *str, int namelen)
1871{
1872 *buflen -= namelen;
1873 if (*buflen < 0)
1874 return -ENAMETOOLONG;
1875 *buffer -= namelen;
1876 memcpy(*buffer, str, namelen);
1877 return 0;
1878}
1879
1880static int prepend_name(char **buffer, int *buflen, struct qstr *name)
1881{
1882 return prepend(buffer, buflen, name->name, name->len);
1883}
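
/*
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been
 * deleted the string " (deleted)" is appended. Returns a pointer into
 * the buffer, or an ERR_PTR() if the path was too long. If the path is
 * not reachable from the supplied root, the global root is used and
 * @root is updated to refer to it. The caller holds dcache_lock.
 */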
1903char *__d_path(const struct path *path, struct path *root,
1904 char *buffer, int buflen)
1905{
1906 struct dentry *dentry = path->dentry;
1907 struct vfsmount *vfsmnt = path->mnt;
1908 char *end = buffer + buflen;
1909 char *retval;
1910
1911 spin_lock(&vfsmount_lock);
1912 prepend(&end, &buflen, "\0", 1);
1913 if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
1914 (prepend(&end, &buflen, " (deleted)", 10) != 0))
1915 goto Elong;
1916
1917 if (buflen < 1)
1918 goto Elong;
1919
1920 retval = end-1;
1921 *retval = '/';
1922
1923 for (;;) {
1924 struct dentry * parent;
1925
1926 if (dentry == root->dentry && vfsmnt == root->mnt)
1927 break;
1928 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
1929
1930 if (vfsmnt->mnt_parent == vfsmnt) {
1931 goto global_root;
1932 }
1933 dentry = vfsmnt->mnt_mountpoint;
1934 vfsmnt = vfsmnt->mnt_parent;
1935 continue;
1936 }
1937 parent = dentry->d_parent;
1938 prefetch(parent);
1939 if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
1940 (prepend(&end, &buflen, "/", 1) != 0))
1941 goto Elong;
1942 retval = end;
1943 dentry = parent;
1944 }
1945
1946out:
1947 spin_unlock(&vfsmount_lock);
1948 return retval;
1949
1950global_root:
1951 retval += 1;
1952 if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
1953 goto Elong;
1954 root->mnt = vfsmnt;
1955 root->dentry = dentry;
1956 goto out;
1957
1958Elong:
1959 retval = ERR_PTR(-ENAMETOOLONG);
1960 goto out;
1961}
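
/*
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name relative to the caller's
 * filesystem root. If the entry has been deleted the string
 * " (deleted)" is appended. Returns a pointer into the buffer or an
 * ERR_PTR() if the path was too long. Pseudo filesystems may provide
 * a d_dname() operation, which is called here instead of walking the
 * tree.
 */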
1979char *d_path(const struct path *path, char *buf, int buflen)
1980{
1981 char *res;
1982 struct path root;
1983 struct path tmp;
1992 if (path->dentry->d_op && path->dentry->d_op->d_dname)
1993 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
1994
	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);
1999 spin_lock(&dcache_lock);
2000 tmp = root;
2001 res = __d_path(path, &tmp, buf, buflen);
2002 spin_unlock(&dcache_lock);
2003 path_put(&root);
2004 return res;
2005}
2010char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2011 const char *fmt, ...)
2012{
2013 va_list args;
2014 char temp[64];
2015 int sz;
2016
2017 va_start(args, fmt);
2018 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2019 va_end(args);
2020
2021 if (sz > sizeof(temp) || sz > buflen)
2022 return ERR_PTR(-ENAMETOOLONG);
2023
2024 buffer += buflen - sz;
2025 return memcpy(buffer, temp, sz);
2026}
2027
2031char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2032{
2033 char *end = buf + buflen;
2034 char *retval;
2035
2036 spin_lock(&dcache_lock);
2037 prepend(&end, &buflen, "\0", 1);
2038 if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
2039 (prepend(&end, &buflen, "//deleted", 9) != 0))
2040 goto Elong;
2041 if (buflen < 1)
2042 goto Elong;
2043
2044 retval = end-1;
2045 *retval = '/';
2046
2047 while (!IS_ROOT(dentry)) {
2048 struct dentry *parent = dentry->d_parent;
2049
2050 prefetch(parent);
2051 if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
2052 (prepend(&end, &buflen, "/", 1) != 0))
2053 goto Elong;
2054
2055 retval = end;
2056 dentry = parent;
2057 }
2058 spin_unlock(&dcache_lock);
2059 return retval;
2060Elong:
2061 spin_unlock(&dcache_lock);
2062 return ERR_PTR(-ENAMETOOLONG);
2063}
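
/*
 * sys_getcwd - get the pathname of the current working directory
 *
 * NOTE! The user-level library version returns a character pointer.
 * The kernel system call just returns the length of the buffer filled
 * (which includes the ending '\0' character), or a negative error
 * value. -ENOENT is returned if the current directory has been
 * unlinked, -ERANGE if the buffer is too small.
 */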
2083SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2084{
2085 int error;
2086 struct path pwd, root;
2087 char *page = (char *) __get_free_page(GFP_USER);
2088
2089 if (!page)
2090 return -ENOMEM;
2091
	read_lock(&current->fs->lock);
	pwd = current->fs->pwd;
	path_get(&pwd);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);
2098
2099 error = -ENOENT;
2100
2101 spin_lock(&dcache_lock);
2102 if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) {
2103 unsigned long len;
2104 struct path tmp = root;
2105 char * cwd;
2106
2107 cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
2108 spin_unlock(&dcache_lock);
2109
2110 error = PTR_ERR(cwd);
2111 if (IS_ERR(cwd))
2112 goto out;
2113
2114 error = -ERANGE;
2115 len = PAGE_SIZE + page - cwd;
2116 if (len <= size) {
2117 error = len;
2118 if (copy_to_user(buf, cwd, len))
2119 error = -EFAULT;
2120 }
2121 } else
2122 spin_unlock(&dcache_lock);
2123
2124out:
2125 path_put(&pwd);
2126 path_put(&root);
2127 free_page((unsigned long) page);
2128 return error;
2129}
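
/*
 * is_subdir - is new_dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if @new_dentry is a subdirectory of (or the same as)
 * @old_dentry, 0 otherwise. The check is performed under the
 * rename_lock seqlock so a concurrent rename cannot yield a stale
 * answer.
 */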
2147int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2148{
2149 int result;
2150 unsigned long seq;
2151
2152 if (new_dentry == old_dentry)
2153 return 1;
2159 rcu_read_lock();
2160 do {
2161
2162 seq = read_seqbegin(&rename_lock);
2163 if (d_ancestor(old_dentry, new_dentry))
2164 result = 1;
2165 else
2166 result = 0;
2167 } while (read_seqretry(&rename_lock, seq));
2168 rcu_read_unlock();
2169
2170 return result;
2171}
2172
2173void d_genocide(struct dentry *root)
2174{
2175 struct dentry *this_parent = root;
2176 struct list_head *next;
2177
2178 spin_lock(&dcache_lock);
2179repeat:
2180 next = this_parent->d_subdirs.next;
2181resume:
2182 while (next != &this_parent->d_subdirs) {
2183 struct list_head *tmp = next;
2184 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
2185 next = tmp->next;
2186 if (d_unhashed(dentry)||!dentry->d_inode)
2187 continue;
2188 if (!list_empty(&dentry->d_subdirs)) {
2189 this_parent = dentry;
2190 goto repeat;
2191 }
2192 atomic_dec(&dentry->d_count);
2193 }
2194 if (this_parent != root) {
2195 next = this_parent->d_u.d_child.next;
2196 atomic_dec(&this_parent->d_count);
2197 this_parent = this_parent->d_parent;
2198 goto resume;
2199 }
2200 spin_unlock(&dcache_lock);
2201}
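
/*
 * find_inode_number - check for a dentry with a name
 * @dir: directory to check
 * @name: name to find
 *
 * Check whether a dentry already exists for the given name, and return
 * the inode number if it has an inode. Otherwise 0 is returned.
 */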
2217ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2218{
2219 struct dentry * dentry;
2220 ino_t ino = 0;
2221
2222 dentry = d_hash_and_lookup(dir, name);
2223 if (dentry) {
2224 if (dentry->d_inode)
2225 ino = dentry->d_inode->i_ino;
2226 dput(dentry);
2227 }
2228 return ino;
2229}
2230
2231static __initdata unsigned long dhash_entries;
2232static int __init set_dhash_entries(char *str)
2233{
2234 if (!str)
2235 return 0;
2236 dhash_entries = simple_strtoul(str, &str, 0);
2237 return 1;
2238}
2239__setup("dhash_entries=", set_dhash_entries);
2240
2241static void __init dcache_init_early(void)
2242{
2243 int loop;
2244
2245
2246
2247
2248 if (hashdist)
2249 return;
2250
2251 dentry_hashtable =
2252 alloc_large_system_hash("Dentry cache",
2253 sizeof(struct hlist_head),
2254 dhash_entries,
2255 13,
2256 HASH_EARLY,
2257 &d_hash_shift,
2258 &d_hash_mask,
2259 0);
2260
2261 for (loop = 0; loop < (1 << d_hash_shift); loop++)
2262 INIT_HLIST_HEAD(&dentry_hashtable[loop]);
2263}
2264
2265static void __init dcache_init(void)
2266{
2267 int loop;
2274 dentry_cache = KMEM_CACHE(dentry,
2275 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
2276
2277 register_shrinker(&dcache_shrinker);
2278
2279
2280 if (!hashdist)
2281 return;
2282
2283 dentry_hashtable =
2284 alloc_large_system_hash("Dentry cache",
2285 sizeof(struct hlist_head),
2286 dhash_entries,
2287 13,
2288 0,
2289 &d_hash_shift,
2290 &d_hash_mask,
2291 0);
2292
2293 for (loop = 0; loop < (1 << d_hash_shift); loop++)
2294 INIT_HLIST_HEAD(&dentry_hashtable[loop]);
2295}
2296
2297
2298struct kmem_cache *names_cachep __read_mostly;
2299
2300EXPORT_SYMBOL(d_genocide);
2301
2302void __init vfs_caches_init_early(void)
2303{
2304 dcache_init_early();
2305 inode_init_early();
2306}
2307
2308void __init vfs_caches_init(unsigned long mempages)
2309{
2310 unsigned long reserve;
2311
2312
2313
2314
2315 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
2316 mempages -= reserve;
2317
2318 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
2319 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2320
2321 dcache_init();
2322 inode_init();
2323 files_init(mempages);
2324 mnt_init();
2325 bdev_cache_init();
2326 chrdev_init();
2327}
2328
2329EXPORT_SYMBOL(d_alloc);
2330EXPORT_SYMBOL(d_alloc_root);
2331EXPORT_SYMBOL(d_delete);
2332EXPORT_SYMBOL(d_find_alias);
2333EXPORT_SYMBOL(d_instantiate);
2334EXPORT_SYMBOL(d_invalidate);
2335EXPORT_SYMBOL(d_lookup);
2336EXPORT_SYMBOL(d_move);
2337EXPORT_SYMBOL_GPL(d_materialise_unique);
2338EXPORT_SYMBOL(d_path);
2339EXPORT_SYMBOL(d_prune_aliases);
2340EXPORT_SYMBOL(d_rehash);
2341EXPORT_SYMBOL(d_splice_alias);
2342EXPORT_SYMBOL(d_add_ci);
2343EXPORT_SYMBOL(d_validate);
2344EXPORT_SYMBOL(dget_locked);
2345EXPORT_SYMBOL(dput);
2346EXPORT_SYMBOL(find_inode_number);
2347EXPORT_SYMBOL(have_submounts);
2348EXPORT_SYMBOL(names_cachep);
2349EXPORT_SYMBOL(shrink_dcache_parent);
2350EXPORT_SYMBOL(shrink_dcache_sb);
2351