1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/ratelimit.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/fs.h>
22#include <linux/fscrypt.h>
23#include <linux/fsnotify.h>
24#include <linux/slab.h>
25#include <linux/init.h>
26#include <linux/hash.h>
27#include <linux/cache.h>
28#include <linux/export.h>
29#include <linux/security.h>
30#include <linux/seqlock.h>
31#include <linux/memblock.h>
32#include <linux/bit_spinlock.h>
33#include <linux/rculist_bl.h>
34#include <linux/list_lru.h>
35#include "internal.h"
36#include "mount.h"
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
/*
 * Percentage by which the VFS is biased towards reclaiming dentries and
 * inodes relative to page cache; 100 is the neutral default.  Exposed as
 * the vfs_cache_pressure sysctl.
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/*
 * Taken for writing across cross-directory renames and other topology
 * changes; lockless path walkers sample it and retry on change.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

/* Slab cache all struct dentry objects are allocated from. */
static struct kmem_cache *dentry_cache __ro_after_init;

/* Well-known, shared qstr instances used throughout the VFS. */
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
const struct qstr dotdot_name = QSTR_INIT("..", 2);
EXPORT_SYMBOL(dotdot_name);
89
90
91
92
93
94
95
96
97
98
/*
 * Global dentry hash table and its shift, both set up once at boot and
 * read-only afterwards.  d_hash() indexes the table with hash >> d_hash_shift,
 * so d_hash_shift presumably holds (bits of hash) - (table order) —
 * NOTE(review): confirm against the hashtable init code, not visible here.
 */
static unsigned int d_hash_shift __ro_after_init;

static struct hlist_bl_head *dentry_hashtable __ro_after_init;
102
103static inline struct hlist_bl_head *d_hash(unsigned int hash)
104{
105 return dentry_hashtable + (hash >> d_hash_shift);
106}
107
108#define IN_LOOKUP_SHIFT 10
109static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
110
111static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
112 unsigned int hash)
113{
114 hash += (unsigned long) parent / L1_CACHE_BYTES;
115 return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
116}
117
/* Layout of the fs/dentry-state sysctl output (six longs). */
struct dentry_stat_t {
	long nr_dentry;		/* total dentries allocated */
	long nr_unused;		/* dentries on LRU (zero refcount) */
	long age_limit;		/* age in seconds (historical) */
	long want_pages;	/* pages requested by system (historical) */
	long nr_negative;	/* unused negative dentries */
	long dummy;		/* reserved, kept for ABI layout */
};

/* Per-cpu counters; summed on demand rather than kept globally exact. */
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
130
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Snapshot published via /proc/sys/fs/dentry-state.  The counter fields
 * are refreshed from the per-cpu variables on every read; age_limit and
 * want_pages are historical and never updated.
 */
static struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};
136
137
138
139
140
141
142
143
144
145
146
147
148
149static long get_nr_dentry(void)
150{
151 int i;
152 long sum = 0;
153 for_each_possible_cpu(i)
154 sum += per_cpu(nr_dentry, i);
155 return sum < 0 ? 0 : sum;
156}
157
158static long get_nr_dentry_unused(void)
159{
160 int i;
161 long sum = 0;
162 for_each_possible_cpu(i)
163 sum += per_cpu(nr_dentry_unused, i);
164 return sum < 0 ? 0 : sum;
165}
166
167static long get_nr_dentry_negative(void)
168{
169 int i;
170 long sum = 0;
171
172 for_each_possible_cpu(i)
173 sum += per_cpu(nr_dentry_negative, i);
174 return sum < 0 ? 0 : sum;
175}
176
/*
 * Sysctl handler for fs/dentry-state: refresh the aggregate counters
 * from the per-cpu variables, then let the generic ulong helper format
 * the table (read-only: mode is 0444).
 */
static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	dentry_stat.nr_negative = get_nr_dentry_negative();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
185
/* Table registered under /proc/sys/fs; maxlen covers all six longs. */
static struct ctl_table fs_dcache_sysctls[] = {
	{
		.procname	= "dentry-state",
		.data		= &dentry_stat,
		.maxlen		= 6*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_dentry,
	},
	{ }
};
196
/* Register the fs/dentry-state sysctl at fs-initcall time. */
static int __init init_fs_dcache_sysctls(void)
{
	register_sysctl_init("fs", fs_dcache_sysctls);
	return 0;
}
fs_initcall(init_fs_dcache_sysctls);
203#endif
204
205
206
207
208
209#ifdef CONFIG_DCACHE_WORD_ACCESS
210
211#include <asm/word-at-a-time.h>
212
213
214
215
216
217
218
219
220
/*
 * Compare two name strings a word at a time.  Returns 0 on match, 1
 * otherwise.  'cs' is the dentry name, which the dcache guarantees is
 * safe to over-read to a word boundary (read_word_at_a_time); 'ct' is
 * the caller's buffer with no such guarantee, so load_unaligned_zeropad()
 * is used — it zero-fills instead of faulting if the read crosses into
 * an unmapped page.  The final partial word is masked down to the
 * remaining 'tcount' bytes before comparing.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
241
242#else
243
/*
 * Byte-at-a-time fallback name comparison: 0 on match, 1 on mismatch.
 * Callers always pass tcount >= 1.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	for (;;) {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		if (!--tcount)
			return 0;
	}
}
255
256#endif
257
/*
 * Compare a dentry's name against ct/tcount.  This may be called during
 * RCU-walk with the dentry being renamed concurrently; READ_ONCE()
 * ensures we load the name pointer exactly once so we compare against
 * one consistent string.  It may not be the *right* name (a rename may
 * have happened), but that is fine: the caller re-validates d_seq after
 * the compare, so a stale-but-consistent string only causes a harmless
 * retry or miss, never a crash.
 */
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
280
/*
 * Out-of-line storage for names too long for dentry->d_iname.
 * Refcounted so name snapshots and d_move() can share one copy; the
 * rcu_head overlays the count since they are never needed at the same
 * time (the head is only used once the count has hit zero).
 */
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];	/* flexible array, NUL-terminated */
};

/* Map a dentry's (known-external) name pointer back to its container. */
static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
293
/* RCU callback: free a dentry whose name was stored inline. */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}
300
/* RCU callback: free a dentry plus its (last-reference) external name. */
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}
307
308static inline int dname_external(const struct dentry *dentry)
309{
310 return dentry->d_name.name != dentry->d_iname;
311}
312
/*
 * Capture a stable copy of the dentry's name under d_lock.  External
 * names are shared by bumping their refcount; inline names are copied
 * (including the NUL) into the snapshot's own buffer.  Pair with
 * release_dentry_name_snapshot().
 */
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	name->name = dentry->d_name;
	if (unlikely(dname_external(dentry))) {
		atomic_inc(&external_name(dentry)->u.count);
	} else {
		memcpy(name->inline_name, dentry->d_iname,
		       dentry->d_name.len + 1);
		name->name.name = name->inline_name;
	}
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
327
/*
 * Drop a name snapshot.  Only snapshots that point at a shared external
 * name need work: drop the refcount and, if it was the last reference,
 * free the external_name after a grace period (lockless readers may
 * still be looking at it).
 */
void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name.name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name.name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
338
/*
 * Make a dentry positive: set d_inode and the type bits in d_flags.
 * The smp_store_release() on d_flags publishes the type bits only after
 * d_inode is visible, so lockless d_is_*() checkers that see the new
 * type are guaranteed to see the inode too.
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	smp_store_release(&dentry->d_flags, flags);
}
351
/*
 * Make a dentry negative: clear the type bits and d_inode.  If the
 * dentry sits on the LRU it just became a negative unused dentry, so
 * bump the per-cpu negative counter.
 */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_inc(nr_dentry_negative);
}
362
/*
 * Final disposal of a dead dentry.  If it owns the last reference to an
 * external name, both are freed together after an RCU grace period;
 * otherwise only the dentry is freed — directly for DCACHE_NORCU
 * dentries (never visible to RCU walkers), via call_rcu() for the rest.
 */
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
379
380
381
382
383
/*
 * Release the dentry's inode, turning the dentry negative.  Called with
 * both d_lock and i_lock held; drops both.  The d_seq bump makes
 * concurrent RCU-walkers retry.  The inode reference is handed to
 * ->d_iput() if the fs provides one, else dropped with iput().
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
/*
 * LRU/shrink-list state helpers.  A dentry's d_lru field is on the
 * superblock LRU (DCACHE_LRU_LIST alone), on a private shrink list
 * (both flags), or on neither (no flags).  All transitions happen under
 * d_lock; D_FLAG_VERIFY asserts the expected starting state.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
433
/* Remove a dentry from the superblock LRU and fix up the counters. */
static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
443
/* Take a dentry off a private shrink list (not the sb LRU proper). */
static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}
451
/* Put a dentry (currently on no list) onto a private shrink list. */
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}
459
460
461
462
463
464
465
/*
 * Variant of d_lru_del() for use inside a list_lru walk callback, where
 * the lru lock is already held: must use list_lru_isolate() instead of
 * list_lru_del() to avoid taking it again.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}
475
/*
 * Move a dentry from the sb LRU onto a private shrink list during a
 * list_lru walk.  DCACHE_LRU_LIST stays set (the dentry is still
 * "unused"), so nr_dentry_unused is untouched; only the negative
 * counter is adjusted, since shrink-list entries aren't counted there.
 */
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
485
/*
 * Remove the dentry from its hash chain without marking it unhashed.
 * Caller holds d_lock.  Anonymous (IS_ROOT) dentries hang off the
 * superblock's s_roots list instead of the global hash table.
 */
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;
	/*
	 * Hashed dentries are normally on the dentry hashtable,
	 * with the exception of those newly allocated by
	 * d_obtain_root, which are always IS_ROOT:
	 */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}
503
/*
 * Unhash a dentry: remove it from the hash chain, mark it unhashed
 * (pprev == NULL is the d_unhashed() test), and invalidate d_seq so
 * in-flight RCU-walk lookups retry.  Caller holds d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it
 * won't be found through a VFS lookup any more.  The dentry itself is
 * not deallocated here; it becomes unreachable and will be cleaned up
 * when its reference count drops.
 */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
539
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree.
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children.  While we'd been
	 * a normal list member, it didn't matter — ->d_child.next would've
	 * been updated.  However, from now on it won't be, and for things
	 * like d_walk() that could be a nasty surprise: if we ascend from
	 * a child that gets killed as soon as we unlock it, the next
	 * sibling is found using the stale value left in its
	 * ->d_child.next, and if that points to a cursor that gets moved
	 * (e.g. by lseek()) before d_walk() regains parent->d_lock, we'd
	 * skip everything the cursor had been moved over.
	 *
	 * Solution: make sure the pointer left behind in ->d_child.next
	 * points at something that won't move — i.e. skip over any
	 * cursors sitting immediately after us.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
577
/*
 * Tear a dentry down: mark the lockref dead, unhash it, unlink it from
 * its parent, release its inode and free it.  Caller holds d_lock, the
 * parent's d_lock (if any) and the inode's i_lock (if any); all are
 * dropped here.  If the dentry is still on someone's shrink list the
 * actual freeing is deferred to that list's owner via DCACHE_MAY_FREE.
 */
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * Inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* re-check: a shrink-list owner may need to free it instead */
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}
624
/*
 * Lock the parent of a dentry in the correct order (parent first).
 * Drops dentry->d_lock, then retries parent->d_lock under RCU until
 * d_parent is stable; RCU keeps the observed parent from being freed
 * while we chase it.  Returns the locked parent (with dentry->d_lock
 * re-taken nested), or NULL if the dentry turned out to be its own
 * parent (root).
 */
static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure that we have the
	 * right parent — a concurrent rename may have changed it while we
	 * held no locks.  Re-check under parent->d_lock.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}
652
/*
 * Lock a dentry's parent, fast path first: trylock avoids the
 * drop-and-retry dance of __lock_parent() when uncontended.  Returns
 * NULL for a root dentry.  Caller holds dentry->d_lock throughout
 * (though __lock_parent() may drop and re-take it).
 */
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}
662
/*
 * Decide whether a dentry about to lose its last reference should stay
 * in the dcache.  Called with d_lock held.  On a true return the
 * reference has been dropped and the dentry parked (or refreshed) on
 * the LRU; on false the caller must go on to kill it.
 */
static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	/* the filesystem may veto caching via ->d_delete() */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}

	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
		return false;

	/* retain; LRU fodder */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}
690
/*
 * Mark an inode and every dentry attached to it as not worth caching:
 * the dentries get DCACHE_DONTCACHE (so retain_dentry() drops them on
 * final dput) and the inode gets I_DONTCACHE.
 */
void d_mark_dontcache(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&de->d_lock);
		de->d_flags |= DCACHE_DONTCACHE;
		spin_unlock(&de->d_lock);
	}
	inode->i_state |= I_DONTCACHE;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);
705
706
707
708
709
710
/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns the parent (which now needs a refcount drop by the caller),
 * or NULL if we're done.  Uses trylocks first; on contention it drops
 * d_lock, takes the locks in the proper order and re-checks that the
 * dentry still deserves killing (count may have changed meanwhile).
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative that became positive while unlocked */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	if (unlikely(dentry->d_lockref.count != 1)) {
		/* someone re-grabbed it while we juggled locks */
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}
756
757
758
759
760
761
762
763
764
/*
 * Try to drop a reference without taking any locks.  Returns true if
 * the reference was dropped and the caller is done.  Returns false —
 * with dentry->d_lock held and d_lockref.count set to 1 — when the slow
 * path (retain_dentry()/dentry_kill()) must decide the dentry's fate.
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we can't let the
	 * count go to zero behind the fs's back, so use the "put or
	 * lock" primitive straight away.
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed because the lock was held
	 * by somebody else, the fast path has failed.  Take the lock
	 * and redo the count check under it.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Our count dropped to zero without the lock.  If the dentry
	 * looks perfectly cacheable — hashed, already on the LRU,
	 * referenced, neither disconnected nor DONTCACHE — nothing else
	 * needs doing and we never take d_lock at all.  The smp_rmb()
	 * orders our d_flags read after the lockref update, pairing
	 * with the barriers in the stores that set those flags, so the
	 * state we test is at least as new as the count we dropped.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST |
			DCACHE_DISCONNECTED | DCACHE_DONTCACHE;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case?  Get the lock.  We've already
	 * decremented the refcount, but we'll need to re-check the
	 * situation once we have the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference while our count was zero?
	 * Then the zero we saw is stale and we're done — our reference
	 * is already accounted for by the decrement above.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-instate the reference we optimistically dropped: the slow
	 * path (retain_dentry()/dentry_kill()) expects to own one count
	 * and will drop it itself.
	 */
	dentry->d_lockref.count = 1;
	return false;
}
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry.  This will drop the usage count and, if appropriate,
 * call the dentry unlink method as well as removing it from the queues
 * and releasing its resources.  If the parent dentries were scheduled
 * for release they will be released too.  May sleep; must not be called
 * from RCU-walk context.
 */
void dput(struct dentry *dentry)
{
	/* loop: killing a dentry may hand back its parent to drop next */
	while (dentry) {
		might_sleep();

		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow case: now with the dentry lock held */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);
917
/*
 * Drop a reference under d_lock; if the dentry becomes unreferenced,
 * park it on @list for the caller to dispose of via
 * shrink_dentry_list() instead of killing it inline.
 */
static void __dput_to_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		/* let the owner of the list it's on deal with it */
		--dentry->d_lockref.count;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!--dentry->d_lockref.count)
			d_shrink_add(dentry, list);
	}
}
931
/*
 * Like dput(), but victims are queued onto @list for the caller to
 * shrink_dentry_list() later, rather than being killed here.
 */
void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	if (!retain_dentry(dentry))
		__dput_to_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}
944
945
/* Grab a reference; caller already holds dentry->d_lock. */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

/* Grab a reference via the lockref fast path (no d_lock held). */
static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
955
/*
 * dget_parent - acquire a reference to the dentry's parent.  Tries a
 * lockless pass first (seqcount-validated parent + lockref_get_not_zero);
 * falls back to locking the candidate parent and re-checking d_parent
 * under its d_lock.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;
	unsigned seq;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	seq = raw_seqcount_begin(&dentry->d_seq);
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (!read_seqcount_retry(&dentry->d_seq, seq))
			return ret;
		/* raced with a rename; drop the wrong parent and retry */
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
997
/*
 * Grab the first alias of the inode, hashed or not.  Caller holds
 * inode->i_lock.  Returns a referenced dentry or NULL.
 */
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}
1008
1009
1010
1011
1012
1013
1014
1015
/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any alias exists for the given inode, take and return a
 * reference for it, otherwise return %NULL.  Unlike d_find_alias(),
 * unhashed aliases are acceptable.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
1026
/*
 * Core of d_find_alias(); caller holds inode->i_lock.  Directories have
 * at most one alias, so any alias will do; for other inodes only a
 * hashed (still reachable) alias qualifies.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire a reference to the alias and return it.  Otherwise return
 * %NULL.  The caller must use dput() on the returned dentry.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	/* lockless emptiness pre-check; confirmed under i_lock */
	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
1072
1073
1074
1075
1076
/*
 * Caller MUST be in RCU-walk or otherwise guarantee the returned dentry
 * stays allocated: no reference is taken.  Returns an alias of @inode
 * (any for directories, a hashed one otherwise), or NULL — including
 * when the inode is already being freed (I_FREEING).
 */
struct dentry *d_find_alias_rcu(struct inode *inode)
{
	struct hlist_head *l = &inode->i_dentry;
	struct dentry *de = NULL;

	spin_lock(&inode->i_lock);
	/* ->i_dentry and ->i_rcu are colocated, but the latter won't be
	 * used without having I_FREEING set, which means no aliases left
	 */
	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
		if (S_ISDIR(inode->i_mode)) {
			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
		} else {
			hlist_for_each_entry(de, l, d_u.d_alias)
				if (!d_unhashed(de))
					break;
		}
	}
	spin_unlock(&inode->i_lock);
	return de;
}
1097
1098
1099
1100
1101
/*
 *	Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 *
 * Any unreferenced alias is killed; the walk restarts from scratch
 * after each kill because __dentry_kill() drops i_lock.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			/* re-check: lock_parent() may have dropped d_lock */
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
/*
 * Lock a dentry from a shrink list.
 *
 * Called under rcu_read_lock() and dentry->d_lock; the latter may get
 * dropped and regained.
 *
 * The dentry is *not* protected from concurrent dentry_kill(),
 * d_delete(), etc.  Being on a shrink list only protects it from being
 * freed; everything else is fair game.
 *
 * Return false if the dentry has been disrupted or grabbed, leaving the
 * caller to kick it off-list.  Otherwise, return true and have that
 * dentry's inode and parent both locked.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		/* take locks in the right order, then re-validate */
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}
1176
/*
 * Kill every dentry on a private shrink list.  Parents dropped to zero
 * in the process are appended to the same list, so the loop also
 * consumes the chain of newly-victimized ancestors.
 */
void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			/*
			 * Grabbed or disrupted — kick it off the list.  If
			 * __dentry_kill() already ran and deferred to us
			 * (count went negative, DCACHE_MAY_FREE set), the
			 * final free is our job.
			 */
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		/* drop the parent's ref; queue it here if it hits zero */
		if (parent != dentry)
			__dput_to_list(parent, list);
		__dentry_kill(dentry);
	}
}
1204
/*
 * list_lru walk callback used by prune_dcache_sb(): decide per dentry
 * whether to remove it from the LRU, rotate it, or move it to the
 * caller's disposal list.
 */
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move is done under the lru lock, so the only
		 * state that could have changed meanwhile is the
		 * DCACHE_REFERENCED flag — and a reclaim-driven rotate
		 * is correct either way: newly referenced dentries get
		 * one more trip around the LRU before being reclaimed.
		 */
		return LRU_ROTATE;
	}

	/* unreferenced and idle: hand it to the caller for disposal */
	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan
 * entries.  This is used from the superblock shrinker: unused and
 * negative dentries are removed; in-use dentries are dropped from the
 * LRU.  Returns the number of dentries freed.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
1285
/*
 * list_lru walk callback used by shrink_dcache_sb(): unconditionally
 * move every LRU dentry onto the disposal list (no referenced/in-use
 * filtering — umount-style teardown wants everything gone).
 */
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1305
1306
1307
1308
1309
1310
1311
1312
1313
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block.  This is used to
 * free the dcache before unmounting a file system.  Works in batches of
 * 1024 and loops until the LRU is empty (trylock skips in the isolate
 * callback may leave stragglers behind on a single pass).
 */
void shrink_dcache_sb(struct super_block *sb)
{
	do {
		LIST_HEAD(dispose);

		list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
1325
1326
1327
1328
1329
1330
1331
1332
/*
 * Return values for the d_walk() per-dentry enter() callback:
 */
enum d_walk_ret {
	D_WALK_CONTINUE,	/* keep walking */
	D_WALK_QUIT,		/* abort the whole walk */
	D_WALK_NORETRY,		/* finish this pass, but don't restart on rename */
	D_WALK_SKIP,		/* don't descend into this subtree */
};
1339
1340
1341
1342
1343
1344
1345
1346
1347
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() callback
 * @enter:	callback when first entering the dentry
 *
 * The @enter() callback is called with d_lock held.  The walk descends
 * depth-first while holding only the current parent's and child's
 * d_lock, ascends under RCU, and restarts from scratch if a concurrent
 * rename invalidates rename_lock — unless a callback returned
 * D_WALK_NORETRY, in which case the partial walk stands.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		/* readdir cursors are not real children */
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			/* transfer lockdep ownership of the child's lock */
			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	/* odd seq => take rename_lock exclusively on the retry pass */
	seq = 1;
	goto again;
}
1453
/* State shared between path_has_submounts() and its d_walk callback. */
struct check_mount {
	struct vfsmount *mnt;	/* mount we are checking beneath */
	unsigned int mounted;	/* set to 1 once a mountpoint is found */
};

/*
 * d_walk callback: stop the walk as soon as a dentry in the subtree is
 * a mountpoint within the current namespace.
 */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1472
1473
1474
1475
1476
1477
1478
1479
1480
/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.  Walks the whole subtree
 * with mount_lock held to keep the mount table stable.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
1492
1493
1494
1495
1496
1497
1498
1499
1500
/*
 * Called by mount code to set a mountpoint and check if the mountpoint
 * is reachable (e.g. NFS can unhash a directory dentry and then the
 * complete subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this to work, rename_lock is taken for writing so the ancestor check
 * cannot race with a concurrent d_move().
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
/*
 * Shared state for shrink_dcache_parent()'s d_walk callbacks.  The
 * union reflects the two passes: select_collect() counts candidates in
 * 'found', select_collect2() instead records a blocked 'victim'.
 */
struct select_data {
	struct dentry *start;	/* root of the walk; never collected */
	union {
		long found;		/* pass 1: number of candidates seen */
		struct dentry *victim;	/* pass 2: stuck shrink-list dentry */
	};
	struct list_head dispose;	/* dentries queued for disposal */
};
1552
/*
 * d_walk callback, pass 1: queue every unreferenced dentry on the
 * dispose list and count how many candidates (queued here or already on
 * someone's shrink list) remain in the subtree.
 */
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1581
/*
 * d_walk callback, pass 2: like select_collect(), but when an
 * unreferenced dentry is stuck on someone else's shrink list, record it
 * as the victim and quit — shrink_dcache_parent() will kill it
 * directly.  Returns with rcu_read_lock held in that case, to keep the
 * victim allocated until the caller locks it.
 */
static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		if (!dentry->d_lockref.count) {
			rcu_read_lock();
			data->victim = dentry;
			return D_WALK_QUIT;
		}
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count)
			d_shrink_add(dentry, &data->dispose);
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1612
1613
1614
1615
1616
1617
1618
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 * Loops: pass 1 (select_collect) disposes of everything it can reach;
 * once nothing new is collectable but candidates remain on other
 * shrink lists, pass 2 (select_collect2) finds one such victim and
 * kills it directly.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data = {.start = parent};

		INIT_LIST_HEAD(&data.dispose);
		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			continue;
		}

		cond_resched();
		if (!data.found)
			break;
		data.victim = NULL;
		d_walk(parent, &data, select_collect2);
		if (data.victim) {
			struct dentry *parent;
			/* select_collect2() left rcu_read_lock held for us */
			spin_lock(&data.victim->d_lock);
			if (!shrink_lock_dentry(data.victim)) {
				spin_unlock(&data.victim->d_lock);
				rcu_read_unlock();
			} else {
				rcu_read_unlock();
				parent = data.victim->d_parent;
				if (parent != data.victim)
					__dput_to_list(parent, &data.dispose);
				__dentry_kill(data.victim);
			}
		}
		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
1656
/*
 * d_walk() callback used at umount time: complain about any dentry that
 * is still busy.  Purely diagnostic - it always continues the walk.
 */
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine - the walk root holds that ref */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	return D_WALK_CONTINUE;
}
1678
/*
 * Tear down one dentry tree at umount: shrink it, warn about anything
 * still in use, then drop the root from the hash and release it.
 */
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check);
	d_drop(dentry);
	dput(dentry);
}
1686
1687
1688
1689
/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	/* caller must hold s_umount; if trylock succeeds it wasn't held */
	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	/* anonymous roots (NFS export, disconnected aliases) live on s_roots */
	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
1705
/*
 * d_walk() callback for d_invalidate(): find the first mountpoint in the
 * subtree, grab a reference to it (d_lock is held by d_walk) and stop.
 */
static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
	struct dentry **victim = _data;
	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		*victim = dentry;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1716
1717
1718
1719
1720
/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * Unhashes @dentry, then (for positive dentries) repeatedly detaches any
 * mounts in its subtree and prunes unused children.
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			/* re-shrink: detach_mounts may have pinned children */
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);
	}
}
EXPORT_SYMBOL(d_invalidate);
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name; NULL means "/" (used for anonymous dentries)
 *
 * Allocates a dentry charged to @sb's LRU memcg list.  The dentry starts
 * with refcount 1, no inode, and itself as parent (IS_ROOT); the caller
 * wires it into the tree.  Returns NULL on allocation failure.
 */
static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
				      GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character.
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		/* name doesn't fit inline: allocate an external_name */
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT |
						  __GFP_RECLAIMABLE);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Publish the name only after its bytes are fully written */
	smp_store_release(&dentry->d_name.name, dname);

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dcache entry with the given name, links it under @parent
 * and takes a reference on the parent.  Returns NULL if out of memory.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
1864
/* Allocate an IS_ROOT dentry named "/" on @sb (see __d_alloc(sb, NULL)). */
struct dentry *d_alloc_anon(struct super_block *sb)
{
	return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);
1870
/*
 * Allocate a directory-cursor dentry for @parent (used by libfs readdir).
 * The cursor pins its parent but is not put on the parent's d_subdirs
 * list here.
 */
struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = d_alloc_anon(parent->d_sb);
	if (dentry) {
		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
/**
 * d_alloc_pseudo - allocate a dentry for pseudo-filesystem use
 * @sb: the superblock
 * @name: qstr of the name
 *
 * The dentry is marked DCACHE_NORCU, i.e. it is freed without an RCU
 * grace period, so it must never be made visible to RCU-walk lookup
 * (never hashed).
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (likely(dentry))
		dentry->d_flags |= DCACHE_NORCU;
	return dentry;
}
1903
1904struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1905{
1906 struct qstr q;
1907
1908 q.name = name;
1909 q.hash_len = hashlen_string(parent, name);
1910 return d_alloc(parent, &q);
1911}
1912EXPORT_SYMBOL(d_alloc_name);
1913
/*
 * Attach dentry_operations to a freshly allocated dentry and cache the
 * presence of each op in d_flags so the hot paths can test a flag bit
 * instead of chasing pointers.  Must not be called twice on one dentry
 * (hence the WARN_ON_ONCEs).
 */
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;

}
EXPORT_SYMBOL(d_set_d_op);
1943
1944
1945
1946
1947
1948
1949
1950
1951
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: dentry to mark
 *
 * Sets DCACHE_FALLTHRU under d_lock (used by union/overlay-style
 * filesystems).
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
1959
/*
 * Compute the DCACHE_*_TYPE flags a dentry should carry for @inode
 * (NULL inode -> DCACHE_MISS_TYPE).  The IOP_LOOKUP / IOP_NOFOLLOW bits
 * in i_opflags cache the "op is absent" checks so we only dereference
 * i_op the first time.
 */
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				/* ->lookup-less directory: automount point */
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
1994
/* Attach @inode to @dentry; caller holds inode->i_lock. */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	/*
	 * The dentry is going positive; if it sits on an LRU list it was
	 * counted as negative, so fix up the accounting.
	 */
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_dec(nr_dentry_negative);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	/* d_seq write section so RCU-walk sees inode+type change atomically */
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.  The dentry must not already
 * be attached to an inode (BUG_ON the alias link).  A NULL @inode is a
 * no-op (the dentry simply stays negative).
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);
2040
2041
2042
2043
2044
2045
2046
/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related bits for annotating the inode's i_rwsem.
 * The inode must be I_NEW; its I_NEW/I_CREATING state is cleared and
 * waiters on __I_NEW are woken under i_lock.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	/* pair the state change with the wakeup check in wait_on_inode() */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
2062
2063struct dentry *d_make_root(struct inode *root_inode)
2064{
2065 struct dentry *res = NULL;
2066
2067 if (root_inode) {
2068 res = d_alloc_anon(root_inode->i_sb);
2069 if (res)
2070 d_instantiate(res, root_inode);
2071 else
2072 iput(root_inode);
2073 }
2074 return res;
2075}
2076EXPORT_SYMBOL(d_make_root);
2077
/*
 * Instantiate @dentry with @inode unless the inode already has an alias,
 * in which case that alias is returned instead (and @dentry is dropped).
 * Consumes the inode reference on the alias-found path.  @disconnected
 * selects DCACHE_DISCONNECTED vs hashing the dentry onto sb->s_roots.
 */
static struct dentry *__d_instantiate_anon(struct dentry *dentry,
					   struct inode *inode,
					   bool disconnected)
{
	struct dentry *res;
	unsigned add_flags;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		/* someone else beat us to it - use the existing alias */
		spin_unlock(&inode->i_lock);
		dput(dentry);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&dentry->d_lock);
	__d_set_inode_and_type(dentry, inode, add_flags);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	if (!disconnected) {
		/* connected roots go on the per-sb s_roots hash */
		hlist_bl_lock(&dentry->d_sb->s_roots);
		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
		hlist_bl_unlock(&dentry->d_sb->s_roots);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

	return dentry;

 out_iput:
	iput(inode);
	return res;
}
2117
/* Instantiate @dentry as a disconnected alias of @inode (see above). */
struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
	return __d_instantiate_anon(dentry, inode, true);
}
EXPORT_SYMBOL(d_instantiate_anon);
2123
/*
 * Common guts of d_obtain_alias()/d_obtain_root(): find an existing
 * alias of @inode or allocate and instantiate an anonymous one.
 * Consumes the inode reference on every path; returns ERR_PTR(-ESTALE)
 * for a NULL inode and propagates an IS_ERR inode unchanged.
 */
static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc_anon(inode->i_sb);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	/* __d_instantiate_anon() consumes the inode reference */
	return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
	iput(inode);
	return res;
}
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for an inode
 * @inode: inode to allocate the dentry for
 *
 * Used mainly by filesystem export code (fh_to_dentry and friends).
 * Consumes the inode reference; on error an ERR_PTR is returned.
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
/**
 * d_obtain_root - find or allocate a dentry for a given inode as a root
 * @inode: inode to allocate the dentry for
 *
 * Like d_obtain_alias() but the dentry is hashed on sb->s_roots rather
 * than marked DCACHE_DISCONNECTED.  Consumes the inode reference.
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
/**
 * d_add_ci - lookup or allocate a new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * Used by case-insensitive filesystems: splice @inode under a dentry
 * carrying the on-disk spelling of the name rather than the one the
 * user typed.  Consumes the inode reference on every error/duplicate
 * path.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		/* an existing alias won; discard our candidate */
		d_lookup_done(found);
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2259 const struct qstr *name)
2260{
2261 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2262 if (dentry->d_name.len != name->len)
2263 return false;
2264 return dentry_cmp(dentry, name->name, name->len) == 0;
2265 }
2266 return parent->d_op->d_compare(dentry,
2267 dentry->d_name.len, dentry->d_name.name,
2268 name) == 0;
2269}
2270EXPORT_SYMBOL_GPL(d_same_name);
2271
2272
2273
2274
2275
/*
 * Slow path of __d_lookup_rcu() for parents with ->d_compare: the name
 * snapshot (len + pointer) must be validated against d_seq *before*
 * calling the filesystem's compare function, hence the explicit retry
 * loop instead of relying solely on the caller's seqcount check.
 */
static noinline struct dentry *__d_lookup_rcu_op_compare(
	const struct dentry *parent,
	const struct qstr *name,
	unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		int tlen;
		const char *tname;
		unsigned seq;

seqretry:
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		if (dentry->d_name.hash != hashlen_hash(hashlen))
			continue;
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		/* we want a consistent (name,len) pair */
		if (read_seqcount_retry(&dentry->d_seq, seq)) {
			cpu_relax();
			goto seqretry;
		}
		if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
			continue;
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point the dentry was found
 *
 * Lockless lookup used by RCU path walk.  The caller must be inside
 * rcu_read_lock() and must revalidate the returned dentry against
 * *seqp (and rename_lock where appropriate) before trusting it - no
 * reference is taken here.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
		return __d_lookup_rcu_op_compare(parent, name, seqp);

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 *
		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
		 * we are still guaranteed NUL-termination of ->d_name.name.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		if (dentry->d_name.hash_len != hashlen)
			continue;
		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
			continue;
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question.
 * If found, a referenced dentry is returned; NULL otherwise.  Retries
 * under rename_lock so a concurrent rename cannot cause a false
 * negative.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Like d_lookup, but is not protected against concurrent renames and
 * may return a false negative if one races with us; callers must either
 * tolerate that or retry under rename_lock (as d_lookup() does).  The
 * hash chain is walked under RCU; each candidate is checked and, on a
 * match, referenced under its d_lock.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * The RCU read lock here only protects the hash chain traversal;
	 * all per-dentry checks are done (and the reference taken) under
	 * d_lock, so no seqcount games are needed.
	 */
	rcu_read_lock();
	
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
 	}
 	rcu_read_unlock();

 	return found;
}
2507
2508
2509
2510
2511
2512
2513
2514
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise remove
 * it from the hash queues so it can be deleted later.
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;

	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	/*
	 * Are we the only user?
	 */
	if (dentry->d_lockref.count == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		/* NOTE: dentry_unlink_inode() drops both locks for us */
		dentry_unlink_inode(dentry);
	} else {
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_delete);
2572
/* Add @entry to its hash chain; caller holds entry->d_lock. */
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
2581
2582
2583
2584
2585
2586
2587
2588
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
2596
/*
 * Open an "in-lookup insertion" section on @dir: spin until we can bump
 * i_dir_seq from even to odd.  An odd i_dir_seq tells d_alloc_parallel()
 * that an insertion is in progress and it must retry.  Returns the even
 * value we started from (passed back to end_dir_add()).
 */
static inline unsigned start_dir_add(struct inode *dir)
{
	preempt_disable_nested();
	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}
2607
/*
 * Close the section opened by start_dir_add(): publish the next even
 * i_dir_seq value (release pairs with the acquire in d_alloc_parallel)
 * and wake anyone blocked in d_wait_lookup() on @d_wait.
 */
static inline void end_dir_add(struct inode *dir, unsigned int n,
			       wait_queue_head_t *d_wait)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
	preempt_enable_nested();
	wake_up_all(d_wait);
}
2615
/*
 * Sleep until @dentry leaves in-lookup state.  Called and returns with
 * dentry->d_lock held; the lock is dropped around each schedule().
 */
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
2629
/*
 * d_alloc_parallel - allocate a dentry for a parallel (shared-locked)
 * directory lookup.
 *
 * Either returns a new in-lookup dentry (hashed on the in_lookup table,
 * DCACHE_PAR_LOOKUP set, ->d_wait = @wq) that the caller must complete
 * with d_lookup_done()/d_splice_alias(), or an existing referenced
 * dentry for the same name - possibly after waiting for a concurrent
 * lookup of it to finish.  ERR_PTR(-ENOMEM) on allocation failure.
 */
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	/*
	 * The acquire on i_dir_seq pairs with the release in end_dir_add();
	 * an odd value means somebody is inserting right now - retry below.
	 */
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		/* already in the primary hash: grab a reference and return */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		/* a rename raced with us; the negative result is unreliable */
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		/* an insertion was in progress when we sampled i_dir_seq */
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}
	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup match is going to stay here until we
	 * unlock the chain.  All fields are stable in everything we
	 * encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would've found anyway.  If it is, just return it;
		 * otherwise we really have to repeat the whole thing.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
2739
2740
2741
2742
2743
2744
/*
 * Take a dentry out of in-lookup state: clear DCACHE_PAR_LOOKUP, remove
 * it from the in_lookup hash and detach its wait queue head.  Returns
 * the wait queue head so the caller can wake waiters *after* dropping
 * its locks.  Caller holds dentry->d_lock.
 */
static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
{
	wait_queue_head_t *d_wait;
	struct hlist_bl_head *b;

	lockdep_assert_held(&dentry->d_lock);

	b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	d_wait = dentry->d_wait;
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	/* d_u is a union: re-initialize the members the normal paths use */
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
	return d_wait;
}
2763
/* End an in-progress lookup and wake everyone waiting on the dentry. */
void __d_lookup_unhash_wake(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	wake_up_all(__d_lookup_unhash(dentry));
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(__d_lookup_unhash_wake);
2771
2772
2773
/* inode->i_lock held if inode is non-NULL */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	wait_queue_head_t *d_wait;
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		/* leaving in-lookup state: bracket with dir-add section */
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		d_wait = __d_lookup_unhash(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n, d_wait);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
2865
/*
 * Swap the names of two dentries, handling all four combinations of
 * inline vs external storage.  Both d_locks and their d_seq write
 * sections are held by the caller (__d_move with exchange=true).
 */
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Swap the inline buffers
			 * long-by-long (they are always NUL-terminated,
			 * see __d_alloc()).
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
2908
/*
 * Make @dentry carry @target's name (non-exchange rename path).  An
 * external target name is shared by bumping its refcount; @dentry's old
 * external name, if any, is freed via RCU once its count drops.
 */
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries rather than replacing target
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.  Caller must hold
 * rename_lock; the parent/child d_locks are taken here in the canonical
 * ancestor-first order to avoid deadlock.
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	wait_queue_head_t *d_wait;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target's parent can't be locked after old parent */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		/* old parent is an ancestor of target's parent: lock it first */
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		/* splicing onto an in-lookup dentry: end its lookup */
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		d_wait = __d_lookup_unhash(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ... and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;
		dentry->d_parent->d_lockref.count++;
		if (dentry != old_parent) /* wasn't IS_ROOT */
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);
	fscrypt_handle_d_move(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n, d_wait);

	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
3035
3036
3037
3038
3039
3040
3041void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
3042{
3043 write_seqlock(&rename_lock);
3044
3045 WARN_ON(!dentry1->d_inode);
3046 WARN_ON(!dentry2->d_inode);
3047 WARN_ON(IS_ROOT(dentry1));
3048 WARN_ON(IS_ROOT(dentry2));
3049
3050 __d_move(dentry1, dentry2, true);
3051
3052 write_sequnlock(&rename_lock);
3053}
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
3064{
3065 struct dentry *p;
3066
3067 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
3068 if (p->d_parent == p1)
3069 return p;
3070 }
3071 return NULL;
3072}
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative, unhashed dentry which we want to point to the inode
 *
 * For non-directories (or a NULL inode), simply d_add() the dentry.  For
 * directories, if the inode already has a connected alias elsewhere, move
 * that alias into place instead (so a directory never has two hashed
 * aliases), refusing with -ELOOP if that would create a loop.  Consumes
 * the inode reference when an existing alias (or error) is returned.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				/* remote rename detected: move alias here */
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3203{
3204 bool result;
3205 unsigned seq;
3206
3207 if (new_dentry == old_dentry)
3208 return true;
3209
3210 do {
3211
3212 seq = read_seqbegin(&rename_lock);
3213
3214
3215
3216
3217 rcu_read_lock();
3218 if (d_ancestor(old_dentry, new_dentry))
3219 result = true;
3220 else
3221 result = false;
3222 rcu_read_unlock();
3223 } while (read_seqretry(&rename_lock, seq));
3224
3225 return result;
3226}
3227EXPORT_SYMBOL(is_subdir);
3228
3229static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3230{
3231 struct dentry *root = data;
3232 if (dentry != root) {
3233 if (d_unhashed(dentry) || !dentry->d_inode)
3234 return D_WALK_SKIP;
3235
3236 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3237 dentry->d_flags |= DCACHE_GENOCIDE;
3238 dentry->d_lockref.count--;
3239 }
3240 }
3241 return D_WALK_CONTINUE;
3242}
3243
/*
 * Walk the subtree under @parent and drop a reference from every
 * hashed, positive dentry in it (see d_genocide_kill() above).
 */
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill);
}
3248
/*
 * Rename @file's dentry in place to "#<ino>", marking it as a tmpfile.
 * Only valid for a dentry that still uses its inline name, has no inode
 * alias yet, and is unlinked — enforced by the BUG_ON below.
 */
void d_mark_tmpfile(struct file *file, struct inode *inode)
{
	struct dentry *dentry = file->f_path.dentry;

	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	/* Parent lock first, then the child with nested class — the
	 * standard dcache parent/child lock ordering. */
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	/* "#%llu" of a 64-bit ino fits in the inline name buffer. */
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
}
3264
/*
 * Set up @file's dentry as an anonymous tmpfile for @inode: drop the
 * inode's link count (tmpfiles start unlinked), rename the dentry to
 * "#<ino>", then bind it to the inode.
 */
void d_tmpfile(struct file *file, struct inode *inode)
{
	struct dentry *dentry = file->f_path.dentry;

	inode_dec_link_count(inode);
	d_mark_tmpfile(file, inode);
	d_instantiate(dentry, inode);
}
3274
3275static __initdata unsigned long dhash_entries;
3276static int __init set_dhash_entries(char *str)
3277{
3278 if (!str)
3279 return 0;
3280 dhash_entries = simple_strtoul(str, &str, 0);
3281 return 1;
3282}
3283__setup("dhash_entries=", set_dhash_entries);
3284
static void __init dcache_init_early(void)
{
	/*
	 * If hashes are distributed across NUMA nodes, defer hash table
	 * allocation until vmalloc space is available (dcache_init()).
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,	/* scale: one bucket per 8K of memory */
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	/* Convert to the shift d_hash() applies to a 32-bit hash value. */
	d_hash_shift = 32 - d_hash_shift;
}
3305
static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.  The usercopy whitelist covers only d_iname,
	 * the inline name buffer.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up already in dcache_init_early(). */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,	/* scale: one bucket per 8K of memory */
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	/* Convert to the shift d_hash() applies to a 32-bit hash value. */
	d_hash_shift = 32 - d_hash_shift;
}
3333
3334
3335struct kmem_cache *names_cachep __ro_after_init;
3336EXPORT_SYMBOL(names_cachep);
3337
3338void __init vfs_caches_init_early(void)
3339{
3340 int i;
3341
3342 for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3343 INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3344
3345 dcache_init_early();
3346 inode_init_early();
3347}
3348
/*
 * Main VFS cache initialisation, called once at boot.  The order
 * matters: dcache/inode caches before files, mounts before the block
 * device cache, etc.
 */
void __init vfs_caches_init(void)
{
	/* PATH_MAX-sized pathname buffers; fully usercopy-whitelisted. */
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
3362