/*
 * fs/namespace.c: VFS mount handling - the mount hash table, mount(2)
 * and umount(2), bind mounts, shared-subtree propagation and mount
 * namespaces.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h>
#include <linux/ramfs.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
#include <linux/uaccess.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * lock protects any kind of list of mounts in any function.
 */
DEFINE_BRLOCK(vfsmount_lock);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}
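
/*
 * Illustrative only: a (parent vfsmount, mountpoint dentry) pair selects
 * a chain in mount_hashtable, which is how __lookup_mnt() below finds the
 * mount sitting on a given dentry:
 *
 *	struct list_head *head = mount_hashtable + hash(parent, dentry);
 */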

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID.
 *
 * mnt_group_ida is protected by namespace_sem.
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID.
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a filesystem are for operations that take discrete
 * amounts of time, like a write() or unlink().  We must keep track of when
 * those operations start (for permission checks) and when they end, so
 * that we can determine when writes are able to occur to a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only right now.
 *
 * This does not guarantee that the filesystem will stay r/w, and should
 * not be used in place of IS_RDONLY(inode).  Callers that need the
 * filesystem to remain writable must use mnt_want_write()/mnt_drop_write().
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed
 * to it, and makes sure that writes are allowed (the mount is read-write)
 * before returning success.  When the write operation is finished,
 * mnt_drop_write() must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
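
/*
 * Typical calling pattern (an illustrative sketch, not code from this
 * file): every successful mnt_want_write() must be paired with a
 * mnt_drop_write() once the write-side operation has finished:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	...modify the filesystem...
 *	mnt_drop_write(path->mnt);
 */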

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except it must only be used to
 * take an extra write reference on a mountpoint that we already know has a
 * write reference on it.  This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to drop the
 * reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can do some
 * optimisations if the file is open for write already.
 */
int mnt_want_write_file(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it.
 * Must be matched with a mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	br_write_lock(vfsmount_lock);
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may
	 * subsequently increment the count, they'll have to wait, and
	 * decrement it after seeing MNT_READONLY).
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	br_write_unlock(vfsmount_lock);
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	br_write_lock(vfsmount_lock);
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	br_write_unlock(vfsmount_lock);
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	br_write_lock(vfsmount_lock);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	br_write_unlock(vfsmount_lock);

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 * vfsmount_lock must be held for read or write.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct mount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct mount, mnt_hash);
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt - Return the child mount mounted at path, taking a
 * reference to the found vfsmount.  Returns NULL if nothing is
 * mounted there.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;

	br_read_lock(vfsmount_lock);
	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
	if (child_mnt) {
		mnt_add_count(child_mnt, 1);
		br_read_unlock(vfsmount_lock);
		return &child_mnt->mnt;
	} else {
		br_read_unlock(vfsmount_lock);
		return NULL;
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * Clear dentry's mounted state if it has no remaining mounts.
 * vfsmount_lock must be held for write.
 */
static void dentry_reset_mounted(struct dentry *dentry)
{
	unsigned u;

	for (u = 0; u < HASH_SIZE; u++) {
		struct mount *p;

		list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
			if (p->mnt_mountpoint == dentry)
				return;
		}
	}
	spin_lock(&dentry->d_lock);
	dentry->d_flags &= ~DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	dentry_reset_mounted(old_path->dentry);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt, struct dentry *dentry,
			struct mount *child_mnt)
{
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(dentry);
	child_mnt->mnt_parent = mnt;
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt, struct path *path)
{
	mnt_set_mountpoint(real_mount(path->mnt), path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts);
}

static inline void __mnt_make_longterm(struct mount *mnt)
{
#ifdef CONFIG_SMP
	atomic_inc(&mnt->mnt_longterm);
#endif
}

/* needs vfsmount lock for write */
static inline void __mnt_make_shortterm(struct mount *mnt)
{
#ifdef CONFIG_SMP
	atomic_dec(&mnt->mnt_longterm);
#endif
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list) {
		m->mnt_ns = n;
		__mnt_make_longterm(m);
	}

	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
				hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
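
/*
 * Illustrative only: next_mnt() is a depth-first step, so walking a whole
 * mount tree rooted at @mnt is written throughout this file as:
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		...visit p...
 */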

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	br_write_lock(vfsmount_lock);
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	br_write_unlock(vfsmount_lock);
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
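
/*
 * Illustrative only: this is how in-kernel users get a private mount of a
 * filesystem type; kern_mount_data() at the bottom of this file is the
 * canonical caller:
 *
 *	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 */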

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
		atomic_inc(&sb->s_active);
		mnt->mnt.mnt_sb = sb;
		mnt->mnt.mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt.mnt_root;
		mnt->mnt_parent = mnt;
		br_write_lock(vfsmount_lock);
		list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
		br_write_unlock(vfsmount_lock);

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

 out_free:
	free_vfsmnt(mnt);
	return NULL;
}

static inline void mntfree(struct mount *mnt)
{
	struct vfsmount *m = &mnt->mnt;
	struct super_block *sb = m->mnt_sb;

	/*
	 * A nonzero writer count here probably means somebody messed up a
	 * mnt_want/drop_write() pair; if so, the filesystem was probably
	 * unable to make r/w->r/o transitions.  The locking used for the
	 * mnt_count decrement provides barriers, so mnt_get_writers() is
	 * safe here.
	 */
	WARN_ON(mnt_get_writers(mnt));
	fsnotify_vfsmount_delete(m);
	dput(m->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

static void mntput_no_expire(struct mount *mnt)
{
put_again:
#ifdef CONFIG_SMP
	br_read_lock(vfsmount_lock);
	if (likely(atomic_read(&mnt->mnt_longterm))) {
		mnt_add_count(mnt, -1);
		br_read_unlock(vfsmount_lock);
		return;
	}
	br_read_unlock(vfsmount_lock);

	br_write_lock(vfsmount_lock);
	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		br_write_unlock(vfsmount_lock);
		return;
	}
#else
	mnt_add_count(mnt, -1);
	if (likely(mnt_get_count(mnt)))
		return;
	br_write_lock(vfsmount_lock);
#endif
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		br_write_unlock(vfsmount_lock);
		acct_auto_close_mnt(&mnt->mnt);
		goto put_again;
	}
	list_del(&mnt->mnt_instance);
	br_write_unlock(vfsmount_lock);
	mntfree(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
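
/*
 * Illustrative only: mntget()/mntput() are the reference-count pair for
 * vfsmounts, analogous to dget()/dput() for dentries:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *	...use m...
 *	mntput(m);
 */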

void mnt_pin(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	real_mount(mnt)->mnt_pinned++;
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	br_write_lock(vfsmount_lock);
	if (mnt->mnt_pinned) {
		mnt_add_count(mnt, 1);
		mnt->mnt_pinned--;
	}
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from its fill_super() callback to record the option string.
 * Later updates must go through replace_mount_options() below, which
 * handles the RCU-protected swap of s_options.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);
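
/*
 * Illustrative wiring (a sketch with a hypothetical "examplefs", not code
 * from this file): a filesystem that opts into these helpers saves its
 * option string at mount time and points its super_operations at
 * generic_show_options():
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					void *data, int silent)
 *	{
 *		save_mount_options(sb, data);
 *		...
 *	}
 *
 *	static const struct super_operations examplefs_sops = {
 *		.show_options	= generic_show_options,
 *		...
 *	};
 */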

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = container_of(m, struct proc_mounts, m);

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = container_of(m, struct proc_mounts, m);

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = container_of(m, struct proc_mounts, m);
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	br_write_lock(vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	br_write_unlock(vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	br_write_lock(vfsmount_lock);
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	br_write_unlock(vfsmount_lock);
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

void release_mounts(struct list_head *head)
{
	struct mount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct mount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt_has_parent(mnt)) {
			struct dentry *dentry;
			struct mount *m;

			br_write_lock(vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			br_write_unlock(vfsmount_lock);
			dput(dentry);
			mntput(&m->mnt);
		}
		mntput(&mnt->mnt);
	}
}

/*
 * vfsmount lock must be held for write
 * namespace_sem must be held for write
 */
void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, &tmp_list);

	if (propagate)
		propagate_umount(&tmp_list);

	list_for_each_entry(p, &tmp_list, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		__mnt_make_shortterm(p);
		list_del_init(&p->mnt_child);
		if (mnt_has_parent(p)) {
			p->mnt_parent->mnt_ghosts++;
			dentry_reset_mounted(p->mnt_mountpoint);
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
	list_splice(&tmp_list, kill);
}

static void shrink_submounts(struct mount *mnt, struct list_head *umounts);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		br_write_lock(vfsmount_lock);
		if (mnt_get_count(mnt) != 2) {
			br_write_unlock(vfsmount_lock);
			return -EBUSY;
		}
		br_write_unlock(vfsmount_lock);

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this mount, and
	 * those operations themselves hold resources, let the filesystem
	 * take action first.  umount_begin() may not complete the job on
	 * the first pass; that is for the mount program to worry about.
	 */
	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * "Unmounting" the root of the current namespace (without
	 * MNT_DETACH) is handled as a special case: we just try to
	 * remount it read-only.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}
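
/*
 * Userspace view (illustrative): umount2("/mnt", MNT_DETACH) takes the
 * lazy path above: the busy check is skipped, the tree is detached from
 * the namespace immediately, and the mounts are freed once the last
 * reference is dropped.
 */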

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes.
 */
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct path *path)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(path->dentry->d_inode->i_mode))
		return -EPERM;
	if (path->dentry->d_inode->i_mode & S_ISVTX) {
		if (current_uid() != path->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (inode_permission(path->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = &q->mnt;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (!q)
				goto Enomem;
			br_write_lock(vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			br_write_unlock(vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		br_write_lock(vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(real_mount(path->mnt), path->dentry,
			 CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree ? &tree->mnt : NULL;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(real_mount(mnt), 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
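
/*
 * Illustrative caller sketch (hypothetical callback, not code from this
 * file): iterate_mounts() visits the root and every mount in the collected
 * tree until the callback returns nonzero.
 *
 *	static int count_one(struct vfsmount *mnt, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *	...
 *	int n = 0;
 *	iterate_mounts(count_one, &n, tree);
 */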

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @path       : place the mount tree @source_mnt is attached
 *  @parent_path: if non-null, detach the source_mnt from its parent and
 *                store the parent mount and mountpoint dentry there
 *                (done when source_mnt is moved)
 *
 * When a source mount is attached to a destination, the propagation type
 * of the result depends on both sides: attaching anything to a shared
 * destination propagates the new mount to every member of the
 * destination's peer group (and the new mounts are marked shared);
 * attaching a shared or slave source to a non-shared destination
 * preserves the source's peer/slave relationships; unbindable sources
 * cannot be bind-mounted at all.  See
 * Documentation/filesystems/sharedsubtree.txt for the full state table.
 *
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct mount *dest_mnt = real_mount(path->mnt);
	struct dentry *dest_dentry = path->dentry;
	struct mount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	br_write_lock(vfsmount_lock);

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	br_write_unlock(vfsmount_lock);

	return 0;

 out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
 out:
	return err;
}

static int lock_mount(struct path *path)
{
	struct vfsmount *mnt;
retry:
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(path->dentry))) {
		mutex_unlock(&path->dentry->d_inode->i_mutex);
		return -ENOENT;
	}
	down_write(&namespace_sem);
	mnt = lookup_mnt(path);
	if (likely(!mnt))
		return 0;
	/* something is already mounted here; climb to the top of the stack */
	up_write(&namespace_sem);
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct path *path)
{
	up_write(&namespace_sem);
	mutex_unlock(&path->dentry->d_inode->i_mutex);
}

static int graft_tree(struct mount *mnt, struct path *path)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	if (d_unlinked(path->dentry))
		return -ENOENT;

	return attach_recursive_mnt(mnt, path, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
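
/*
 * From userspace these types arrive via mount(2), e.g. (illustrative):
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);
 *
 * which do_mount() routes to do_change_type() below, recursively marking
 * the tree under /mnt as shared.
 */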

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	br_write_lock(vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	br_write_unlock(vfsmount_lock);

 out_unlock:
	up_write(&namespace_sem);
	return err;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
				int recurse)
{
	LIST_HEAD(umount_list);
	struct path old_path;
	struct mount *mnt = NULL, *old;
	int err = mount_is_safe(path);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = lock_mount(path);
	if (err)
		goto out;

	old = real_mount(old_path.mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old))
		goto out2;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old, old_path.dentry, 0);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (!mnt)
		goto out2;

	err = graft_tree(mnt, path);
	if (err) {
		br_write_lock(vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
	}
out2:
	unlock_mount(path);
	release_mounts(&umount_list);
out:
	path_put(&old_path);
	return err;
}

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		br_write_lock(vfsmount_lock);
		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		br_write_unlock(vfsmount_lock);
	}
	up_write(&sb->s_umount);
	if (!err) {
		br_write_lock(vfsmount_lock);
		touch_mnt_namespace(mnt->mnt_ns);
		br_write_unlock(vfsmount_lock);
	}
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	err = lock_mount(path);
	if (err < 0)
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (d_unlinked(path->dentry))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a
	 * destination mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(path);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}

static struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
	struct file_system_type *type = get_fs_type(fstype);
	struct vfsmount *mnt;
	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);
	put_filesystem(type);
	return mnt;
}

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	int err;

	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);

	err = lock_mount(path);
	if (err)
		return err;

	err = -EINVAL;
	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, path);

unlock:
	unlock_mount(path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;
	int err;

	if (!type)
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}

int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		down_write(&namespace_sem);
		br_write_lock(vfsmount_lock);
		list_del_init(&mnt->mnt_expire);
		br_write_unlock(vfsmount_lock);
		up_write(&namespace_sem);
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to put on the list.
 * @expiry_list: The list to put the mount on.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
}
EXPORT_SYMBOL(mnt_set_expiry);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the mnt_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * vfsmount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires
 * that, so copy byte by byte and report exactly how many bytes were left.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	/* Default to relatime unless overriden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
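
/*
 * Userspace view (illustrative): a read-only bind mount takes two calls,
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);
 *	mount(NULL, "/dst", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 *
 * The first call takes the MS_BIND branch above (do_loopback); the second
 * takes the MS_REMOUNT branch (do_remount), and because MS_BIND is set it
 * only changes the per-mount flags via change_mount_flags().
 */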

static struct mnt_namespace *alloc_mnt_ns(void)
{
	struct mnt_namespace *new_ns;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	return new_ns;
}

void mnt_make_longterm(struct vfsmount *mnt)
{
	__mnt_make_longterm(real_mount(mnt));
}

void mnt_make_shortterm(struct vfsmount *m)
{
#ifdef CONFIG_SMP
	struct mount *mnt = real_mount(m);
	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
		return;
	br_write_lock(vfsmount_lock);
	atomic_dec(&mnt->mnt_longterm);
	br_write_unlock(vfsmount_lock);
#endif
}

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old = mnt_ns->root;
	struct mount *new;

	new_ns = alloc_mnt_ns();
	if (IS_ERR(new_ns))
		return new_ns;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
	if (!new) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	new_ns->root = new;
	br_write_lock(vfsmount_lock);
	list_add_tail(&new_ns->list, &new->mnt_list);
	br_write_unlock(vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		__mnt_make_longterm(q);
		if (fs) {
			if (&p->mnt == fs->root.mnt) {
				fs->root.mnt = mntget(&q->mnt);
				__mnt_make_longterm(q);
				mnt_make_shortterm(&p->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == fs->pwd.mnt) {
				fs->pwd.mnt = mntget(&q->mnt);
				__mnt_make_longterm(q);
				mnt_make_shortterm(&p->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}

struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}

/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @m: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns();
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		__mnt_make_longterm(mnt);
		new_ns->root = mnt;
		list_add(&new_ns->list, &mnt->mnt_list);
	} else {
		mntput(m);
	}
	return new_ns;
}

struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for an active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the superblock */
	down_write(&s->s_umount);
	/* ... and return the root of the (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);

SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * Return true if path is reachable from root
 *
 * namespace_sem or vfsmount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	br_read_lock(vfsmount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	br_read_unlock(vfsmount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory
 * put_old, makes new_root the new root file system of the current process,
 * and sets root/cwd of all processes which had them on the current root
 * to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root.  The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root.  No
 * other file system may be mounted on put_old.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if
 *    something cared enough to change them, it's probably wrong to force
 *    them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	error = lock_mount(&old);
	if (error)
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	if (IS_MNT_SHARED(real_mount(old.mnt)) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	if (d_unlinked(old.dentry))
		goto out4;
	error = -EBUSY;
	if (new.mnt == root.mnt ||
	    old.mnt == root.mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
		goto out4;
	br_write_lock(vfsmount_lock);
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root_mnt, &old);
	/* mount new_root on / */
	attach_mnt(new_mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	br_write_unlock(vfsmount_lock);
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(&old);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
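
/*
 * Illustrative userspace sequence (e.g. switching root during early boot
 * or container setup; names are hypothetical):
 *
 *	mkdir("/newroot/oldroot", 0700);
 *	syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot");
 *	chdir("/");
 *	umount2("/oldroot", MNT_DETACH);
 */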

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	br_lock_init(vfsmount_lock);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	LIST_HEAD(umount_list);

	if (!atomic_dec_and_test(&ns->count))
		return;
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(ns->root, 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}

struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		mnt_make_longterm(mnt);
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		mnt_make_shortterm(mnt);
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}