1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/syscalls.h>
126#include <linux/time.h>
127#include <linux/rcupdate.h>
128#include <linux/pid_namespace.h>
129
130#include <asm/uaccess.h>
131
/* Lock-classification predicates over fl_flags. The macro argument is
 * parenthesized so an expression argument expands correctly. */
#define IS_POSIX(fl)	((fl)->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	((fl)->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	((fl)->fl_flags & FL_LEASE)
135
136static bool lease_breaking(struct file_lock *fl)
137{
138 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
139}
140
141static int target_leasetype(struct file_lock *fl)
142{
143 if (fl->fl_flags & FL_UNLOCK_PENDING)
144 return F_UNLCK;
145 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
146 return F_RDLCK;
147 return fl->fl_type;
148}
149
/* Tunables: whether leases may be taken at all, and the grace period
 * (seconds) a lease holder gets to respond to a break.
 * NOTE(review): presumably exported as sysctls elsewhere — confirm. */
int leases_enable = 1;
int lease_break_time = 45;
152
/* Walk every lock on @inode through a struct file_lock ** cursor, so
 * the current entry can be unlinked or replaced in place. */
#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
155
/* Global list of all active locks, global list of blocked waiters
 * (used for POSIX deadlock detection), and the single spinlock that
 * protects both lists and every inode's i_flock chain. */
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);
159
160
161
162
/* Acquire the global spinlock protecting all file-lock state. */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);
168
/* Release the global file-lock spinlock. */
void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);
174
/* Slab cache backing all struct file_lock allocations. */
static struct kmem_cache *filelock_cache __read_mostly;
176
177static void locks_init_lock_heads(struct file_lock *fl)
178{
179 INIT_LIST_HEAD(&fl->fl_link);
180 INIT_LIST_HEAD(&fl->fl_block);
181 init_waitqueue_head(&fl->fl_wait);
182}
183
184
185struct file_lock *locks_alloc_lock(void)
186{
187 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
188
189 if (fl)
190 locks_init_lock_heads(fl);
191
192 return fl;
193}
194EXPORT_SYMBOL_GPL(locks_alloc_lock);
195
196void locks_release_private(struct file_lock *fl)
197{
198 if (fl->fl_ops) {
199 if (fl->fl_ops->fl_release_private)
200 fl->fl_ops->fl_release_private(fl);
201 fl->fl_ops = NULL;
202 }
203 if (fl->fl_lmops) {
204 if (fl->fl_lmops->lm_release_private)
205 fl->fl_lmops->lm_release_private(fl);
206 fl->fl_lmops = NULL;
207 }
208
209}
210EXPORT_SYMBOL_GPL(locks_release_private);
211
212
/* Free a lock obtained from locks_alloc_lock(). The lock must be
 * completely unlinked: no waiters, not blocked on anything, and not on
 * any global list. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
223
/* Initialize a caller-provided (e.g. stack-allocated) file_lock to a
 * clean, zeroed state. */
void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}

EXPORT_SYMBOL(locks_init_lock);
231
232static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
233{
234 if (fl->fl_ops) {
235 if (fl->fl_ops->fl_copy_lock)
236 fl->fl_ops->fl_copy_lock(new, fl);
237 new->fl_ops = fl->fl_ops;
238 }
239 if (fl->fl_lmops)
240 new->fl_lmops = fl->fl_lmops;
241}
242
243
244
245
/*
 * Copy the core fields of @fl into @new. The file pointer and both
 * callback tables are deliberately cleared: this copy carries no
 * private state. Use locks_copy_lock() for a full copy.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);
259
/*
 * Full copy of @fl into @new, including the file pointer, callback
 * tables and filesystem-private state. Any private state previously
 * held by @new is released first.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);
273
274static inline int flock_translate_cmd(int cmd) {
275 if (cmd & LOCK_MAND)
276 return cmd & (LOCK_MAND | LOCK_RW);
277 switch (cmd) {
278 case LOCK_SH:
279 return F_RDLCK;
280 case LOCK_EX:
281 return F_WRLCK;
282 case LOCK_UN:
283 return F_UNLCK;
284 }
285 return -EINVAL;
286}
287
288
289static int flock_make_lock(struct file *filp, struct file_lock **lock,
290 unsigned int cmd)
291{
292 struct file_lock *fl;
293 int type = flock_translate_cmd(cmd);
294 if (type < 0)
295 return type;
296
297 fl = locks_alloc_lock();
298 if (fl == NULL)
299 return -ENOMEM;
300
301 fl->fl_file = filp;
302 fl->fl_pid = current->tgid;
303 fl->fl_flags = FL_FLOCK;
304 fl->fl_type = type;
305 fl->fl_end = OFFSET_MAX;
306
307 *lock = fl;
308 return 0;
309}
310
311static int assign_type(struct file_lock *fl, int type)
312{
313 switch (type) {
314 case F_RDLCK:
315 case F_WRLCK:
316 case F_UNLCK:
317 fl->fl_type = type;
318 break;
319 default:
320 return -EINVAL;
321 }
322 return 0;
323}
324
325
326
327
/*
 * Translate a userspace struct flock into an in-kernel FL_POSIX lock
 * on @filp. l_whence/l_start/l_len are resolved into an absolute
 * [fl_start, fl_end] byte range: l_len == 0 means "to end of file"
 * (fl_end == OFFSET_MAX), and a negative l_len locks the range ending
 * just before l_start. Returns 0, -EINVAL or -EOVERFLOW.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the range of l_len unspecified */

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		/* negative l_len: lock the l_len bytes ending at start-1 */
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
376
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of flock_to_posix_lock(), only needed on 32-bit
 * kernels where struct flock64 differs from struct flock. Same range
 * semantics: l_len == 0 means to EOF, negative l_len locks backwards.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		/* negative l_len: lock the l_len bytes ending at start-1 */
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif
423
424
/* Default lm_break: tell the lease holder via SIGIO that the lease
 * must be broken. */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}
429
430static void lease_release_private_callback(struct file_lock *fl)
431{
432 if (!fl->fl_file)
433 return;
434
435 f_delown(fl->fl_file);
436 fl->fl_file->f_owner.signum = 0;
437}
438
/* Lock-manager callbacks used by ordinary (local) leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_release_private = lease_release_private_callback,
	.lm_change = lease_modify,
};
444
445
446
447
448static int lease_init(struct file *filp, int type, struct file_lock *fl)
449 {
450 if (assign_type(fl, type) != 0)
451 return -EINVAL;
452
453 fl->fl_owner = current->files;
454 fl->fl_pid = current->tgid;
455
456 fl->fl_file = filp;
457 fl->fl_flags = FL_LEASE;
458 fl->fl_start = 0;
459 fl->fl_end = OFFSET_MAX;
460 fl->fl_ops = NULL;
461 fl->fl_lmops = &lease_manager_ops;
462 return 0;
463}
464
465
466static struct file_lock *lease_alloc(struct file *filp, int type)
467{
468 struct file_lock *fl = locks_alloc_lock();
469 int error = -ENOMEM;
470
471 if (fl == NULL)
472 return ERR_PTR(error);
473
474 error = lease_init(filp, type, fl);
475 if (error) {
476 locks_free_lock(fl);
477 return ERR_PTR(error);
478 }
479 return fl;
480}
481
482
483
484static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
485{
486 return ((fl1->fl_end >= fl2->fl_start) &&
487 (fl2->fl_end >= fl1->fl_start));
488}
489
490
491
492
/* Two POSIX locks share an owner if their lock manager says so via
 * lm_compare_owner (both must use the same manager), or by default if
 * they were taken through the same files_struct. */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}
500
501
502
503
/* Unlink @waiter from its blocker's wait list and from the global
 * blocked list. Caller must hold the file-lock spinlock. */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}
510
511
512
/* Locked wrapper around __locks_delete_block(). */
void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}
EXPORT_SYMBOL(locks_delete_block);
520
521
522
523
524
525
/* Queue @waiter behind @blocker. POSIX waiters additionally go on the
 * global blocked_list, which deadlock detection walks. Caller must
 * hold the file-lock spinlock. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}
535
536
537
538
539
/* Detach and wake every waiter queued on @blocker. Lock managers get
 * their lm_notify callback instead of a plain wait-queue wakeup. */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}
554
555
556
557
/* Link @fl into the inode's lock chain at position @pos and onto the
 * global lock list, pinning the inserting task's pid so the owner can
 * later be reported in the correct pid namespace. */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into the inode's i_flock chain */
	fl->fl_next = *pos;
	*pos = fl;
}
568
569
570
571
572
573
574
/*
 * Unlink the lock at *thisfl_p from the inode chain and global list,
 * drop its fasync entry and pid reference, wake anything blocked on
 * it, and free it. Caller must hold the file-lock spinlock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	/* remove any fasync entry; a leftover one indicates a bug */
	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
597
598
599
600
601static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
602{
603 if (sys_fl->fl_type == F_WRLCK)
604 return 1;
605 if (caller_fl->fl_type == F_WRLCK)
606 return 1;
607 return 0;
608}
609
610
611
612
/* POSIX conflict check: only a POSIX lock held by a different owner
 * and overlapping the caller's range can conflict. */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (!IS_POSIX(sys_fl))
		return 0;
	if (posix_same_owner(caller_fl, sys_fl))
		return 0;	/* an owner never conflicts with itself */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;
	return locks_conflict(caller_fl, sys_fl);
}
627
628
629
630
631static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
632{
633
634
635
636 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
637 return (0);
638 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
639 return 0;
640
641 return (locks_conflict(caller_fl, sys_fl));
642}
643
/*
 * posix_test_lock - test for a conflicting POSIX lock
 * @filp: file to examine
 * @fl: lock to test; overwritten with the first conflicting lock
 *      found, or has fl_type set to F_UNLCK if there is none
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		/* report the holder's pid as seen from its pid namespace */
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
/* Give up deadlock detection after following this many wait-for links,
 * so a long (or corrupt/cyclic) chain cannot stall the caller. */
#define MAX_DEADLK_ITERATIONS 10
693
694
/* Return the lock that @block_fl's owner is itself blocked on, or NULL
 * if that owner is not waiting for anything. Walks the global
 * blocked_list; caller must hold the file-lock spinlock. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
705
/*
 * Would blocking @caller_fl on @block_fl close a cycle of owners each
 * waiting on the next? Follows the wait-for chain up to
 * MAX_DEADLK_ITERATIONS links; past that it conservatively reports no
 * deadlock rather than looping forever.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
719
720
721
722
723
724
725
726
/*
 * Apply an FL_FLOCK lock @request to @filp. A previous flock taken
 * through the same struct file is replaced (flock semantics: one lock
 * per open file description). With FL_ACCESS set, only test for
 * conflicts without installing anything. Returns 0, -ENOMEM, -ENOENT
 * (FL_EXISTS unlock with nothing held), -EAGAIN on conflict, or
 * FILE_LOCK_DEFERRED when the caller should wait (FL_SLEEP).
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	/* Preallocate outside the spinlock; unlocks and tests need none */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* First remove any existing flock held through this struct file */
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* same type already held: done */
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * We removed a lock (mode change): briefly drop the spinlock so
	 * tasks we just woke can run before we look for conflicts.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;	/* test-only: nothing to install */
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;	/* ownership passed to the lock list */
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
805
/*
 * Apply POSIX lock @request to @inode's lock list, merging with and
 * splitting the owner's existing locks so the list always holds
 * non-overlapping, maximally merged ranges per owner. If @conflock is
 * non-NULL it receives a copy of the first conflicting lock found.
 *
 * Returns 0 on success, -EAGAIN on conflict (non-blocking),
 * FILE_LOCK_DEFERRED when the caller should wait, -EDEADLK when
 * blocking would deadlock, -ENOENT (FL_EXISTS unlock of nothing), or
 * -ENOLCK when a needed allocation failed.
 */
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * Preallocate up to two locks outside the spinlock: one for the
	 * request itself and one in case an existing lock must be split.
	 * A whole-file unlock can never need one. Allocation failure is
	 * only reported later, if a lock actually turns out to be needed
	 * (see the -ENOLCK paths below).
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* No conflicts; FL_ACCESS callers only wanted the check above */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	before = &inode->i_flock;

	/* Skip to this owner's first POSIX lock (same-owner locks sit
	 * adjacent on the chain). */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				!posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Walk this owner's locks, merging/splitting around the request */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Same type: try to merge into one covering lock */
		if (request->fl_type == fl->fl_type) {
			/*
			 * The "- 1"s below let ranges that merely touch
			 * (adjacent bytes) merge as well as overlaps.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* Grow fl to the union of both ranges */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Different type: replace the overlapped portion */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;	/* unlock inserts nothing */
			if (fl->fl_start < request->fl_start)
				left = fl;	/* piece survives on the left */
			/* If the next lock in the list has a higher end
			 * address than the request, a piece survives on
			 * the right too. */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* fl is entirely covered by the request */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/*
				 * Reuse the existing lock in place for
				 * the new type/range; wake waiters since
				 * the covered range is changing.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}

		/* Go on to the next lock */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * A lock straddling the request must be split in two, which
	 * requires the second preallocated lock. Report -ENOLCK if that
	 * earlier allocation failed.
	 */
	error = -ENOLCK;
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* Splitting: the surviving right-hand piece
			 * becomes a fresh lock. */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_flocks();
	/* Free any preallocated locks that went unused */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
/*
 * posix_lock_file - apply a POSIX-style lock to a file
 * @filp: file to lock
 * @fl: the lock to apply
 * @conflock: receives a copy of the conflicting lock on -EAGAIN, if
 *            non-NULL
 *
 * Thin wrapper resolving @filp's inode for __posix_lock_file().
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
/*
 * Apply a POSIX lock, sleeping until it can be granted when the
 * request blocks. An interrupting signal unhooks us from the blocker
 * and returns the error from wait_event_interruptible().
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the lock is granted */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
1056
1057
1058
1059
1060
1061
1062
1063
/*
 * locks_mandatory_locked - does some other owner hold a POSIX lock?
 * @inode: inode to check
 *
 * Returns -EAGAIN if any POSIX lock not owned by the current task's
 * files_struct exists on @inode, 0 otherwise.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/* Scan until the first POSIX lock with a different owner */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
/*
 * Check that a read or write touching [offset, offset+count) does not
 * collide with another owner's POSIX lock, waiting (unless the file
 * was opened O_NONBLOCK) until it no longer does. The probe uses an
 * FL_ACCESS lock, so nothing is actually installed.
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we were woken without error, only keep
			 * waiting while the inode still has mandatory
			 * locking in effect (it may have been remounted
			 * while we slept).
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1136
/* Clear the pending-break flags made obsolete by the lease's new state
 * @arg. */
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through: a full unlock also cancels any downgrade */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
1147
1148
/*
 * lease_modify - change or drop an existing lease
 * @before: link in the inode chain holding the lease
 * @arg: new lease type (F_RDLCK to downgrade, F_UNLCK to remove)
 *
 * Default lm_change implementation: retarget the lease, clear its
 * pending-break state, wake waiters, and delete it entirely on unlock.
 */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);
1164
1165static bool past_time(unsigned long then)
1166{
1167 if (!then)
1168
1169 return false;
1170 return time_after(jiffies, then);
1171}
1172
/* Complete any lease breaks or downgrades on @inode whose grace period
 * has expired. *before is re-read each pass because lease_modify() may
 * delete the current entry out from under us. */
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
		if (past_time(fl->fl_downgrade_time))
			lease_modify(before, F_RDLCK);
		if (past_time(fl->fl_break_time))
			lease_modify(before, F_UNLCK);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199int __break_lease(struct inode *inode, unsigned int mode)
1200{
1201 int error = 0;
1202 struct file_lock *new_fl, *flock;
1203 struct file_lock *fl;
1204 unsigned long break_time;
1205 int i_have_this_lease = 0;
1206 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1207
1208 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1209 if (IS_ERR(new_fl))
1210 return PTR_ERR(new_fl);
1211
1212 lock_flocks();
1213
1214 time_out_leases(inode);
1215
1216 flock = inode->i_flock;
1217 if ((flock == NULL) || !IS_LEASE(flock))
1218 goto out;
1219
1220 if (!locks_conflict(flock, new_fl))
1221 goto out;
1222
1223 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1224 if (fl->fl_owner == current->files)
1225 i_have_this_lease = 1;
1226
1227 break_time = 0;
1228 if (lease_break_time > 0) {
1229 break_time = jiffies + lease_break_time * HZ;
1230 if (break_time == 0)
1231 break_time++;
1232 }
1233
1234 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1235 if (want_write) {
1236 if (fl->fl_flags & FL_UNLOCK_PENDING)
1237 continue;
1238 fl->fl_flags |= FL_UNLOCK_PENDING;
1239 fl->fl_break_time = break_time;
1240 } else {
1241 if (lease_breaking(flock))
1242 continue;
1243 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1244 fl->fl_downgrade_time = break_time;
1245 }
1246 fl->fl_lmops->lm_break(fl);
1247 }
1248
1249 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1250 error = -EWOULDBLOCK;
1251 goto out;
1252 }
1253
1254restart:
1255 break_time = flock->fl_break_time;
1256 if (break_time != 0) {
1257 break_time -= jiffies;
1258 if (break_time == 0)
1259 break_time++;
1260 }
1261 locks_insert_block(flock, new_fl);
1262 unlock_flocks();
1263 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1264 !new_fl->fl_next, break_time);
1265 lock_flocks();
1266 __locks_delete_block(new_fl);
1267 if (error >= 0) {
1268 if (error == 0)
1269 time_out_leases(inode);
1270
1271
1272
1273
1274 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1275 flock = flock->fl_next) {
1276 if (locks_conflict(new_fl, flock))
1277 goto restart;
1278 }
1279 error = 0;
1280 }
1281
1282out:
1283 unlock_flocks();
1284 locks_free_lock(new_fl);
1285 return error;
1286}
1287
1288EXPORT_SYMBOL(__break_lease);
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299void lease_get_mtime(struct inode *inode, struct timespec *time)
1300{
1301 struct file_lock *flock = inode->i_flock;
1302 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1303 *time = current_fs_time(inode->i_sb);
1304 else
1305 *time = inode->i_mtime;
1306}
1307
1308EXPORT_SYMBOL(lease_get_mtime);
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
/*
 * fcntl_getlease - F_GETLEASE: report the lease held through @filp
 *
 * Returns the type the lease is heading towards (accounting for an
 * in-progress break or downgrade), or F_UNLCK if this file holds no
 * lease. Expired leases are reaped first.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = target_leasetype(fl);
			break;
		}
	}
	unlock_flocks();
	return type;
}
1350
/*
 * Install (or modify in place) lease *flp on @filp. On success the
 * lease is linked into the inode's chain and 0 is returned; if an
 * existing lease for this file was changed instead, *flp is pointed at
 * it. Returns -EAGAIN when conflicting opens or leases exist, -EINVAL
 * when leases are disabled. Caller must hold the file-lock spinlock.
 */
int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	lease = *flp;

	/*
	 * A read lease is incompatible with current writers; a write
	 * lease requires that nobody else has the file open (dentry and
	 * inode reference counts beyond our own).
	 */
	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	if ((arg == F_WRLCK)
	    && ((dentry->d_count > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * Walk existing leases: remember our own (to modify in place)
	 * and refuse if another holder's lease would conflict.
	 */
	error = -EAGAIN;
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp) {
			my_before = before;
			continue;
		}
		/* Someone else holds a lease: no exclusive lease possible */
		if (arg == F_WRLCK)
			goto out;
		/* Nor can we share with a lease being fully broken */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_before != NULL) {
		/* Retarget our existing lease instead of adding another */
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
1415
/* Remove @filp's lease from its inode via the lease's lm_change hook.
 * Returns -EAGAIN if this file holds no lease. Caller must hold the
 * file-lock spinlock. */
int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
	struct file_lock *fl, **before;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file != filp)
			continue;
		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
	}
	return -EAGAIN;
}
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
/*
 * generic_setlease - set or release a lease on an open file
 * @filp: file to apply the lease to
 * @arg: F_RDLCK, F_WRLCK or F_UNLCK
 * @flp: lease to install (in) / lease actually in effect (out)
 *
 * Checks permission (file owner or CAP_LEASE) and that the file is a
 * regular file, reaps expired leases, then dispatches to the add or
 * delete path. Caller must hold the file-lock spinlock (see
 * vfs_setlease()).
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, flp);
	case F_RDLCK:
	case F_WRLCK:
		return generic_add_lease(filp, arg, flp);
	default:
		BUG();
	}
}
EXPORT_SYMBOL(generic_setlease);
1472
1473static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1474{
1475 if (filp->f_op && filp->f_op->setlease)
1476 return filp->f_op->setlease(filp, arg, lease);
1477 else
1478 return generic_setlease(filp, arg, lease);
1479}
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
/*
 * vfs_setlease - set or release a lease on @filp
 * @filp: file to apply the lease to
 * @arg: F_RDLCK, F_WRLCK or F_UNLCK
 * @lease: lease to install (in) / lease in effect (out)
 *
 * Takes the file-lock spinlock around __vfs_setlease().
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
1519
/* F_SETLEASE(F_UNLCK): release any lease held through @filp, using a
 * stack-allocated unlock request. lease_init() cannot fail for
 * F_UNLCK (a valid type), so its return value is not checked. */
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}
1528
/*
 * Implement F_SETLEASE for F_RDLCK/F_WRLCK: install the lease and wire
 * up SIGIO delivery (fasync entry + file owner) so the holder is
 * notified when the lease needs breaking.
 */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	/* Preallocate the fasync entry before taking the spinlock */
	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	/* An existing lease was modified instead: free our unused copy */
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the pre-existing entry if one
	 * was already registered for this fd/filp; only in that case is
	 * @new still ours to free below.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1584{
1585 if (arg == F_UNLCK)
1586 return do_fcntl_delete_lease(filp);
1587 return do_fcntl_add_lease(fd, filp, arg);
1588}
1589
1590
1591
1592
1593
1594
1595
1596
/*
 * flock_lock_file_wait - apply a flock lock, sleeping until granted
 * @filp: the file to lock
 * @fl: the lock to apply
 *
 * An interrupting signal unhooks us from the blocker and returns the
 * error from wait_event_interruptible().
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the lock is granted */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
1614
1615EXPORT_SYMBOL(flock_lock_file_wait);
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
/*
 * flock(2): apply or remove an advisory (or LOCK_MAND share-mode) lock
 * on an open file. LOCK_NB makes the operation non-blocking. Dispatch
 * goes to the filesystem's ->flock method when it has one, otherwise
 * to the generic flock implementation.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/* Taking a lock (unlike dropping one) needs the file open for
	 * read or write */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
1681
1682
1683
1684
1685
1686
1687
1688
1689
/*
 * vfs_test_lock - test whether @fl would conflict with an existing lock
 * @filp: file to test against
 * @fl: lock to test; overwritten with the conflicting lock, or has
 *      fl_type set to F_UNLCK if there is none
 *
 * Uses the filesystem's ->lock method when provided, otherwise the
 * generic POSIX test.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
1698
/*
 * Convert an in-kernel POSIX lock back to a userspace struct flock.
 * On 32-bit kernels the 64-bit lock range may not fit into off_t, in
 * which case -EOVERFLOW is returned. fl_end == OFFSET_MAX maps back to
 * l_len == 0 ("to end of file").
 */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
1719
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of posix_lock_to_flock(); the full range always fits,
 * so no overflow check is needed and nothing can fail.
 */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_whence = 0;
	flock->l_start = fl->fl_start;
	/* l_len == 0 means "to end of file". */
	if (fl->fl_end == OFFSET_MAX)
		flock->l_len = 0;
	else
		flock->l_len = fl->fl_end - fl->fl_start + 1;
	flock->l_type = fl->fl_type;
}
#endif
1731
1732
1733
1734
/* Report the first lock that would block the lock described by the
 * user's struct flock; implements the F_GETLK fcntl() command.
 * On return, flock.l_type is F_UNLCK if no conflicting lock exists.
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		/* may fail with -EOVERFLOW if the range doesn't fit */
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1803{
1804 if (filp->f_op && filp->f_op->lock)
1805 return filp->f_op->lock(filp, cmd, fl);
1806 else
1807 return posix_lock_file(filp, fl, conf);
1808}
1809EXPORT_SYMBOL_GPL(vfs_lock_file);
1810
/* Apply the lock request, sleeping until it is granted whenever the
 * filesystem defers it (FILE_LOCK_DEFERRED).  An interrupted wait
 * removes us from the blocked list before returning the error.
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the deferred lock is granted. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}
1834
1835
1836
1837
1838int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1839 struct flock __user *l)
1840{
1841 struct file_lock *file_lock = locks_alloc_lock();
1842 struct flock flock;
1843 struct inode *inode;
1844 struct file *f;
1845 int error;
1846
1847 if (file_lock == NULL)
1848 return -ENOLCK;
1849
1850
1851
1852
1853 error = -EFAULT;
1854 if (copy_from_user(&flock, l, sizeof(flock)))
1855 goto out;
1856
1857 inode = filp->f_path.dentry->d_inode;
1858
1859
1860
1861
1862 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1863 error = -EAGAIN;
1864 goto out;
1865 }
1866
1867again:
1868 error = flock_to_posix_lock(filp, file_lock, &flock);
1869 if (error)
1870 goto out;
1871 if (cmd == F_SETLKW) {
1872 file_lock->fl_flags |= FL_SLEEP;
1873 }
1874
1875 error = -EBADF;
1876 switch (flock.l_type) {
1877 case F_RDLCK:
1878 if (!(filp->f_mode & FMODE_READ))
1879 goto out;
1880 break;
1881 case F_WRLCK:
1882 if (!(filp->f_mode & FMODE_WRITE))
1883 goto out;
1884 break;
1885 case F_UNLCK:
1886 break;
1887 default:
1888 error = -EINVAL;
1889 goto out;
1890 }
1891
1892 error = do_lock_file_wait(filp, cmd, file_lock);
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903 spin_lock(¤t->files->file_lock);
1904 f = fcheck(fd);
1905 spin_unlock(¤t->files->file_lock);
1906 if (!error && f != filp && flock.l_type != F_UNLCK) {
1907 flock.l_type = F_UNLCK;
1908 goto again;
1909 }
1910
1911out:
1912 locks_free_lock(file_lock);
1913 return error;
1914}
1915
1916#if BITS_PER_LONG == 32
1917
1918
1919
/* 64-bit variant of fcntl_getlk(): implements F_GETLK64 on 32-bit
 * kernels using struct flock64, so no overflow check is needed.
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}
1952
1953
1954
1955
1956int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1957 struct flock64 __user *l)
1958{
1959 struct file_lock *file_lock = locks_alloc_lock();
1960 struct flock64 flock;
1961 struct inode *inode;
1962 struct file *f;
1963 int error;
1964
1965 if (file_lock == NULL)
1966 return -ENOLCK;
1967
1968
1969
1970
1971 error = -EFAULT;
1972 if (copy_from_user(&flock, l, sizeof(flock)))
1973 goto out;
1974
1975 inode = filp->f_path.dentry->d_inode;
1976
1977
1978
1979
1980 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1981 error = -EAGAIN;
1982 goto out;
1983 }
1984
1985again:
1986 error = flock64_to_posix_lock(filp, file_lock, &flock);
1987 if (error)
1988 goto out;
1989 if (cmd == F_SETLKW64) {
1990 file_lock->fl_flags |= FL_SLEEP;
1991 }
1992
1993 error = -EBADF;
1994 switch (flock.l_type) {
1995 case F_RDLCK:
1996 if (!(filp->f_mode & FMODE_READ))
1997 goto out;
1998 break;
1999 case F_WRLCK:
2000 if (!(filp->f_mode & FMODE_WRITE))
2001 goto out;
2002 break;
2003 case F_UNLCK:
2004 break;
2005 default:
2006 error = -EINVAL;
2007 goto out;
2008 }
2009
2010 error = do_lock_file_wait(filp, cmd, file_lock);
2011
2012
2013
2014
2015
2016 spin_lock(¤t->files->file_lock);
2017 f = fcheck(fd);
2018 spin_unlock(¤t->files->file_lock);
2019 if (!error && f != filp && flock.l_type != F_UNLCK) {
2020 flock.l_type = F_UNLCK;
2021 goto again;
2022 }
2023
2024out:
2025 locks_free_lock(file_lock);
2026 return error;
2027}
2028#endif
2029
2030
2031
2032
2033
2034
/*
 * Release all POSIX locks on @filp held by @owner, by submitting a
 * single F_UNLCK request covering the whole file range.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	if (!filp->f_path.dentry->d_inode->i_flock)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	/* The filesystem may have attached private state via fl_ops. */
	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}
2062
2063EXPORT_SYMBOL(locks_remove_posix);
2064
2065
2066
2067
/*
 * Remove all flock locks and leases attached to @filp from the inode's
 * lock list.  Filesystems with their own ->flock method are asked to
 * unlock first; remaining entries are purged under lock_flocks().
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_flocks();
	before = &inode->i_flock;

	/* Walk with a pointer-to-pointer so entries can be unlinked
	 * in place without restarting the scan. */
	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* A POSIX lock for this filp here would be a bug:
			 * those are removed by locks_remove_posix(). */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_flocks();
}
2110
2111
2112
2113
2114
2115
2116
2117
2118int
2119posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2120{
2121 int status = 0;
2122
2123 lock_flocks();
2124 if (waiter->fl_next)
2125 __locks_delete_block(waiter);
2126 else
2127 status = -ENOENT;
2128 unlock_flocks();
2129 return status;
2130}
2131
2132EXPORT_SYMBOL(posix_unblock_lock);
2133
2134
2135
2136
2137
2138
2139
2140
2141int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2142{
2143 if (filp->f_op && filp->f_op->lock)
2144 return filp->f_op->lock(filp, F_CANCELLK, fl);
2145 return 0;
2146}
2147
2148EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2149
2150#ifdef CONFIG_PROC_FS
2151#include <linux/proc_fs.h>
2152#include <linux/seq_file.h>
2153
/*
 * Format one lock as a line of /proc/locks output.  @id is the line
 * index and @pfx is "" for a held lock or " ->" for a blocked waiter.
 * The output format is ABI: userspace tools parse these fields.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	/* Report the pid as seen from the owner's pid namespace when known. */
	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK  MSNFS     ");
		} else {
			seq_printf(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE  ");
		if (lease_breaking(fl))
			seq_printf(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE    ");
		else
			seq_printf(f, "BREAKER   ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
2224
2225static int locks_show(struct seq_file *f, void *v)
2226{
2227 struct file_lock *fl, *bfl;
2228
2229 fl = list_entry(v, struct file_lock, fl_link);
2230
2231 lock_get_status(f, fl, *((loff_t *)f->private), "");
2232
2233 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2234 lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2235
2236 return 0;
2237}
2238
2239static void *locks_start(struct seq_file *f, loff_t *pos)
2240{
2241 loff_t *p = f->private;
2242
2243 lock_flocks();
2244 *p = (*pos + 1);
2245 return seq_list_start(&file_lock_list, *pos);
2246}
2247
2248static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2249{
2250 loff_t *p = f->private;
2251 ++*p;
2252 return seq_list_next(v, &file_lock_list, pos);
2253}
2254
/* seq_file ->stop: drop the lock taken in locks_start(). */
static void locks_stop(struct seq_file *f, void *v)
{
	unlock_flocks();
}
2259
/* Iterator over the global file_lock_list, backing /proc/locks. */
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};
2266
/* Open /proc/locks; the seq_file private area holds the loff_t line id. */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}
2271
/* file_operations for the /proc/locks entry. */
static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
2278
/* Register /proc/locks at module init time. */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
2285#endif
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2301{
2302 struct file_lock *fl;
2303 int result = 1;
2304 lock_flocks();
2305 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2306 if (IS_POSIX(fl)) {
2307 if (fl->fl_type == F_RDLCK)
2308 continue;
2309 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2310 continue;
2311 } else if (IS_FLOCK(fl)) {
2312 if (!(fl->fl_type & LOCK_MAND))
2313 continue;
2314 if (fl->fl_type & LOCK_READ)
2315 continue;
2316 } else
2317 continue;
2318 result = 0;
2319 break;
2320 }
2321 unlock_flocks();
2322 return result;
2323}
2324
2325EXPORT_SYMBOL(lock_may_read);
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2341{
2342 struct file_lock *fl;
2343 int result = 1;
2344 lock_flocks();
2345 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2346 if (IS_POSIX(fl)) {
2347 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2348 continue;
2349 } else if (IS_FLOCK(fl)) {
2350 if (!(fl->fl_type & LOCK_MAND))
2351 continue;
2352 if (fl->fl_type & LOCK_WRITE)
2353 continue;
2354 } else
2355 continue;
2356 result = 0;
2357 break;
2358 }
2359 unlock_flocks();
2360 return result;
2361}
2362
2363EXPORT_SYMBOL(lock_may_write);
2364
/* Create the slab cache used for all struct file_lock allocations;
 * SLAB_PANIC means boot fails outright if the cache cannot be made. */
static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	return 0;
}

core_initcall(filelock_init);
2374