/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Implements POSIX (fcntl) byte-range locks, BSD-style flock() locks
 *  and file leases.
 */

117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/smp_lock.h>
126#include <linux/syscalls.h>
127#include <linux/time.h>
128#include <linux/rcupdate.h>
129#include <linux/pid_namespace.h>
130
131#include <asm/uaccess.h>
132
133#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
134#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
135#define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
136
137int leases_enable = 1;
138int lease_break_time = 45;
139
140#define for_each_lock(inode, lockp) \
141 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
142
143static LIST_HEAD(file_lock_list);
144static LIST_HEAD(blocked_list);
145
146static struct kmem_cache *filelock_cache __read_mostly;
147
148
149static struct file_lock *locks_alloc_lock(void)
150{
151 return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
152}
153
154static void locks_release_private(struct file_lock *fl)
155{
156 if (fl->fl_ops) {
157 if (fl->fl_ops->fl_release_private)
158 fl->fl_ops->fl_release_private(fl);
159 fl->fl_ops = NULL;
160 }
161 if (fl->fl_lmops) {
162 if (fl->fl_lmops->fl_release_private)
163 fl->fl_lmops->fl_release_private(fl);
164 fl->fl_lmops = NULL;
165 }
166
167}
168
169
170static void locks_free_lock(struct file_lock *fl)
171{
172 BUG_ON(waitqueue_active(&fl->fl_wait));
173 BUG_ON(!list_empty(&fl->fl_block));
174 BUG_ON(!list_empty(&fl->fl_link));
175
176 locks_release_private(fl);
177 kmem_cache_free(filelock_cache, fl);
178}
179
180void locks_init_lock(struct file_lock *fl)
181{
182 INIT_LIST_HEAD(&fl->fl_link);
183 INIT_LIST_HEAD(&fl->fl_block);
184 init_waitqueue_head(&fl->fl_wait);
185 fl->fl_next = NULL;
186 fl->fl_fasync = NULL;
187 fl->fl_owner = NULL;
188 fl->fl_pid = 0;
189 fl->fl_nspid = NULL;
190 fl->fl_file = NULL;
191 fl->fl_flags = 0;
192 fl->fl_type = 0;
193 fl->fl_start = fl->fl_end = 0;
194 fl->fl_ops = NULL;
195 fl->fl_lmops = NULL;
196}
197
198EXPORT_SYMBOL(locks_init_lock);
199
200
201
202
203
204static void init_once(void *foo)
205{
206 struct file_lock *lock = (struct file_lock *) foo;
207
208 locks_init_lock(lock);
209}
210
211static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
212{
213 if (fl->fl_ops) {
214 if (fl->fl_ops->fl_copy_lock)
215 fl->fl_ops->fl_copy_lock(new, fl);
216 new->fl_ops = fl->fl_ops;
217 }
218 if (fl->fl_lmops) {
219 if (fl->fl_lmops->fl_copy_lock)
220 fl->fl_lmops->fl_copy_lock(new, fl);
221 new->fl_lmops = fl->fl_lmops;
222 }
223}
224
225
226
227
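/*
 * Initialize a new lock from an existing file_lock structure.
 * The per-filesystem private state (fl_file, fl_ops, fl_lmops) is cleared.
 */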
228void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
229{
230 new->fl_owner = fl->fl_owner;
231 new->fl_pid = fl->fl_pid;
232 new->fl_file = NULL;
233 new->fl_flags = fl->fl_flags;
234 new->fl_type = fl->fl_type;
235 new->fl_start = fl->fl_start;
236 new->fl_end = fl->fl_end;
237 new->fl_ops = NULL;
238 new->fl_lmops = NULL;
239}
240EXPORT_SYMBOL(__locks_copy_lock);
241
242void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
243{
244 locks_release_private(new);
245
246 __locks_copy_lock(new, fl);
247 new->fl_file = fl->fl_file;
248 new->fl_ops = fl->fl_ops;
249 new->fl_lmops = fl->fl_lmops;
250
251 locks_copy_private(new, fl);
252}
253
254EXPORT_SYMBOL(locks_copy_lock);
255
256static inline int flock_translate_cmd(int cmd) {
257 if (cmd & LOCK_MAND)
258 return cmd & (LOCK_MAND | LOCK_RW);
259 switch (cmd) {
260 case LOCK_SH:
261 return F_RDLCK;
262 case LOCK_EX:
263 return F_WRLCK;
264 case LOCK_UN:
265 return F_UNLCK;
266 }
267 return -EINVAL;
268}
269
270
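/* Fill in a file_lock structure with an appropriate FLOCK lock. */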
271static int flock_make_lock(struct file *filp, struct file_lock **lock,
272 unsigned int cmd)
273{
274 struct file_lock *fl;
275 int type = flock_translate_cmd(cmd);
276 if (type < 0)
277 return type;
278
279 fl = locks_alloc_lock();
280 if (fl == NULL)
281 return -ENOMEM;
282
283 fl->fl_file = filp;
284 fl->fl_pid = current->tgid;
285 fl->fl_flags = FL_FLOCK;
286 fl->fl_type = type;
287 fl->fl_end = OFFSET_MAX;
288
289 *lock = fl;
290 return 0;
291}
292
293static int assign_type(struct file_lock *fl, int type)
294{
295 switch (type) {
296 case F_RDLCK:
297 case F_WRLCK:
298 case F_UNLCK:
299 fl->fl_type = type;
300 break;
301 default:
302 return -EINVAL;
303 }
304 return 0;
305}
306
307
308
309
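/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */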
310static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
311 struct flock *l)
312{
313 off_t start, end;
314
315 switch (l->l_whence) {
316 case SEEK_SET:
317 start = 0;
318 break;
319 case SEEK_CUR:
320 start = filp->f_pos;
321 break;
322 case SEEK_END:
323 start = i_size_read(filp->f_path.dentry->d_inode);
324 break;
325 default:
326 return -EINVAL;
327 }
328
329
330
331 start += l->l_start;
332 if (start < 0)
333 return -EINVAL;
334 fl->fl_end = OFFSET_MAX;
335 if (l->l_len > 0) {
336 end = start + l->l_len - 1;
337 fl->fl_end = end;
338 } else if (l->l_len < 0) {
339 end = start - 1;
340 fl->fl_end = end;
341 start += l->l_len;
342 if (start < 0)
343 return -EINVAL;
344 }
345 fl->fl_start = start;
346 if (fl->fl_end < fl->fl_start)
347 return -EOVERFLOW;
348
349 fl->fl_owner = current->files;
350 fl->fl_pid = current->tgid;
351 fl->fl_file = filp;
352 fl->fl_flags = FL_POSIX;
353 fl->fl_ops = NULL;
354 fl->fl_lmops = NULL;
355
356 return assign_type(fl, l->l_type);
357}
358
359#if BITS_PER_LONG == 32
360static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
361 struct flock64 *l)
362{
363 loff_t start;
364
365 switch (l->l_whence) {
366 case SEEK_SET:
367 start = 0;
368 break;
369 case SEEK_CUR:
370 start = filp->f_pos;
371 break;
372 case SEEK_END:
373 start = i_size_read(filp->f_path.dentry->d_inode);
374 break;
375 default:
376 return -EINVAL;
377 }
378
379 start += l->l_start;
380 if (start < 0)
381 return -EINVAL;
382 fl->fl_end = OFFSET_MAX;
383 if (l->l_len > 0) {
384 fl->fl_end = start + l->l_len - 1;
385 } else if (l->l_len < 0) {
386 fl->fl_end = start - 1;
387 start += l->l_len;
388 if (start < 0)
389 return -EINVAL;
390 }
391 fl->fl_start = start;
392 if (fl->fl_end < fl->fl_start)
393 return -EOVERFLOW;
394
395 fl->fl_owner = current->files;
396 fl->fl_pid = current->tgid;
397 fl->fl_file = filp;
398 fl->fl_flags = FL_POSIX;
399 fl->fl_ops = NULL;
400 fl->fl_lmops = NULL;
401
402 switch (l->l_type) {
403 case F_RDLCK:
404 case F_WRLCK:
405 case F_UNLCK:
406 fl->fl_type = l->l_type;
407 break;
408 default:
409 return -EINVAL;
410 }
411
412 return (0);
413}
414#endif
415
416
417static void lease_break_callback(struct file_lock *fl)
418{
419 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
420}
421
422static void lease_release_private_callback(struct file_lock *fl)
423{
424 if (!fl->fl_file)
425 return;
426
427 f_delown(fl->fl_file);
428 fl->fl_file->f_owner.signum = 0;
429}
430
431static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
432{
433 return fl->fl_file == try->fl_file;
434}
435
436static struct lock_manager_operations lease_manager_ops = {
437 .fl_break = lease_break_callback,
438 .fl_release_private = lease_release_private_callback,
439 .fl_mylease = lease_mylease_callback,
440 .fl_change = lease_modify,
441};
442
443
444
445
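/*
 * Initialize a lease, use the default lock manager operations
 */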
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
448 if (assign_type(fl, type) != 0)
449 return -EINVAL;
450
451 fl->fl_owner = current->files;
452 fl->fl_pid = current->tgid;
453
454 fl->fl_file = filp;
455 fl->fl_flags = FL_LEASE;
456 fl->fl_start = 0;
457 fl->fl_end = OFFSET_MAX;
458 fl->fl_ops = NULL;
459 fl->fl_lmops = &lease_manager_ops;
460 return 0;
461}
462
463
464static struct file_lock *lease_alloc(struct file *filp, int type)
465{
466 struct file_lock *fl = locks_alloc_lock();
467 int error = -ENOMEM;
468
469 if (fl == NULL)
470 return ERR_PTR(error);
471
472 error = lease_init(filp, type, fl);
473 if (error) {
474 locks_free_lock(fl);
475 return ERR_PTR(error);
476 }
477 return fl;
478}
479
480
481
482static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
483{
484 return ((fl1->fl_end >= fl2->fl_start) &&
485 (fl2->fl_end >= fl1->fl_start));
486}
487
488
489
490
491static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
492{
493 if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
494 return fl2->fl_lmops == fl1->fl_lmops &&
495 fl1->fl_lmops->fl_compare_owner(fl1, fl2);
496 return fl1->fl_owner == fl2->fl_owner;
497}
498
499
500
501
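/* Remove waiter from blocker's block list.
 * Must be called with the kernel lock held.
 */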
502static void __locks_delete_block(struct file_lock *waiter)
503{
504 list_del_init(&waiter->fl_block);
505 list_del_init(&waiter->fl_link);
506 waiter->fl_next = NULL;
507}
508
509
510
511static void locks_delete_block(struct file_lock *waiter)
512{
513 lock_kernel();
514 __locks_delete_block(waiter);
515 unlock_kernel();
516}
517
518
519
520
521
522
523static void locks_insert_block(struct file_lock *blocker,
524 struct file_lock *waiter)
525{
526 BUG_ON(!list_empty(&waiter->fl_block));
527 list_add_tail(&waiter->fl_block, &blocker->fl_block);
528 waiter->fl_next = blocker;
529 if (IS_POSIX(blocker))
530 list_add(&waiter->fl_link, &blocked_list);
531}
532
533
534
535
536
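/* Wake up processes blocked waiting for blocker.
 *
 * Must be called with the kernel lock held!
 */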
537static void locks_wake_up_blocks(struct file_lock *blocker)
538{
539 while (!list_empty(&blocker->fl_block)) {
540 struct file_lock *waiter;
541
542 waiter = list_first_entry(&blocker->fl_block,
543 struct file_lock, fl_block);
544 __locks_delete_block(waiter);
545 if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
546 waiter->fl_lmops->fl_notify(waiter);
547 else
548 wake_up(&waiter->fl_wait);
549 }
550}
551
552
553
554
555static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
556{
557 list_add(&fl->fl_link, &file_lock_list);
558
559 fl->fl_nspid = get_pid(task_tgid(current));
560
561
562 fl->fl_next = *pos;
563 *pos = fl;
564}
565
566
567
568
569
570
571
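/* Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */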
572static void locks_delete_lock(struct file_lock **thisfl_p)
573{
574 struct file_lock *fl = *thisfl_p;
575
576 *thisfl_p = fl->fl_next;
577 fl->fl_next = NULL;
578 list_del_init(&fl->fl_link);
579
580 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
581 if (fl->fl_fasync != NULL) {
582 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
583 fl->fl_fasync = NULL;
584 }
585
586 if (fl->fl_nspid) {
587 put_pid(fl->fl_nspid);
588 fl->fl_nspid = NULL;
589 }
590
591 locks_wake_up_blocks(fl);
592 locks_free_lock(fl);
593}
594
595
596
597
598static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
599{
600 if (sys_fl->fl_type == F_WRLCK)
601 return 1;
602 if (caller_fl->fl_type == F_WRLCK)
603 return 1;
604 return 0;
605}
606
607
608
609
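/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */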
610static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
611{
612
613
614
615 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
616 return (0);
617
618
619 if (!locks_overlap(caller_fl, sys_fl))
620 return 0;
621
622 return (locks_conflict(caller_fl, sys_fl));
623}
624
625
626
627
628static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
629{
630
631
632
633 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
634 return (0);
635 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
636 return 0;
637
638 return (locks_conflict(caller_fl, sys_fl));
639}
640
641void
642posix_test_lock(struct file *filp, struct file_lock *fl)
643{
644 struct file_lock *cfl;
645
646 lock_kernel();
647 for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
648 if (!IS_POSIX(cfl))
649 continue;
650 if (posix_locks_conflict(fl, cfl))
651 break;
652 }
653 if (cfl) {
654 __locks_copy_lock(fl, cfl);
655 if (cfl->fl_nspid)
656 fl->fl_pid = pid_vnr(cfl->fl_nspid);
657 } else
658 fl->fl_type = F_UNLCK;
659 unlock_kernel();
660 return;
661}
662EXPORT_SYMBOL(posix_test_lock);
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
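/*
 * Deadlock detection:
 *
 * We only attempt to detect deadlocks caused purely by POSIX file locks.
 * Starting from the lock the caller would block on, we walk the chain of
 * blocked-on locks; if we reach a lock held by the same owner as the
 * caller, granting the request would deadlock.  To avoid looping forever
 * on cycles involving several tasks, the walk gives up (and allows the
 * request) after MAX_DEADLK_ITERATIONS steps.
 */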
689#define MAX_DEADLK_ITERATIONS 10
690
691
692static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
693{
694 struct file_lock *fl;
695
696 list_for_each_entry(fl, &blocked_list, fl_link) {
697 if (posix_same_owner(fl, block_fl))
698 return fl->fl_next;
699 }
700 return NULL;
701}
702
703static int posix_locks_deadlock(struct file_lock *caller_fl,
704 struct file_lock *block_fl)
705{
706 int i = 0;
707
708 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
709 if (i++ > MAX_DEADLK_ITERATIONS)
710 return 0;
711 if (posix_same_owner(caller_fl, block_fl))
712 return 1;
713 }
714 return 0;
715}
716
717
718
719
720
721
722
723
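/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 */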
724static int flock_lock_file(struct file *filp, struct file_lock *request)
725{
726 struct file_lock *new_fl = NULL;
727 struct file_lock **before;
728 struct inode * inode = filp->f_path.dentry->d_inode;
729 int error = 0;
730 int found = 0;
731
732 lock_kernel();
733 if (request->fl_flags & FL_ACCESS)
734 goto find_conflict;
735
736 if (request->fl_type != F_UNLCK) {
737 error = -ENOMEM;
738 new_fl = locks_alloc_lock();
739 if (new_fl == NULL)
740 goto out;
741 error = 0;
742 }
743
744 for_each_lock(inode, before) {
745 struct file_lock *fl = *before;
746 if (IS_POSIX(fl))
747 break;
748 if (IS_LEASE(fl))
749 continue;
750 if (filp != fl->fl_file)
751 continue;
752 if (request->fl_type == fl->fl_type)
753 goto out;
754 found = 1;
755 locks_delete_lock(before);
756 break;
757 }
758
759 if (request->fl_type == F_UNLCK) {
760 if ((request->fl_flags & FL_EXISTS) && !found)
761 error = -ENOENT;
762 goto out;
763 }
764
765
766
767
768
769 if (found)
770 cond_resched_bkl();
771
772find_conflict:
773 for_each_lock(inode, before) {
774 struct file_lock *fl = *before;
775 if (IS_POSIX(fl))
776 break;
777 if (IS_LEASE(fl))
778 continue;
779 if (!flock_locks_conflict(request, fl))
780 continue;
781 error = -EAGAIN;
782 if (!(request->fl_flags & FL_SLEEP))
783 goto out;
784 error = FILE_LOCK_DEFERRED;
785 locks_insert_block(fl, request);
786 goto out;
787 }
788 if (request->fl_flags & FL_ACCESS)
789 goto out;
790 locks_copy_lock(new_fl, request);
791 locks_insert_lock(before, new_fl);
792 new_fl = NULL;
793 error = 0;
794
795out:
796 unlock_kernel();
797 if (new_fl)
798 locks_free_lock(new_fl);
799 return error;
800}
801
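/*
 * Apply a POSIX lock to the inode's lock list: merge with, split or
 * delete existing locks owned by the same owner as needed.  If conflock
 * is non-NULL and a conflicting lock is found, a copy of it is returned
 * there.
 */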
802static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
803{
804 struct file_lock *fl;
805 struct file_lock *new_fl = NULL;
806 struct file_lock *new_fl2 = NULL;
807 struct file_lock *left = NULL;
808 struct file_lock *right = NULL;
809 struct file_lock **before;
810 int error, added = 0;
811
812
813
814
815
816
817
818 if (!(request->fl_flags & FL_ACCESS) &&
819 (request->fl_type != F_UNLCK ||
820 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
821 new_fl = locks_alloc_lock();
822 new_fl2 = locks_alloc_lock();
823 }
824
825 lock_kernel();
826 if (request->fl_type != F_UNLCK) {
827 for_each_lock(inode, before) {
828 fl = *before;
829 if (!IS_POSIX(fl))
830 continue;
831 if (!posix_locks_conflict(request, fl))
832 continue;
833 if (conflock)
834 __locks_copy_lock(conflock, fl);
835 error = -EAGAIN;
836 if (!(request->fl_flags & FL_SLEEP))
837 goto out;
838 error = -EDEADLK;
839 if (posix_locks_deadlock(request, fl))
840 goto out;
841 error = FILE_LOCK_DEFERRED;
842 locks_insert_block(fl, request);
843 goto out;
844 }
845 }
846
847
848 error = 0;
849 if (request->fl_flags & FL_ACCESS)
850 goto out;
851
852
853
854
855
856 before = &inode->i_flock;
857
858
859 while ((fl = *before) && (!IS_POSIX(fl) ||
860 !posix_same_owner(request, fl))) {
861 before = &fl->fl_next;
862 }
863
864
865 while ((fl = *before) && posix_same_owner(request, fl)) {
866
867
868 if (request->fl_type == fl->fl_type) {
869
870
871
872
873 if (fl->fl_end < request->fl_start - 1)
874 goto next_lock;
875
876
877
878 if (fl->fl_start - 1 > request->fl_end)
879 break;
880
881
882
883
884
885
886 if (fl->fl_start > request->fl_start)
887 fl->fl_start = request->fl_start;
888 else
889 request->fl_start = fl->fl_start;
890 if (fl->fl_end < request->fl_end)
891 fl->fl_end = request->fl_end;
892 else
893 request->fl_end = fl->fl_end;
894 if (added) {
895 locks_delete_lock(before);
896 continue;
897 }
898 request = fl;
899 added = 1;
900 }
901 else {
902
903
904
905 if (fl->fl_end < request->fl_start)
906 goto next_lock;
907 if (fl->fl_start > request->fl_end)
908 break;
909 if (request->fl_type == F_UNLCK)
910 added = 1;
911 if (fl->fl_start < request->fl_start)
912 left = fl;
913
914
915
916 if (fl->fl_end > request->fl_end) {
917 right = fl;
918 break;
919 }
920 if (fl->fl_start >= request->fl_start) {
921
922
923
924 if (added) {
925 locks_delete_lock(before);
926 continue;
927 }
928
929
930
931
932
933 locks_wake_up_blocks(fl);
934 fl->fl_start = request->fl_start;
935 fl->fl_end = request->fl_end;
936 fl->fl_type = request->fl_type;
937 locks_release_private(fl);
938 locks_copy_private(fl, request);
939 request = fl;
940 added = 1;
941 }
942 }
943
944
945 next_lock:
946 before = &fl->fl_next;
947 }
948
949
950
951
952
953
954
955 error = -ENOLCK;
956 if (right && left == right && !new_fl2)
957 goto out;
958
959 error = 0;
960 if (!added) {
961 if (request->fl_type == F_UNLCK) {
962 if (request->fl_flags & FL_EXISTS)
963 error = -ENOENT;
964 goto out;
965 }
966
967 if (!new_fl) {
968 error = -ENOLCK;
969 goto out;
970 }
971 locks_copy_lock(new_fl, request);
972 locks_insert_lock(before, new_fl);
973 new_fl = NULL;
974 }
975 if (right) {
976 if (left == right) {
977
978
979
980 left = new_fl2;
981 new_fl2 = NULL;
982 locks_copy_lock(left, right);
983 locks_insert_lock(before, left);
984 }
985 right->fl_start = request->fl_end + 1;
986 locks_wake_up_blocks(right);
987 }
988 if (left) {
989 left->fl_end = request->fl_start - 1;
990 locks_wake_up_blocks(left);
991 }
992 out:
993 unlock_kernel();
994
995
996
997 if (new_fl)
998 locks_free_lock(new_fl);
999 if (new_fl2)
1000 locks_free_lock(new_fl2);
1001 return error;
1002}
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
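/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */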
1018int posix_lock_file(struct file *filp, struct file_lock *fl,
1019 struct file_lock *conflock)
1020{
1021 return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1022}
1023EXPORT_SYMBOL(posix_lock_file);
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
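/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file, sleeping until the lock can be
 * granted or the wait is interrupted.
 */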
1034int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1035{
1036 int error;
1037 might_sleep ();
1038 for (;;) {
1039 error = posix_lock_file(filp, fl, NULL);
1040 if (error != FILE_LOCK_DEFERRED)
1041 break;
1042 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1043 if (!error)
1044 continue;
1045
1046 locks_delete_block(fl);
1047 break;
1048 }
1049 return error;
1050}
1051EXPORT_SYMBOL(posix_lock_file_wait);
1052
1053
1054
1055
1056
1057
1058
1059
1060int locks_mandatory_locked(struct inode *inode)
1061{
1062 fl_owner_t owner = current->files;
1063 struct file_lock *fl;
1064
1065
1066
1067
1068 lock_kernel();
1069 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1070 if (!IS_POSIX(fl))
1071 continue;
1072 if (fl->fl_owner != owner)
1073 break;
1074 }
1075 unlock_kernel();
1076 return fl ? -EAGAIN : 0;
1077}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
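/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @offset:     start of area to check
 * @count:      length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which
 * conflict, sleeping if necessary unless the file was opened O_NONBLOCK.
 */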
1092int locks_mandatory_area(int read_write, struct inode *inode,
1093 struct file *filp, loff_t offset,
1094 size_t count)
1095{
1096 struct file_lock fl;
1097 int error;
1098
1099 locks_init_lock(&fl);
1100 fl.fl_owner = current->files;
1101 fl.fl_pid = current->tgid;
1102 fl.fl_file = filp;
1103 fl.fl_flags = FL_POSIX | FL_ACCESS;
1104 if (filp && !(filp->f_flags & O_NONBLOCK))
1105 fl.fl_flags |= FL_SLEEP;
1106 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1107 fl.fl_start = offset;
1108 fl.fl_end = offset + count - 1;
1109
1110 for (;;) {
1111 error = __posix_lock_file(inode, &fl, NULL);
1112 if (error != FILE_LOCK_DEFERRED)
1113 break;
1114 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1115 if (!error) {
1116
1117
1118
1119
1120 if (__mandatory_lock(inode))
1121 continue;
1122 }
1123
1124 locks_delete_block(&fl);
1125 break;
1126 }
1127
1128 return error;
1129}
1130
1131EXPORT_SYMBOL(locks_mandatory_area);
1132
1133
1134int lease_modify(struct file_lock **before, int arg)
1135{
1136 struct file_lock *fl = *before;
1137 int error = assign_type(fl, arg);
1138
1139 if (error)
1140 return error;
1141 locks_wake_up_blocks(fl);
1142 if (arg == F_UNLCK)
1143 locks_delete_lock(before);
1144 return 0;
1145}
1146
1147EXPORT_SYMBOL(lease_modify);
1148
1149static void time_out_leases(struct inode *inode)
1150{
1151 struct file_lock **before;
1152 struct file_lock *fl;
1153
1154 before = &inode->i_flock;
1155 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1156 if ((fl->fl_break_time == 0)
1157 || time_before(jiffies, fl->fl_break_time)) {
1158 before = &fl->fl_next;
1159 continue;
1160 }
1161 lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1162 if (fl == *before)
1163 before = &fl->fl_next;
1164 }
1165}
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
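/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * Leases are broken on a call to open() or truncate().  This function
 * can sleep unless %O_NONBLOCK was specified in @mode.
 */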
1177int __break_lease(struct inode *inode, unsigned int mode)
1178{
1179 int error = 0, future;
1180 struct file_lock *new_fl, *flock;
1181 struct file_lock *fl;
1182 unsigned long break_time;
1183 int i_have_this_lease = 0;
1184
1185 new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
1186
1187 lock_kernel();
1188
1189 time_out_leases(inode);
1190
1191 flock = inode->i_flock;
1192 if ((flock == NULL) || !IS_LEASE(flock))
1193 goto out;
1194
1195 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1196 if (fl->fl_owner == current->files)
1197 i_have_this_lease = 1;
1198
1199 if (mode & FMODE_WRITE) {
1200
1201 future = F_UNLCK | F_INPROGRESS;
1202 } else if (flock->fl_type & F_INPROGRESS) {
1203
1204 future = flock->fl_type;
1205 } else if (flock->fl_type & F_WRLCK) {
1206
1207 future = F_RDLCK | F_INPROGRESS;
1208 } else {
1209
1210 goto out;
1211 }
1212
1213 if (IS_ERR(new_fl) && !i_have_this_lease
1214 && ((mode & O_NONBLOCK) == 0)) {
1215 error = PTR_ERR(new_fl);
1216 goto out;
1217 }
1218
1219 break_time = 0;
1220 if (lease_break_time > 0) {
1221 break_time = jiffies + lease_break_time * HZ;
1222 if (break_time == 0)
1223 break_time++;
1224 }
1225
1226 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1227 if (fl->fl_type != future) {
1228 fl->fl_type = future;
1229 fl->fl_break_time = break_time;
1230
1231 fl->fl_lmops->fl_break(fl);
1232 }
1233 }
1234
1235 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1236 error = -EWOULDBLOCK;
1237 goto out;
1238 }
1239
1240restart:
1241 break_time = flock->fl_break_time;
1242 if (break_time != 0) {
1243 break_time -= jiffies;
1244 if (break_time == 0)
1245 break_time++;
1246 }
1247 locks_insert_block(flock, new_fl);
1248 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1249 !new_fl->fl_next, break_time);
1250 __locks_delete_block(new_fl);
1251 if (error >= 0) {
1252 if (error == 0)
1253 time_out_leases(inode);
1254
1255 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1256 flock = flock->fl_next) {
1257 if (flock->fl_type & F_INPROGRESS)
1258 goto restart;
1259 }
1260 error = 0;
1261 }
1262
1263out:
1264 unlock_kernel();
1265 if (!IS_ERR(new_fl))
1266 locks_free_lock(new_fl);
1267 return error;
1268}
1269
1270EXPORT_SYMBOL(__break_lease);
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
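/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */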
1281void lease_get_mtime(struct inode *inode, struct timespec *time)
1282{
1283 struct file_lock *flock = inode->i_flock;
1284 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1285 *time = current_fs_time(inode->i_sb);
1286 else
1287 *time = inode->i_mtime;
1288}
1289
1290EXPORT_SYMBOL(lease_get_mtime);
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
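/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * Returns %F_RDLCK for a shared lease, %F_WRLCK for an exclusive lease,
 * or %F_UNLCK if no lease is held on @filp.  While a lease break is in
 * progress the type being broken to is reported (%F_INPROGRESS is
 * masked off).
 */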
1315int fcntl_getlease(struct file *filp)
1316{
1317 struct file_lock *fl;
1318 int type = F_UNLCK;
1319
1320 lock_kernel();
1321 time_out_leases(filp->f_path.dentry->d_inode);
1322 for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1323 fl = fl->fl_next) {
1324 if (fl->fl_file == filp) {
1325 type = fl->fl_type & ~F_INPROGRESS;
1326 break;
1327 }
1328 }
1329 unlock_kernel();
1330 return type;
1331}
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
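/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->fl_break function is required by break_lease().
 *
 * Called with kernel lock held.
 */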
1344int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1345{
1346 struct file_lock *fl, **before, **my_before = NULL, *lease;
1347 struct file_lock *new_fl = NULL;
1348 struct dentry *dentry = filp->f_path.dentry;
1349 struct inode *inode = dentry->d_inode;
1350 int error, rdlease_count = 0, wrlease_count = 0;
1351
1352 if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1353 return -EACCES;
1354 if (!S_ISREG(inode->i_mode))
1355 return -EINVAL;
1356 error = security_file_lock(filp, arg);
1357 if (error)
1358 return error;
1359
1360 time_out_leases(inode);
1361
1362 BUG_ON(!(*flp)->fl_lmops->fl_break);
1363
1364 lease = *flp;
1365
1366 if (arg != F_UNLCK) {
1367 error = -ENOMEM;
1368 new_fl = locks_alloc_lock();
1369 if (new_fl == NULL)
1370 goto out;
1371
1372 error = -EAGAIN;
1373 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1374 goto out;
1375 if ((arg == F_WRLCK)
1376 && ((atomic_read(&dentry->d_count) > 1)
1377 || (atomic_read(&inode->i_count) > 1)))
1378 goto out;
1379 }
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389 for (before = &inode->i_flock;
1390 ((fl = *before) != NULL) && IS_LEASE(fl);
1391 before = &fl->fl_next) {
1392 if (lease->fl_lmops->fl_mylease(fl, lease))
1393 my_before = before;
1394 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1395
1396
1397
1398
1399
1400 wrlease_count++;
1401 else
1402 rdlease_count++;
1403 }
1404
1405 error = -EAGAIN;
1406 if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1407 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1408 goto out;
1409
1410 if (my_before != NULL) {
1411 *flp = *my_before;
1412 error = lease->fl_lmops->fl_change(my_before, arg);
1413 goto out;
1414 }
1415
1416 error = 0;
1417 if (arg == F_UNLCK)
1418 goto out;
1419
1420 error = -EINVAL;
1421 if (!leases_enable)
1422 goto out;
1423
1424 locks_copy_lock(new_fl, lease);
1425 locks_insert_lock(before, new_fl);
1426
1427 *flp = new_fl;
1428 return 0;
1429
1430out:
1431 if (new_fl != NULL)
1432 locks_free_lock(new_fl);
1433 return error;
1434}
1435EXPORT_SYMBOL(generic_setlease);
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
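/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call the filesystem's setlease method, if it has one, otherwise fall
 * back to generic_setlease().  Takes the big kernel lock around the call.
 */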
1464int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1465{
1466 int error;
1467
1468 lock_kernel();
1469 if (filp->f_op && filp->f_op->setlease)
1470 error = filp->f_op->setlease(filp, arg, lease);
1471 else
1472 error = generic_setlease(filp, arg, lease);
1473 unlock_kernel();
1474
1475 return error;
1476}
1477EXPORT_SYMBOL_GPL(vfs_setlease);
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
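/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */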
1489int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1490{
1491 struct file_lock fl, *flp = &fl;
1492 struct inode *inode = filp->f_path.dentry->d_inode;
1493 int error;
1494
1495 locks_init_lock(&fl);
1496 error = lease_init(filp, arg, &fl);
1497 if (error)
1498 return error;
1499
1500 lock_kernel();
1501
1502 error = vfs_setlease(filp, arg, &flp);
1503 if (error || arg == F_UNLCK)
1504 goto out_unlock;
1505
1506 error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
1507 if (error < 0) {
1508
1509 flp->fl_type = F_UNLCK | F_INPROGRESS;
1510 flp->fl_break_time = jiffies - 10;
1511 time_out_leases(inode);
1512 goto out_unlock;
1513 }
1514
1515 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1516out_unlock:
1517 unlock_kernel();
1518 return error;
1519}
1520
1521
1522
1523
1524
1525
1526
1527
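/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file, sleeping until the lock can be
 * granted or the wait is interrupted.
 */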
1528int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1529{
1530 int error;
1531 might_sleep();
1532 for (;;) {
1533 error = flock_lock_file(filp, fl);
1534 if (error != FILE_LOCK_DEFERRED)
1535 break;
1536 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1537 if (!error)
1538 continue;
1539
1540 locks_delete_block(fl);
1541 break;
1542 }
1543 return error;
1544}
1545
1546EXPORT_SYMBOL(flock_lock_file_wait);
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
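/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows
 * Share Modes, and can be combined with %LOCK_READ or %LOCK_WRITE to
 * allow other processes read and write access respectively.
 */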
1567SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1568{
1569 struct file *filp;
1570 struct file_lock *lock;
1571 int can_sleep, unlock;
1572 int error;
1573
1574 error = -EBADF;
1575 filp = fget(fd);
1576 if (!filp)
1577 goto out;
1578
1579 can_sleep = !(cmd & LOCK_NB);
1580 cmd &= ~LOCK_NB;
1581 unlock = (cmd == LOCK_UN);
1582
1583 if (!unlock && !(cmd & LOCK_MAND) &&
1584 !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1585 goto out_putf;
1586
1587 error = flock_make_lock(filp, &lock, cmd);
1588 if (error)
1589 goto out_putf;
1590 if (can_sleep)
1591 lock->fl_flags |= FL_SLEEP;
1592
1593 error = security_file_lock(filp, cmd);
1594 if (error)
1595 goto out_free;
1596
1597 if (filp->f_op && filp->f_op->flock)
1598 error = filp->f_op->flock(filp,
1599 (can_sleep) ? F_SETLKW : F_SETLK,
1600 lock);
1601 else
1602 error = flock_lock_file_wait(filp, lock);
1603
1604 out_free:
1605 locks_free_lock(lock);
1606
1607 out_putf:
1608 fput(filp);
1609 out:
1610 return error;
1611}
1612
1613
1614
1615
1616
1617
1618
1619
1620
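/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */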
1621int vfs_test_lock(struct file *filp, struct file_lock *fl)
1622{
1623 if (filp->f_op && filp->f_op->lock)
1624 return filp->f_op->lock(filp, F_GETLK, fl);
1625 posix_test_lock(filp, fl);
1626 return 0;
1627}
1628EXPORT_SYMBOL_GPL(vfs_test_lock);
1629
1630static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1631{
1632 flock->l_pid = fl->fl_pid;
1633#if BITS_PER_LONG == 32
1634
1635
1636
1637
1638 if (fl->fl_start > OFFT_OFFSET_MAX)
1639 return -EOVERFLOW;
1640 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1641 return -EOVERFLOW;
1642#endif
1643 flock->l_start = fl->fl_start;
1644 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1645 fl->fl_end - fl->fl_start + 1;
1646 flock->l_whence = 0;
1647 flock->l_type = fl->fl_type;
1648 return 0;
1649}
1650
1651#if BITS_PER_LONG == 32
1652static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1653{
1654 flock->l_pid = fl->fl_pid;
1655 flock->l_start = fl->fl_start;
1656 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1657 fl->fl_end - fl->fl_start + 1;
1658 flock->l_whence = 0;
1659 flock->l_type = fl->fl_type;
1660}
1661#endif
1662
1663
1664
1665
1666int fcntl_getlk(struct file *filp, struct flock __user *l)
1667{
1668 struct file_lock file_lock;
1669 struct flock flock;
1670 int error;
1671
1672 error = -EFAULT;
1673 if (copy_from_user(&flock, l, sizeof(flock)))
1674 goto out;
1675 error = -EINVAL;
1676 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1677 goto out;
1678
1679 error = flock_to_posix_lock(filp, &file_lock, &flock);
1680 if (error)
1681 goto out;
1682
1683 error = vfs_test_lock(filp, &file_lock);
1684 if (error)
1685 goto out;
1686
1687 flock.l_type = file_lock.fl_type;
1688 if (file_lock.fl_type != F_UNLCK) {
1689 error = posix_lock_to_flock(&flock, &file_lock);
1690 if (error)
1691 goto out;
1692 }
1693 error = -EFAULT;
1694 if (!copy_to_user(l, &flock, sizeof(flock)))
1695 error = 0;
1696out:
1697 return error;
1698}
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
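/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * Uses the filesystem's ->lock method if it has one, otherwise applies a
 * local POSIX lock.  A caller that doesn't care about the conflicting
 * lock may pass NULL as the final argument.
 */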
1733int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1734{
1735 if (filp->f_op && filp->f_op->lock)
1736 return filp->f_op->lock(filp, cmd, fl);
1737 else
1738 return posix_lock_file(filp, fl, conf);
1739}
1740EXPORT_SYMBOL_GPL(vfs_lock_file);
1741
1742static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1743 struct file_lock *fl)
1744{
1745 int error;
1746
1747 error = security_file_lock(filp, fl->fl_type);
1748 if (error)
1749 return error;
1750
1751 for (;;) {
1752 error = vfs_lock_file(filp, cmd, fl, NULL);
1753 if (error != FILE_LOCK_DEFERRED)
1754 break;
1755 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1756 if (!error)
1757 continue;
1758
1759 locks_delete_block(fl);
1760 break;
1761 }
1762
1763 return error;
1764}
1765
1766
1767
1768
1769int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1770 struct flock __user *l)
1771{
1772 struct file_lock *file_lock = locks_alloc_lock();
1773 struct flock flock;
1774 struct inode *inode;
1775 struct file *f;
1776 int error;
1777
1778 if (file_lock == NULL)
1779 return -ENOLCK;
1780
1781
1782
1783
1784 error = -EFAULT;
1785 if (copy_from_user(&flock, l, sizeof(flock)))
1786 goto out;
1787
1788 inode = filp->f_path.dentry->d_inode;
1789
1790
1791
1792
1793 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1794 error = -EAGAIN;
1795 goto out;
1796 }
1797
1798again:
1799 error = flock_to_posix_lock(filp, file_lock, &flock);
1800 if (error)
1801 goto out;
1802 if (cmd == F_SETLKW) {
1803 file_lock->fl_flags |= FL_SLEEP;
1804 }
1805
1806 error = -EBADF;
1807 switch (flock.l_type) {
1808 case F_RDLCK:
1809 if (!(filp->f_mode & FMODE_READ))
1810 goto out;
1811 break;
1812 case F_WRLCK:
1813 if (!(filp->f_mode & FMODE_WRITE))
1814 goto out;
1815 break;
1816 case F_UNLCK:
1817 break;
1818 default:
1819 error = -EINVAL;
1820 goto out;
1821 }
1822
1823 error = do_lock_file_wait(filp, cmd, file_lock);
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
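	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */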
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
1837 if (!error && f != filp && flock.l_type != F_UNLCK) {
1838 flock.l_type = F_UNLCK;
1839 goto again;
1840 }
1841
1842out:
1843 locks_free_lock(file_lock);
1844 return error;
1845}
1846
1847#if BITS_PER_LONG == 32
1848
1849
1850
1851int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1852{
1853 struct file_lock file_lock;
1854 struct flock64 flock;
1855 int error;
1856
1857 error = -EFAULT;
1858 if (copy_from_user(&flock, l, sizeof(flock)))
1859 goto out;
1860 error = -EINVAL;
1861 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1862 goto out;
1863
1864 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1865 if (error)
1866 goto out;
1867
1868 error = vfs_test_lock(filp, &file_lock);
1869 if (error)
1870 goto out;
1871
1872 flock.l_type = file_lock.fl_type;
1873 if (file_lock.fl_type != F_UNLCK)
1874 posix_lock_to_flock64(&flock, &file_lock);
1875
1876 error = -EFAULT;
1877 if (!copy_to_user(l, &flock, sizeof(flock)))
1878 error = 0;
1879
1880out:
1881 return error;
1882}
1883
1884
1885
1886
1887int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1888 struct flock64 __user *l)
1889{
1890 struct file_lock *file_lock = locks_alloc_lock();
1891 struct flock64 flock;
1892 struct inode *inode;
1893 struct file *f;
1894 int error;
1895
1896 if (file_lock == NULL)
1897 return -ENOLCK;
1898
1899
1900
1901
1902 error = -EFAULT;
1903 if (copy_from_user(&flock, l, sizeof(flock)))
1904 goto out;
1905
1906 inode = filp->f_path.dentry->d_inode;
1907
1908
1909
1910
1911 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1912 error = -EAGAIN;
1913 goto out;
1914 }
1915
1916again:
1917 error = flock64_to_posix_lock(filp, file_lock, &flock);
1918 if (error)
1919 goto out;
1920 if (cmd == F_SETLKW64) {
1921 file_lock->fl_flags |= FL_SLEEP;
1922 }
1923
1924 error = -EBADF;
1925 switch (flock.l_type) {
1926 case F_RDLCK:
1927 if (!(filp->f_mode & FMODE_READ))
1928 goto out;
1929 break;
1930 case F_WRLCK:
1931 if (!(filp->f_mode & FMODE_WRITE))
1932 goto out;
1933 break;
1934 case F_UNLCK:
1935 break;
1936 default:
1937 error = -EINVAL;
1938 goto out;
1939 }
1940
1941 error = do_lock_file_wait(filp, cmd, file_lock);
1942
1943
1944
1945
1946
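	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */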
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
1950 if (!error && f != filp && flock.l_type != F_UNLCK) {
1951 flock.l_type = F_UNLCK;
1952 goto again;
1953 }
1954
1955out:
1956 locks_free_lock(file_lock);
1957 return error;
1958}
1959#endif
1960
1961
1962
1963
1964
1965
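/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this point.
 */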
1966void locks_remove_posix(struct file *filp, fl_owner_t owner)
1967{
1968 struct file_lock lock;
1969
1970
1971
1972
1973
1974
1975 if (!filp->f_path.dentry->d_inode->i_flock)
1976 return;
1977
1978 lock.fl_type = F_UNLCK;
1979 lock.fl_flags = FL_POSIX | FL_CLOSE;
1980 lock.fl_start = 0;
1981 lock.fl_end = OFFSET_MAX;
1982 lock.fl_owner = owner;
1983 lock.fl_pid = current->tgid;
1984 lock.fl_file = filp;
1985 lock.fl_ops = NULL;
1986 lock.fl_lmops = NULL;
1987
1988 vfs_lock_file(filp, F_SETLK, &lock, NULL);
1989
1990 if (lock.fl_ops && lock.fl_ops->fl_release_private)
1991 lock.fl_ops->fl_release_private(&lock);
1992}
1993
1994EXPORT_SYMBOL(locks_remove_posix);
1995
1996
1997
1998
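/*
 * This function is called on the last close of an open file:
 * remove any remaining flock locks and leases held by the file.
 */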
1999void locks_remove_flock(struct file *filp)
2000{
2001 struct inode * inode = filp->f_path.dentry->d_inode;
2002 struct file_lock *fl;
2003 struct file_lock **before;
2004
2005 if (!inode->i_flock)
2006 return;
2007
2008 if (filp->f_op && filp->f_op->flock) {
2009 struct file_lock fl = {
2010 .fl_pid = current->tgid,
2011 .fl_file = filp,
2012 .fl_flags = FL_FLOCK,
2013 .fl_type = F_UNLCK,
2014 .fl_end = OFFSET_MAX,
2015 };
2016 filp->f_op->flock(filp, F_SETLKW, &fl);
2017 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2018 fl.fl_ops->fl_release_private(&fl);
2019 }
2020
2021 lock_kernel();
2022 before = &inode->i_flock;
2023
2024 while ((fl = *before) != NULL) {
2025 if (fl->fl_file == filp) {
2026 if (IS_FLOCK(fl)) {
2027 locks_delete_lock(before);
2028 continue;
2029 }
2030 if (IS_LEASE(fl)) {
2031 lease_modify(before, F_UNLCK);
2032 continue;
2033 }
2034
2035 BUG();
2036 }
2037 before = &fl->fl_next;
2038 }
2039 unlock_kernel();
2040}
2041
2042
2043
2044
2045
2046
2047
2048
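/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp: how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */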
2049int
2050posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2051{
2052 int status = 0;
2053
2054 lock_kernel();
2055 if (waiter->fl_next)
2056 __locks_delete_block(waiter);
2057 else
2058 status = -ENOENT;
2059 unlock_kernel();
2060 return status;
2061}
2062
2063EXPORT_SYMBOL(posix_unblock_lock);
2064
2065
2066
2067
2068
2069
2070
2071
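/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */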
2072int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2073{
2074 if (filp->f_op && filp->f_op->lock)
2075 return filp->f_op->lock(filp, F_CANCELLK, fl);
2076 return 0;
2077}
2078
2079EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2080
2081#ifdef CONFIG_PROC_FS
2082#include <linux/proc_fs.h>
2083#include <linux/seq_file.h>
2084
2085static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2086 int id, char *pfx)
2087{
2088 struct inode *inode = NULL;
2089 unsigned int fl_pid;
2090
2091 if (fl->fl_nspid)
2092 fl_pid = pid_vnr(fl->fl_nspid);
2093 else
2094 fl_pid = fl->fl_pid;
2095
2096 if (fl->fl_file != NULL)
2097 inode = fl->fl_file->f_path.dentry->d_inode;
2098
2099 seq_printf(f, "%d:%s ", id, pfx);
2100 if (IS_POSIX(fl)) {
2101 seq_printf(f, "%6s %s ",
2102 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2103 (inode == NULL) ? "*NOINODE*" :
2104 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2105 } else if (IS_FLOCK(fl)) {
2106 if (fl->fl_type & LOCK_MAND) {
2107 seq_printf(f, "FLOCK MSNFS ");
2108 } else {
2109 seq_printf(f, "FLOCK ADVISORY ");
2110 }
2111 } else if (IS_LEASE(fl)) {
2112 seq_printf(f, "LEASE ");
2113 if (fl->fl_type & F_INPROGRESS)
2114 seq_printf(f, "BREAKING ");
2115 else if (fl->fl_file)
2116 seq_printf(f, "ACTIVE ");
2117 else
2118 seq_printf(f, "BREAKER ");
2119 } else {
2120 seq_printf(f, "UNKNOWN UNKNOWN ");
2121 }
2122 if (fl->fl_type & LOCK_MAND) {
2123 seq_printf(f, "%s ",
2124 (fl->fl_type & LOCK_READ)
2125 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2126 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2127 } else {
2128 seq_printf(f, "%s ",
2129 (fl->fl_type & F_INPROGRESS)
2130 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2131 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2132 }
2133 if (inode) {
2134#ifdef WE_CAN_BREAK_LSLK_NOW
2135 seq_printf(f, "%d %s:%ld ", fl_pid,
2136 inode->i_sb->s_id, inode->i_ino);
2137#else
2138
2139 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2140 MAJOR(inode->i_sb->s_dev),
2141 MINOR(inode->i_sb->s_dev), inode->i_ino);
2142#endif
2143 } else {
2144 seq_printf(f, "%d <none>:0 ", fl_pid);
2145 }
2146 if (IS_POSIX(fl)) {
2147 if (fl->fl_end == OFFSET_MAX)
2148 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2149 else
2150 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2151 } else {
2152 seq_printf(f, "0 EOF\n");
2153 }
2154}
2155
2156static int locks_show(struct seq_file *f, void *v)
2157{
2158 struct file_lock *fl, *bfl;
2159
2160 fl = list_entry(v, struct file_lock, fl_link);
2161
2162 lock_get_status(f, fl, (long)f->private, "");
2163
2164 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2165 lock_get_status(f, bfl, (long)f->private, " ->");
2166
2167 f->private++;
2168 return 0;
2169}
2170
2171static void *locks_start(struct seq_file *f, loff_t *pos)
2172{
2173 lock_kernel();
2174 f->private = (void *)1;
2175 return seq_list_start(&file_lock_list, *pos);
2176}
2177
2178static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2179{
2180 return seq_list_next(v, &file_lock_list, pos);
2181}
2182
2183static void locks_stop(struct seq_file *f, void *v)
2184{
2185 unlock_kernel();
2186}
2187
2188static const struct seq_operations locks_seq_operations = {
2189 .start = locks_start,
2190 .next = locks_next,
2191 .stop = locks_stop,
2192 .show = locks_show,
2193};
2194
2195static int locks_open(struct inode *inode, struct file *filp)
2196{
2197 return seq_open(filp, &locks_seq_operations);
2198}
2199
2200static const struct file_operations proc_locks_operations = {
2201 .open = locks_open,
2202 .read = seq_read,
2203 .llseek = seq_lseek,
2204 .release = seq_release,
2205};
2206
2207static int __init proc_locks_init(void)
2208{
2209 proc_create("locks", 0, NULL, &proc_locks_operations);
2210 return 0;
2211}
2212module_init(proc_locks_init);
2213#endif
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
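/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements.  Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */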
2228int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2229{
2230 struct file_lock *fl;
2231 int result = 1;
2232 lock_kernel();
2233 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2234 if (IS_POSIX(fl)) {
2235 if (fl->fl_type == F_RDLCK)
2236 continue;
2237 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2238 continue;
2239 } else if (IS_FLOCK(fl)) {
2240 if (!(fl->fl_type & LOCK_MAND))
2241 continue;
2242 if (fl->fl_type & LOCK_READ)
2243 continue;
2244 } else
2245 continue;
2246 result = 0;
2247 break;
2248 }
2249 unlock_kernel();
2250 return result;
2251}
2252
2253EXPORT_SYMBOL(lock_may_read);
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
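/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements.  Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */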
2268int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2269{
2270 struct file_lock *fl;
2271 int result = 1;
2272 lock_kernel();
2273 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2274 if (IS_POSIX(fl)) {
2275 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2276 continue;
2277 } else if (IS_FLOCK(fl)) {
2278 if (!(fl->fl_type & LOCK_MAND))
2279 continue;
2280 if (fl->fl_type & LOCK_WRITE)
2281 continue;
2282 } else
2283 continue;
2284 result = 0;
2285 break;
2286 }
2287 unlock_kernel();
2288 return result;
2289}
2290
2291EXPORT_SYMBOL(lock_may_write);
2292
2293static int __init filelock_init(void)
2294{
2295 filelock_cache = kmem_cache_create("file_lock_cache",
2296 sizeof(struct file_lock), 0, SLAB_PANIC,
2297 init_once);
2298 return 0;
2299}
2300
2301core_initcall(filelock_init);
2302