/*
 * vmwgfx fence objects and fence manager: seqno-based dma_fences, fence
 * actions (such as delivering DRM events on signal), and the fence-related
 * ioctls.
 */
#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
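/*
 * Seqnos are compared with unsigned wrap-around arithmetic: a device seqno
 * has "passed" a fence seqno when (device_seqno - fence_seqno) is below
 * VMW_FENCE_WRAP, which stays correct across 32-bit counter wrap.
 */
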
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on;
	bool seqno_valid;

	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence, keeping it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event lock.
 * @tv_sec: If non-NULL, the variable pointed to is assigned the tv_sec value
 * of the current time when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set; assigned the tv_usec value of the
 * current time when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

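/*
 * Every vmw_fence_obj shares the fence manager's spinlock as its dma_fence
 * base.lock, so the owning manager can be recovered with container_of().
 */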
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on the fencing subsystem's use of irqs:
 *
 * vmw_fences_update() checks the device seqno and signals every fence
 * object whose seqno has passed, running and then cleaning up any actions
 * attached to it. Sleeping waiters register a seqno waiter
 * (vmw_seqno_waiter_add()) so that the device raises a seqno irq, and do
 * upkeep for irq-less waits by calling __vmw_fences_update() themselves.
 *
 * The fence goal irq is kept on only while there are unsignaled fence
 * objects with actions attached; vmw_fence_work_func() turns it off again
 * once no valid goal seqno remains.
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * The barrier-free __set_current_state() is sufficient here:
		 * both DMA_FENCE_FLAG_SIGNALED_BIT and the wakeup are
		 * protected by the fence spinlock held across this section.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute cleanup actions on fences recently signaled.
 * This is done from a workqueue so we don't have to run
 * the cleanup from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * The actions are now on our private list, so nobody else
		 * can touch their list heads; fman->lock is not needed here.
		 */
		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the action to the cleanup list so that its cleanup
		 * callback is run by the fence manager's worker.
		 */
		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal seqno if
 * needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * Called with the fence manager lock held, typically when a new passed
 * seqno is available. If the current fence goal has already been reached,
 * scan the unsignaled fences for the first one with actions attached and
 * write its seqno as the new fence goal; @seqno_valid tracks whether such
 * a fence was found.
 *
 * Returns true if the previous goal had been reached (and the goal may
 * therefore have been updated), false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fifo_mem_write(fman->dev_priv,
					   SVGA_FIFO_FENCE_GOAL,
					   fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj whose seqno should be considered
 * as a device fence goal.
 *
 * Called with the fence manager lock held, typically when an action has
 * just been attached to a fence. If there is currently no valid fence goal,
 * or the fence's seqno lies beyond the current goal, the fence's seqno is
 * written as the new goal so a fence goal irq will be raised for it.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
			   fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * If the fence goal was just moved, the device may have passed
	 * further fences while racing with that update, so re-read the
	 * seqno and rerun the loop if it changed.
	 */
	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

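/*
 * vmw_fences_update - Grab the fence manager lock and signal all fence
 * objects whose seqno the device has passed, running their actions.
 */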
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);

	/*
	 * Release the kernel memory accounting done for this object at
	 * creation time.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Account for the kernel memory used by the user fence object
	 * before allocating it.
	 */
	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a fence reference, which is dropped in
	 * vmw_user_fence_base_release().
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Drop the base object's fence reference.
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * Handles the case where the fence is actually a fence array: in that case
 * each of the child fences is waited on in turn.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/*
	 * The fence is a fence array: wait on each child fence in turn.
	 */
	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * vmw_fence_fifo_down - wait for, or failing that force-signal, all
 * outstanding fence objects so that none remain pending while the fifo
 * is down.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released fman->lock.
	 */
	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct ttm_base_object pointer on success, or an ERR_PTR
 * on failure.
 *
 * Verifies that the looked-up object really is a user fence object by
 * checking its release function, and returns it with a reference held.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not available on all 32-bit platforms, so
	 * approximate the division by 1000000 with shifts.
	 */
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		(wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally, if the caller requested it, drop the user-space
	 * reference on the fence object once the wait has succeeded.
	 */
	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

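/*
 * vmw_fence_obj_unref_ioctl - Drop the caller's ttm reference on the user
 * fence object identified by @arg->handle.
 */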
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * Called when the seqno of the fence the action is attached to has passed.
 * Fills in the optional timestamp and sends the pending drm event to the
 * file that queued it. Runs in atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);

		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * Cleanup routine for a struct vmw_event_fence_action: drops the fence
 * reference and frees the action. Run from the fence manager's worker.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * If the fence has already signaled, the action is run (and queued for
 * cleanup) immediately; otherwise it is added to the fence's
 * seq_passed_actions list and the fence goal / goal irq state is updated.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should have been allocated with
 * k[mz]alloc, and should have been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * consumed (and eventually freed by the drm core) when this function
 * returns. If this function returns with an error code, the caller needs
 * to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

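/*
 * vmw_event_fence_action_create - Allocate and reserve a drm pending event
 * and queue it as a fence action so it is delivered to @file_priv when
 * @fence signals. If DRM_VMW_FE_FLAG_REQ_TIME is set in @flags, the event's
 * timestamp fields are filled in at signal time. On error the event, if
 * already reserved, is cancelled and freed.
 */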
static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object, and if user-space wants a
	 * new reference, add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}