/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

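/*
 * An idle-barrier "proto-node" is recognisable by its fence slot holding
 * ERR_PTR(-EAGAIN) instead of a real dma_fence. While in that state the
 * node is threaded onto engine->barrier_tasks via base.cb.node, and the
 * owning engine is stashed in base.cb.node.prev (see barrier_to_engine()).
 */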
static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ...except if you wait upon it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		/* Retirement may sleep (e.g. in ref->retire), defer to a worker */
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

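/*
 * Each i915_active_fence tracks its fence through an RCU-protected slot.
 * On signaling, active_fence_cb() clears that slot with a cmpxchg so that it
 * cannot race against __i915_active_fence_set() installing a newer fence;
 * the tracker is only retired if the signaled fence was still the one being
 * tracked.
 */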
static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the current slot has been retired, or if it is occupied by
	 * another active timeline, the MRU slot is replaced.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * possible for us to claim it ourselves (cmpxchg). If another
		 * thread succeeds in claiming the slot first, it belongs to
		 * that other idx and is of no use to us here.
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node));

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() while holding the
	 * spinlock anyway, so use GFP_ATOMIC.
	 */
	node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	/* Return NULL on allocation failure so the caller's check works */
	return node ? &node->base : NULL;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	__active_del_barrier(ref, node_from_active(active));
	return true;
}

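/*
 * i915_active_ref() records @fence as the most recent fence for timeline
 * @idx within @ref; the tracker then remains busy until all tracked fences
 * have signaled and been retired.
 */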
int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		__i915_active_acquire(ref);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

static struct i915_active_fence *
__active_fence(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	it = __active_lookup(ref, idx);
	if (unlikely(!it)) { /* Contention with parallel tree builders! */
		spin_lock_irq(&ref->tree_lock);
		it = __active_lookup(ref, idx);
		spin_unlock_irq(&ref->tree_lock);
	}
	GEM_BUG_ON(!it); /* slot must be preallocated */

	return &it->base;
}

struct dma_fence *
__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	/* Only valid while active, see i915_active_acquire_for_context() */
	return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

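/*
 * Flush all pending signaling (including lazy idle barriers) and then sleep
 * in @state until the i915_active is idle, i.e. every tracked fence has
 * signaled and any deferred retirement worker has completed.
 */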
int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active))
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

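/*
 * For I915_ACTIVE_AWAIT_BARRIER there may be no dma_fence to wait on yet
 * (idle barriers are only attached to a request later), so instead we park
 * a wait_queue_entry on the i915_active's waitqueue and barrier_wake()
 * completes the i915_sw_fence once the whole tracker has gone idle.
 */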
struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(global.slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

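/*
 * Preallocate one idle-barrier node per physical engine backing @engine
 * (a virtual engine may have several siblings) and stash them on
 * ref->preallocated_barriers; i915_active_acquire_barrier() later commits
 * them to the tree, so that step cannot fail on allocation.
 */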
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier().
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 1);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

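/*
 * Attach the engine's pending barrier nodes to @rq, which must execute on
 * the engine's kernel_context timeline; once @rq signals, each barrier node
 * is retired and drops its reference on the owning i915_active.
 */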
void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;

	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the barrier nodes are signaled (and the i915_active
	 * references dropped) when this request completes.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/**
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To know the
 * ordering of fences within the timeline of the i915_active_fence, the
 * timeline should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process of removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older fence.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock);
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may be from another timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

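/*
 * An auto_active wraps an i915_active together with a kref so that the
 * tracker manages its own lifetime: auto_active()/auto_retire() take and
 * drop a reference around each busy phase, and the final i915_active_put()
 * finalises and frees the allocation once it is idle.
 */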
struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}