/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = INT_MAX;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority > last_prio);
		last_prio = p->priority;
	}
}

struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;

	lockdep_assert_held(&engine->active.lock);
	assert_priolists(execlists);

	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
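	/* most positive priority is scheduled first, equal priorities fifo */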
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			return &p->requests;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

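			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will be still some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */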
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);

	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);

	return &p->requests;
}

void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}

struct sched_cache {
	struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	const struct i915_request *rq = node_to_request(node);
	struct intel_engine_cs *engine;

	GEM_BUG_ON(!locked);

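	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */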
	while (locked != (engine = READ_ONCE(rq->engine))) {
		spin_unlock(&locked->active.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->active.lock);
		locked = engine;
	}

	GEM_BUG_ON(locked != engine);
	return locked;
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}

static inline bool need_preempt(int prio, int active)
{
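	/*
	 * Allow preemption of low -> normal -> high, but we do
	 * not allow low priority tasks to preempt other low priority
	 * tasks under the impression that latency for low priority
	 * tasks does not matter (as much as background throughput),
	 * so kiss.
	 */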
	return prio >= max(I915_PRIORITY_NORMAL, active);
}

static void kick_submission(struct intel_engine_cs *engine,
			    const struct i915_request *rq,
			    int prio)
{
	const struct i915_request *inflight;

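	/*
	 * We only need to kick the tasklet once for the high priority
	 * new context we add into the queue.
	 */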
	if (prio <= engine->execlists.queue_priority_hint)
		return;

	rcu_read_lock();

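	/* Nothing currently active? We're overdue for a submission! */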
	inflight = execlists_active(&engine->execlists);
	if (!inflight)
		goto unlock;

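	/*
	 * If we are already the currently executing context, don't
	 * bother evaluating if we should preempt ourselves.
	 */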
	if (inflight->context == rq->context)
		goto unlock;

	ENGINE_TRACE(engine,
		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
		     prio,
		     rq->fence.context, rq->fence.seqno,
		     inflight->fence.context, inflight->fence.seqno,
		     inflight->sched.attr.priority);

	engine->execlists.queue_priority_hint = prio;
	if (need_preempt(prio, rq_prio(inflight)))
		tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
	rcu_read_unlock();
}

static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	const int prio = max(attr->priority, node->attr.priority);
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	struct sched_cache cache;
	LIST_HEAD(dfs);

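	/* Needed in order to use the temporary link inside i915_dependency */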
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

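	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signaler, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its new
	 * dependencies to the end of the list (this may include an already
	 * visited dependency) and continue to walk onwards onto the new
	 * dependencies. The end result is a topological list of requests in
	 * reverse order, the last element in the list is the request we
	 * must execute before any on the dfs list.
	 */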
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

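		/* If we are already flying, we know we have no signalers */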
		if (node_started(node))
			continue;

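		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */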
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

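	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */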
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = node_to_request(node)->engine;
	spin_lock(&engine->active.lock);

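	/* Fifo and depth-first replacement ensure our deps execute before us */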
	engine = sched_lock_engine(node, engine, &cache);
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->active.lock);

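		/* Recheck after acquiring the engine->active.lock */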
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine != engine);

		WRITE_ONCE(node->attr.priority, prio);

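		/*
		 * Once the request is ready, it will be placed into the
		 * priority lists and then onto the HW runlist. Before the
		 * request is ready, it does not contribute to our preemption
		 * decisions and we can safely ignore it, as it will, and
		 * any preemption required, be dealt with upon submission.
		 * See engine->submit_request()
		 */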
		if (list_empty(&node->link))
			continue;

		if (i915_request_in_priority_queue(node_to_request(node))) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		}

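		/* Defer (tasklet) submission until after all of our updates. */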
		kick_submission(engine, node_to_request(node), prio);
	}

	spin_unlock(&engine->active.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}

void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	i915_sched_node_reinit(node);
}

void i915_sched_node_reinit(struct i915_sched_node *node)
{
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;

	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}

bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		dep->signaler = signal;
		dep->waiter = node;
		dep->flags = flags;

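		/* All set, now publish. Beware the lockless walkers. */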
		list_add_rcu(&dep->signal_link, &node->signalers_list);
		list_add_rcu(&dep->wait_link, &signal->waiters_list);

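		/* Propagate the chains */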
		node->flags |= signal->flags;
		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}

int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal,
				   unsigned long flags)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      flags | I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}

void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	spin_lock_irq(&schedule_lock);

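	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */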
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->signalers_list);

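	/* Remove ourselves from everyone who depends upon us */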
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

	spin_unlock_irq(&schedule_lock);
}

void i915_request_show_with_schedule(struct drm_printer *m,
				     const struct i915_request *rq,
				     const char *prefix,
				     int indent)
{
	struct i915_dependency *dep;

	i915_request_show(m, rq, prefix, indent);
	if (i915_request_completed(rq))
		return;

	rcu_read_lock();
	for_each_signaler(dep, rq) {
		const struct i915_request *signaler =
			node_to_request(dep->signaler);

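		/* Dependencies along the same timeline are expected. */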
		if (signaler->timeline == rq->timeline)
			continue;

		if (__i915_request_is_complete(signaler))
			continue;

		i915_request_show(m, signaler, prefix, indent + 2);
	}
	rcu_read_unlock();
}

static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };

int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN |
					      SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
	if (!global.slab_priorities)
		goto err_priorities;

	i915_global_register(&global.base);
	return 0;

err_priorities:
	/* slab_priorities failed to create; unwind the dependency cache */
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}