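/*
 * Command submission paths for the VMware SVGA device: legacy FIFO
 * memory setup, reservation and commit, plus the vmw_cmd_* entry points
 * that dispatch to the command buffer manager when one is present.
 */
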
#include <linux/sched/signal.h>

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"

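/**
 * vmw_supports_3d - Check whether the device and driver state allow 3D.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * For guest-backed-object devices this queries SVGA3D_DEVCAP_3D directly;
 * for extended-FIFO devices it checks the advertised 3D hardware version.
 * Returns true if 3D command submission is supported.
 */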
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	BUG_ON(vmw_is_svga_v3(dev_priv));

	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_fifo_mem_read(dev_priv,
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
				       SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

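/**
 * vmw_fifo_have_pitchlock - Check for the FIFO pitchlock capability.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns true if the extended FIFO advertises SVGA_FIFO_CAP_PITCHLOCK.
 */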
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

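/**
 * vmw_fifo_create - Allocate and initialize the legacy FIFO state.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Sets up the FIFO MIN/MAX/NEXT_CMD/STOP registers in FIFO memory,
 * allocates the static bounce buffer and signals the device that FIFO
 * configuration is done. Returns NULL if the device has no FIFO memory,
 * or an ERR_PTR on allocation failure.
 */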
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo;
	uint32_t max;
	uint32_t min;

	if (!dev_priv->fifo_mem)
		return NULL;

	fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return ERR_PTR(-ENOMEM);
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL)) {
		kfree(fifo);
		return ERR_PTR(-ENOMEM);
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * The command area starts after the FIFO register area: four
	 * registers by default, SVGA_REG_MEM_REGS of them on extended-FIFO
	 * devices, each register four bytes wide.
	 */
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
	wmb();
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);
	return fifo;
}

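/**
 * vmw_fifo_ping_host - Wake up the device to process FIFO commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @reason: An SVGA_SYNC_* reason code.
 *
 * The cmpxchg on SVGA_FIFO_BUSY ensures only one ping is outstanding:
 * the sync register write is skipped while the device is already marked
 * busy.
 */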
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->fifo_mem;

	if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}

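/**
 * vmw_fifo_destroy - Free the FIFO state and its bounce buffers.
 *
 * @dev_priv: Pointer to the device private structure.
 */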
void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!fifo)
		return;

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
	kfree(fifo);
	dev_priv->fifo = NULL;
}

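/**
 * vmw_fifo_is_full - Check whether @bytes of command space are unavailable.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Requested number of bytes.
 *
 * The free space in the ring is the gap from NEXT_CMD up to MAX plus the
 * already-consumed gap from MIN up to STOP; the FIFO is considered full
 * when that total does not exceed @bytes.
 */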
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

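/**
 * vmw_fifo_wait_noirq - Poll for FIFO space without relying on interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait can be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success, -EBUSY on timeout and -ERESTARTSYS if a signal
 * arrived during an interruptible wait.
 */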
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

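/**
 * vmw_fifo_wait - Wait for @bytes of FIFO command space.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait can be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Pings the device with SVGA_SYNC_FIFOFULL and then sleeps on the FIFO
 * progress interrupt, falling back to polling when the device lacks
 * SVGA_CAP_IRQMASK.
 */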
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(dev_priv->fifo_queue,
			 !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
			(dev_priv->fifo_queue,
			 !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}

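/**
 * vmw_local_fifo_reserve - Reserve FIFO command space.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to reserve.
 *
 * Returns a pointer to @bytes of contiguous command space, either
 * directly in FIFO memory or in a bounce buffer that is copied over on
 * commit. Returns NULL if the request cannot be satisfied or times out.
 * On success the FIFO mutex is held until the matching commit.
 */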
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	u32 *fifo_mem = dev_priv->fifo_mem;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			/* Free space wraps around max; in-place reservation
			 * needs the request to fit before max. */
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {
			/* Free space is contiguous up to stop. */
			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_fifo_mem_write(dev_priv,
							   SVGA_FIFO_RESERVED,
							   bytes);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

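/**
 * vmw_cmd_ctx_reserve - Reserve command space for a given context.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: Hardware context id, or SVGA3D_INVALID_ID.
 *
 * Uses the command buffer manager when present, otherwise falls back to
 * the legacy FIFO for context-less submissions. Returns NULL on failure.
 */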
void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret))
		return NULL;

	return ret;
}

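/**
 * vmw_fifo_res_copy - Copy a bounce buffer into reserved FIFO space.
 *
 * @fifo_state: FIFO state holding the bounce buffer to copy from.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Byte offset of the write position in FIFO memory.
 * @max: End of the command area.
 * @min: Start of the command area.
 * @bytes: Number of bytes to copy.
 *
 * Publishes the size via SVGA_FIFO_RESERVED, then copies in up to two
 * chunks, wrapping from @max back to @min when necessary.
 */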
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

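/**
 * vmw_fifo_slow_copy - Copy a bounce buffer one word at a time.
 *
 * @fifo_state: FIFO state holding the bounce buffer to copy from.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Byte offset of the write position in FIFO memory.
 * @max: End of the command area.
 * @min: Start of the command area.
 * @bytes: Number of bytes to copy.
 *
 * Used when SVGA_FIFO_CAP_RESERVE is unavailable: NEXT_CMD is advanced
 * after each 32-bit word, with memory barriers keeping the data and
 * pointer writes ordered.
 */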
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

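/**
 * vmw_local_fifo_commit - Commit previously reserved FIFO space.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to commit (must not exceed the reservation).
 *
 * Copies out any bounce buffer, advances SVGA_FIFO_NEXT_CMD, clears the
 * reservation, pings the device and drops the FIFO mutex taken at
 * reserve time.
 */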
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}
	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

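/**
 * vmw_cmd_commit - Commit previously reserved command space.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to commit.
 */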
void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

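/**
 * vmw_cmd_commit_flush - Commit previously reserved command space and flush.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Like vmw_cmd_commit(), but also requests a flush of the current
 * command buffer when the command buffer manager is in use.
 */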
void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

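/**
 * vmw_cmd_flush - Flush any buffered commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @interruptible: Whether to sleep interruptibly while flushing.
 *
 * Only the command buffer manager buffers commands; the legacy FIFO path
 * has nothing to flush, so 0 is returned in that case.
 */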
int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

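/**
 * vmw_cmd_send_fence - Write a fence command to the command stream.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: Outputs the sequence number assigned to the fence.
 *
 * Returns -ENOMEM and falls back to a polling wait if command space
 * cannot be reserved; returns 0 otherwise.
 */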
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3 * HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */
		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}

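/**
 * vmw_cmd_emit_dummy_legacy_query - Emit a dummy query barrier using the
 * legacy query commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See vmw_cmd_emit_dummy_query() for usage. Returns -ENOMEM on failure
 * to reserve fifo space.
 */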
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					   uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */
	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->resource->mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
	} else {
		cmd->body.guestResult.gmrId = bo->resource->start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

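/**
 * vmw_cmd_emit_dummy_gb_query - Emit a dummy query barrier using
 * guest-backed query commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See vmw_cmd_emit_dummy_query() for usage. Returns -ENOMEM on failure
 * to reserve fifo space.
 */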
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
				       uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */
	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->resource->start;
	cmd->body.offset = 0;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

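/**
 * vmw_cmd_emit_dummy_query - Emit a dummy query barrier to the command stream.
 *
 * @dev_priv: Pointer to the device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function emits a dummy occlusion query with no primitives rendered
 * between query begin and query end. It is used as a query barrier: once
 * this query finishes, all preceding queries are known to have finished
 * as well.
 *
 * A query result structure must have been initialized at the start of the
 * dev_priv->dummy_query_bo buffer object, and that buffer object must be
 * reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */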
int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
			     uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);

	return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}

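/**
 * vmw_cmd_supported - Check whether command submission is possible.
 *
 * @vmw: Pointer to the device private structure.
 *
 * Returns true if the device supports command buffers or, failing that,
 * exposes FIFO memory for legacy command submission.
 */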
bool vmw_cmd_supported(struct vmw_private *vmw)
{
	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
		return true;

	/* We have FIFO cmd's */
	return vmw->fifo_mem != NULL;
}