// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXTempSetContext body;
};

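/**
 * vmw_fifo_have_3d - Check whether the device and FIFO support 3D.
 *
 * @dev_priv: The device private structure.
 *
 * On guest-backed object devices the SVGA3D_DEVCAP_3D capability is
 * queried through the device capability registers; otherwise the
 * extended FIFO is probed for a sufficiently recent 3D hardware version.
 */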
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
        const struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (!(dev_priv->capabilities & SVGA_CAP_3D))
                return false;

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint32_t result;

                if (!dev_priv->has_mob)
                        return false;

                spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
                result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);

                return (result != 0);
        }

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;

        hwversion = vmw_mmio_read(fifo_mem +
                                  ((fifo->capabilities &
                                    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                   SVGA_FIFO_3D_HWVERSION_REVISED :
                                   SVGA_FIFO_3D_HWVERSION));

        if (hwversion == 0)
                return false;

        if (hwversion < SVGA3D_HWVERSION_WS8_B1)
                return false;

        /* Legacy Display Unit does not support surfaces */
        if (dev_priv->active_display_unit == vmw_du_legacy)
                return false;

        return true;
}

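/**
 * vmw_fifo_have_pitchlock - Check whether the FIFO advertises the
 * pitchlock capability.
 *
 * @dev_priv: The device private structure.
 */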
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t caps;

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
        if (caps & SVGA_FIFO_CAP_PITCHLOCK)
                return true;

        return false;
}

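/**
 * vmw_fifo_init - Initialize the FIFO and enable the SVGA device.
 *
 * @dev_priv: The device private structure.
 * @fifo: The FIFO state to initialize.
 *
 * Saves the register state to be restored on release, sets up the FIFO
 * min/max offsets and allocates the static bounce buffer.
 *
 * Returns 0 on success, -ENOMEM if the bounce buffer cannot be allocated.
 */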
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;

        fifo->dx = false;
        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
        if (unlikely(fifo->static_buffer == NULL))
                return -ENOMEM;

        fifo->dynamic_buffer = NULL;
        fifo->reserved_size = 0;
        fifo->using_bounce_buffer = false;

        mutex_init(&fifo->fifo_mutex);
        init_rwsem(&fifo->rwsem);

        DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

        vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
                  SVGA_REG_ENABLE_HIDE);
        vmw_write(dev_priv, SVGA_REG_TRACES, 0);

        min = 4;
        if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
                min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
        min <<= 2;

        if (min < PAGE_SIZE)
                min = PAGE_SIZE;

        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
        vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
        wmb();
        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
        vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
        mb();

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

        max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

        DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
                 (unsigned int) max,
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);

        atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
        vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
        vmw_marker_queue_init(&fifo->marker_queue);

        return 0;
}

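/**
 * vmw_fifo_ping_host - Ask the host to start processing FIFO commands,
 * unless the FIFO busy flag indicates it has already been asked to.
 *
 * @dev_priv: The device private structure.
 * @reason: Reason code written to the SVGA_REG_SYNC register.
 */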
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
        u32 *fifo_mem = dev_priv->mmio_virt;

        preempt_disable();
        if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        preempt_enable();
}

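/**
 * vmw_fifo_release - Drain the FIFO, restore the device register state
 * saved at init time and free the bounce buffers.
 *
 * @dev_priv: The device private structure.
 * @fifo: The FIFO state to take down.
 */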
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        u32 *fifo_mem = dev_priv->mmio_virt;

        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                ;

        dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);
        vmw_write(dev_priv, SVGA_REG_TRACES,
                  dev_priv->traces_state);

        vmw_marker_queue_takedown(&fifo->marker_queue);

        if (likely(fifo->static_buffer != NULL)) {
                vfree(fifo->static_buffer);
                fifo->static_buffer = NULL;
        }

        if (likely(fifo->dynamic_buffer != NULL)) {
                vfree(fifo->dynamic_buffer);
                fifo->dynamic_buffer = NULL;
        }
}

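/**
 * vmw_fifo_is_full - Check whether the FIFO has less than @bytes of
 * free command space.
 *
 * @dev_priv: The device private structure.
 * @bytes: The amount of FIFO space needed.
 */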
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

        return ((max - next_cmd) + (stop - min) <= bytes);
}

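/**
 * vmw_fifo_wait_noirq - Wait for FIFO space by polling, used when the
 * device lacks the IRQ mask capability.
 *
 * @dev_priv: The device private structure.
 * @bytes: The amount of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies, after which -EBUSY is returned and the
 * device is considered locked up.
 */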
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
                               uint32_t bytes, bool interruptible,
                               unsigned long timeout)
{
        int ret = 0;
        unsigned long end_jiffies = jiffies + timeout;
        DEFINE_WAIT(__wait);

        DRM_INFO("Fifo wait noirq.\n");

        for (;;) {
                prepare_to_wait(&dev_priv->fifo_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (!vmw_fifo_is_full(dev_priv, bytes))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                schedule_timeout(1);
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fifo_queue, &__wait);
        wake_up_all(&dev_priv->fifo_queue);
        DRM_INFO("Fifo noirq exit.\n");
        return ret;
}

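/**
 * vmw_fifo_wait - Wait for FIFO space, sleeping on the FIFO progress
 * interrupt when available and falling back to polling otherwise.
 *
 * @dev_priv: The device private structure.
 * @bytes: The amount of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS if
 * interrupted by a signal.
 */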
static int vmw_fifo_wait(struct vmw_private *dev_priv,
                         uint32_t bytes, bool interruptible,
                         unsigned long timeout)
{
        long ret = 1L;

        if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);

        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                               &dev_priv->fifo_queue_waiters);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                                  &dev_priv->fifo_queue_waiters);

        return ret;
}

/**
 * vmw_local_fifo_reserve - Reserve @bytes of space in the fifo.
 *
 * This function returns NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than
 * the available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                    uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t next_cmd;
        uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
        int ret;

        mutex_lock(&fifo_state->fifo_mutex);
        max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

        if (unlikely(bytes >= (max - min)))
                goto out_err;

        BUG_ON(fifo_state->reserved_size != 0);
        BUG_ON(fifo_state->dynamic_buffer != NULL);

        fifo_state->reserved_size = bytes;

        while (1) {
                uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
                bool need_bounce = false;
                bool reserve_in_place = false;

                if (next_cmd >= stop) {
                        if (likely((next_cmd + bytes < max ||
                                    (next_cmd + bytes == max && stop > min))))
                                reserve_in_place = true;

                        else if (vmw_fifo_is_full(dev_priv, bytes)) {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        } else
                                need_bounce = true;

                } else {

                        if (likely((next_cmd + bytes < stop)))
                                reserve_in_place = true;
                        else {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        }
                }

                if (reserve_in_place) {
                        if (reserveable || bytes <= sizeof(uint32_t)) {
                                fifo_state->using_bounce_buffer = false;

                                if (reserveable)
                                        vmw_mmio_write(bytes, fifo_mem +
                                                       SVGA_FIFO_RESERVED);
                                return (void __force *) (fifo_mem +
                                                         (next_cmd >> 2));
                        } else {
                                need_bounce = true;
                        }
                }

                if (need_bounce) {
                        fifo_state->using_bounce_buffer = true;
                        if (bytes < fifo_state->static_buffer_size)
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
                                if (!fifo_state->dynamic_buffer)
                                        goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
        }
out_err:
        fifo_state->reserved_size = 0;
        mutex_unlock(&fifo_state->fifo_mutex);

        return NULL;
}

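/**
 * vmw_fifo_reserve_dx - Reserve command space, routed through the
 * command buffer manager when one is present.
 *
 * @dev_priv: The device private structure.
 * @bytes: The number of bytes to reserve.
 * @ctx_id: DX context id, or SVGA3D_INVALID_ID for none.
 *
 * Returns a pointer to the reserved command space, or NULL on failure.
 */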
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
                          int ctx_id)
{
        void *ret;

        if (dev_priv->cman)
                ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
                                         ctx_id, false, NULL);
        else if (ctx_id == SVGA3D_INVALID_ID)
                ret = vmw_local_fifo_reserve(dev_priv, bytes);
        else {
                WARN(1, "Command buffer has not been allocated.\n");
                ret = NULL;
        }
        if (IS_ERR_OR_NULL(ret))
                return NULL;

        return ret;
}

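/**
 * vmw_fifo_res_copy - Copy a bounce buffer into reserved FIFO space,
 * splitting the copy in two where it wraps around the FIFO end.
 *
 * @fifo_state: The FIFO state holding the bounce buffer.
 * @fifo_mem: Pointer to the mapped FIFO memory.
 * @next_cmd: FIFO offset at which to place the commands.
 * @max: FIFO maximum offset.
 * @min: FIFO minimum offset.
 * @bytes: Number of bytes to copy.
 */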
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
                              u32 *fifo_mem,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t chunk_size = max - next_cmd;
        uint32_t rest;
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        if (bytes < chunk_size)
                chunk_size = bytes;

        vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
        rest = bytes - chunk_size;
        if (rest)
                memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

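/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO one 32-bit
 * word at a time, advancing the next-command offset after each word.
 * Used when the FIFO lacks the reserve capability.
 *
 * @fifo_state: The FIFO state holding the bounce buffer.
 * @fifo_mem: Pointer to the mapped FIFO memory.
 * @next_cmd: FIFO offset at which to place the commands.
 * @max: FIFO maximum offset.
 * @min: FIFO minimum offset.
 * @bytes: Number of bytes to copy.
 */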
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
                               u32 *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        while (bytes > 0) {
                vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
                next_cmd += sizeof(uint32_t);
                if (unlikely(next_cmd == max))
                        next_cmd = min;
                mb();
                vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
                mb();
                bytes -= sizeof(uint32_t);
        }
}

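/**
 * vmw_local_fifo_commit - Commit previously reserved FIFO space,
 * copying out the bounce buffer if one was used, and ping the host.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to commit.
 */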
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

        if (fifo_state->dx)
                bytes += sizeof(struct vmw_temp_set_context);

        fifo_state->dx = false;
        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);

        fifo_state->reserved_size = 0;

        if (fifo_state->using_bounce_buffer) {
                if (reserveable)
                        vmw_fifo_res_copy(fifo_state, fifo_mem,
                                          next_cmd, max, min, bytes);
                else
                        vmw_fifo_slow_copy(fifo_state, fifo_mem,
                                           next_cmd, max, min, bytes);

                if (fifo_state->dynamic_buffer) {
                        vfree(fifo_state->dynamic_buffer);
                        fifo_state->dynamic_buffer = NULL;
                }

        }

        down_write(&fifo_state->rwsem);
        if (fifo_state->using_bounce_buffer || reserveable) {
                next_cmd += bytes;
                if (next_cmd >= max)
                        next_cmd -= max - min;
                mb();
                vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
        }

        if (reserveable)
                vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        up_write(&fifo_state->rwsem);
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        mutex_unlock(&fifo_state->fifo_mutex);
}

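/**
 * vmw_fifo_commit - Commit previously reserved command space through
 * either the command buffer manager or the FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */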
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command
 * processing starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptible if function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
        might_sleep();

        if (dev_priv->cman)
                return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
        else
                return 0;
}

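/**
 * vmw_fifo_send_fence - Emit a fence command and return its sequence
 * number.
 *
 * @dev_priv: The device private structure.
 * @seqno: Set to the sequence number of the emitted fence.
 *
 * Returns 0 on success. On failure to reserve FIFO space, -ENOMEM is
 * returned after a fallback wait.
 */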
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        struct svga_fifo_cmd_fence *cmd_fence;
        u32 *fm;
        int ret = 0;
        uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

        fm = VMW_FIFO_RESERVE(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
                *seqno = atomic_read(&dev_priv->marker_seq);
                ret = -ENOMEM;
                (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
                                        false, 3*HZ);
                goto out_err;
        }

        do {
                *seqno = atomic_add_return(1, &dev_priv->marker_seq);
        } while (*seqno == 0);

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

                /*
                 * Don't request hardware to send a fence. The
                 * waiting code in vmwgfx_irq.c will emulate this.
                 */

                vmw_fifo_commit(dev_priv, 0);
                return 0;
        }

        *fm++ = SVGA_CMD_FENCE;
        cmd_fence = (struct svga_fifo_cmd_fence *) fm;
        cmd_fence->fence = *seqno;
        vmw_fifo_commit_flush(dev_priv, bytes);
        (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
        vmw_update_seqno(dev_priv, fifo_state);

out_err:
        return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
                                            uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

        if (bo->mem.mem_type == TTM_PL_VRAM) {
                cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
                cmd->body.guestResult.offset = bo->offset;
        } else {
                cmd->body.guestResult.gmrId = bo->mem.start;
                cmd->body.guestResult.offset = 0;
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
                                        uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
        cmd->body.mobid = bo->mem.start;
        cmd->body.offset = 0;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                              uint32_t cid)
{
        if (dev_priv->has_mob)
                return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

        return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}