1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
4 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
27 #include <linux/sync_file.h>
29 #include "vmwgfx_drv.h"
30 #include "vmwgfx_reg.h"
31 #include <drm/ttm/ttm_bo_api.h>
32 #include <drm/ttm/ttm_placement.h>
33 #include "vmwgfx_so.h"
34 #include "vmwgfx_binding.h"
36 #define VMW_RES_HT_ORDER 12
39 * Helper macro to get dx_ctx_node if available otherwise print an error
40 * message. This is for use in command verifier function where if dx_ctx_node
41 * is not set then command is invalid.
43 #define VMW_GET_CTX_NODE(__sw_context) \
45 __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
46 VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
47 __sw_context->dx_ctx_node; \
51 #define VMW_DECLARE_CMD_VAR(__var, __type) \
53 SVGA3dCmdHeader header; \
58 * struct vmw_relocation - Buffer object relocation
60 * @head: List head for the command submission context's relocation list
61 * @vbo: Non ref-counted pointer to buffer object
62 * @mob_loc: Pointer to location for mob id to be modified
63 * @location: Pointer to location for guest pointer to be modified
65 struct vmw_relocation {
66 struct list_head head;
67 struct vmw_buffer_object *vbo;
70 SVGAGuestPtr *location;
75 * enum vmw_resource_relocation_type - Relocation type for resources
77 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
78 * command stream is replaced with the actual id after validation.
79 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
81 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
82 * validation is -1, the command is replaced with a NOP. Otherwise no action.
84 enum vmw_resource_relocation_type {
92 * struct vmw_resource_relocation - Relocation info for resources
94 * @head: List head for the software context's relocation list.
95 * @res: Non-ref-counted pointer to the resource.
96 * @offset: Offset of single byte entries into the command buffer where the id
97 * that needs fixup is located.
98 * @rel_type: Type of relocation.
100 struct vmw_resource_relocation {
101 struct list_head head;
102 const struct vmw_resource *res;
104 enum vmw_resource_relocation_type rel_type:3;
108 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
110 * @head: List head of context list
111 * @ctx: The context resource
112 * @cur: The context's persistent binding state
113 * @staged: The binding state changes of this command buffer
115 struct vmw_ctx_validation_info {
116 struct list_head head;
117 struct vmw_resource *ctx;
118 struct vmw_ctx_binding_state *cur;
119 struct vmw_ctx_binding_state *staged;
123 * struct vmw_cmd_entry - Describe a command for the verifier
125 * @user_allow: Whether allowed from the execbuf ioctl.
126 * @gb_disable: Whether disabled if guest-backed objects are available.
127 * @gb_enable: Whether enabled iff guest-backed objects are available.
129 struct vmw_cmd_entry {
130 int (*func) (struct vmw_private *, struct vmw_sw_context *,
135 const char *cmd_name;
138 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
139 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
140 (_gb_disable), (_gb_enable), #_cmd}
142 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
143 struct vmw_sw_context *sw_context,
144 struct vmw_resource *ctx);
145 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
146 struct vmw_sw_context *sw_context,
148 struct vmw_buffer_object **vmw_bo_p);
150 * vmw_ptr_diff - Compute the offset from a to b in bytes
152 * @a: A starting pointer.
153 * @b: A pointer offset in the same address space.
155 * Returns: The offset in bytes between the two pointers.
157 static size_t vmw_ptr_diff(void *a, void *b)
159 return (unsigned long) b - (unsigned long) a;
163 * vmw_execbuf_bindings_commit - Commit modified binding state
165 * @sw_context: The command submission context
166 * @backoff: Whether this is part of the error path and binding state changes
169 static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
172 struct vmw_ctx_validation_info *entry;
174 list_for_each_entry(entry, &sw_context->ctx_list, head) {
176 vmw_binding_state_commit(entry->cur, entry->staged);
178 if (entry->staged != sw_context->staged_bindings)
179 vmw_binding_state_free(entry->staged);
181 sw_context->staged_bindings_inuse = false;
184 /* List entries are freed with the validation context */
185 INIT_LIST_HEAD(&sw_context->ctx_list);
189 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
191 * @sw_context: The command submission context
193 static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
195 if (sw_context->dx_query_mob)
196 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
197 sw_context->dx_query_mob);
201 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
204 * @dev_priv: Pointer to the device private:
205 * @sw_context: The command submission context
206 * @node: The validation node holding the context resource metadata
208 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
209 struct vmw_sw_context *sw_context,
210 struct vmw_resource *res,
211 struct vmw_ctx_validation_info *node)
215 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
216 if (unlikely(ret != 0))
219 if (!sw_context->staged_bindings) {
220 sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
221 if (IS_ERR(sw_context->staged_bindings)) {
222 ret = PTR_ERR(sw_context->staged_bindings);
223 sw_context->staged_bindings = NULL;
228 if (sw_context->staged_bindings_inuse) {
229 node->staged = vmw_binding_state_alloc(dev_priv);
230 if (IS_ERR(node->staged)) {
231 ret = PTR_ERR(node->staged);
236 node->staged = sw_context->staged_bindings;
237 sw_context->staged_bindings_inuse = true;
241 node->cur = vmw_context_binding_state(res);
242 list_add_tail(&node->head, &sw_context->ctx_list);
251 * vmw_execbuf_res_size - calculate extra size fore the resource validation node
253 * @dev_priv: Pointer to the device private struct.
254 * @res_type: The resource type.
256 * Guest-backed contexts and DX contexts require extra size to store execbuf
257 * private information in the validation node. Typically the binding manager
258 * associated data structures.
260 * Returns: The extra size requirement based on resource type.
262 static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
263 enum vmw_res_type res_type)
265 return (res_type == vmw_res_dx_context ||
266 (res_type == vmw_res_context && dev_priv->has_mob)) ?
267 sizeof(struct vmw_ctx_validation_info) : 0;
271 * vmw_execbuf_rcache_update - Update a resource-node cache entry
273 * @rcache: Pointer to the entry to update.
274 * @res: Pointer to the resource.
275 * @private: Pointer to the execbuf-private space in the resource validation
278 static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
279 struct vmw_resource *res,
283 rcache->private = private;
285 rcache->valid_handle = 0;
289 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
290 * rcu-protected pointer to the validation list.
292 * @sw_context: Pointer to the software context.
293 * @res: Unreferenced rcu-protected pointer to the resource.
294 * @dirty: Whether to change dirty status.
296 * Returns: 0 on success. Negative error code on failure. Typical error codes
297 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
299 static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
300 struct vmw_resource *res,
303 struct vmw_private *dev_priv = res->dev_priv;
305 enum vmw_res_type res_type = vmw_res_type(res);
306 struct vmw_res_cache_entry *rcache;
307 struct vmw_ctx_validation_info *ctx_info;
309 unsigned int priv_size;
311 rcache = &sw_context->res_cache[res_type];
312 if (likely(rcache->valid && rcache->res == res)) {
314 vmw_validation_res_set_dirty(sw_context->ctx,
315 rcache->private, dirty);
316 vmw_user_resource_noref_release();
320 priv_size = vmw_execbuf_res_size(dev_priv, res_type);
321 ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
322 dirty, (void **)&ctx_info,
324 vmw_user_resource_noref_release();
328 if (priv_size && first_usage) {
329 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
332 VMW_DEBUG_USER("Failed first usage context setup.\n");
337 vmw_execbuf_rcache_update(rcache, res, ctx_info);
342 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
343 * validation list if it's not already on it
345 * @sw_context: Pointer to the software context.
346 * @res: Pointer to the resource.
347 * @dirty: Whether to change dirty status.
349 * Returns: Zero on success. Negative error code on failure.
351 static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
352 struct vmw_resource *res,
355 struct vmw_res_cache_entry *rcache;
356 enum vmw_res_type res_type = vmw_res_type(res);
360 rcache = &sw_context->res_cache[res_type];
361 if (likely(rcache->valid && rcache->res == res)) {
363 vmw_validation_res_set_dirty(sw_context->ctx,
364 rcache->private, dirty);
368 ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
373 vmw_execbuf_rcache_update(rcache, res, ptr);
379 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
382 * @sw_context: The software context holding the validation list.
383 * @view: Pointer to the view resource.
385 * Returns 0 if success, negative error code otherwise.
387 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
388 struct vmw_resource *view)
393 * First add the resource the view is pointing to, otherwise it may be
394 * swapped out when the view is validated.
396 ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
397 vmw_view_dirtying(view));
401 return vmw_execbuf_res_noctx_val_add(sw_context, view,
406 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
407 * to to the validation list.
409 * @sw_context: The software context holding the validation list.
410 * @view_type: The view type to look up.
411 * @id: view id of the view.
413 * The view is represented by a view id and the DX context it's created on, or
414 * scheduled for creation on. If there is no DX context set, the function will
415 * return an -EINVAL error pointer.
417 * Returns: Unreferenced pointer to the resource on success, negative error
418 * pointer on failure.
420 static struct vmw_resource *
421 vmw_view_id_val_add(struct vmw_sw_context *sw_context,
422 enum vmw_view_type view_type, u32 id)
424 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
425 struct vmw_resource *view;
429 return ERR_PTR(-EINVAL);
431 view = vmw_view_lookup(sw_context->man, view_type, id);
435 ret = vmw_view_res_val_add(sw_context, view);
443 * vmw_resource_context_res_add - Put resources previously bound to a context on
444 * the validation list
446 * @dev_priv: Pointer to a device private structure
447 * @sw_context: Pointer to a software context used for this command submission
448 * @ctx: Pointer to the context resource
450 * This function puts all resources that were previously bound to @ctx on the
451 * resource validation list. This is part of the context state reemission
453 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
454 struct vmw_sw_context *sw_context,
455 struct vmw_resource *ctx)
457 struct list_head *binding_list;
458 struct vmw_ctx_bindinfo *entry;
460 struct vmw_resource *res;
463 /* Add all cotables to the validation list. */
464 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
465 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
466 res = vmw_context_cotable(ctx, i);
470 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
472 if (unlikely(ret != 0))
477 /* Add all resources bound to the context to the validation list */
478 mutex_lock(&dev_priv->binding_mutex);
479 binding_list = vmw_context_binding_list(ctx);
481 list_for_each_entry(entry, binding_list, ctx_list) {
482 if (vmw_res_type(entry->res) == vmw_res_view)
483 ret = vmw_view_res_val_add(sw_context, entry->res);
485 ret = vmw_execbuf_res_noctx_val_add
486 (sw_context, entry->res,
487 vmw_binding_dirtying(entry->bt));
488 if (unlikely(ret != 0))
492 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
493 struct vmw_buffer_object *dx_query_mob;
495 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
497 ret = vmw_validation_add_bo(sw_context->ctx,
498 dx_query_mob, true, false);
501 mutex_unlock(&dev_priv->binding_mutex);
506 * vmw_resource_relocation_add - Add a relocation to the relocation list
508 * @list: Pointer to head of relocation list.
509 * @res: The resource.
510 * @offset: Offset into the command buffer currently being parsed where the id
511 * that needs fixup is located. Granularity is one byte.
512 * @rel_type: Relocation type.
514 static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
515 const struct vmw_resource *res,
516 unsigned long offset,
517 enum vmw_resource_relocation_type
520 struct vmw_resource_relocation *rel;
522 rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
523 if (unlikely(!rel)) {
524 VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
529 rel->offset = offset;
530 rel->rel_type = rel_type;
531 list_add_tail(&rel->head, &sw_context->res_relocations);
537 * vmw_resource_relocations_free - Free all relocations on a list
539 * @list: Pointer to the head of the relocation list
541 static void vmw_resource_relocations_free(struct list_head *list)
543 /* Memory is validation context memory, so no need to free it */
544 INIT_LIST_HEAD(list);
548 * vmw_resource_relocations_apply - Apply all relocations on a list
550 * @cb: Pointer to the start of the command buffer bein patch. This need not be
551 * the same buffer as the one being parsed when the relocation list was built,
552 * but the contents must be the same modulo the resource ids.
553 * @list: Pointer to the head of the relocation list.
555 static void vmw_resource_relocations_apply(uint32_t *cb,
556 struct list_head *list)
558 struct vmw_resource_relocation *rel;
560 /* Validate the struct vmw_resource_relocation member size */
561 BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
562 BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
564 list_for_each_entry(rel, list, head) {
565 u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
566 switch (rel->rel_type) {
567 case vmw_res_rel_normal:
568 *addr = rel->res->id;
570 case vmw_res_rel_nop:
571 *addr = SVGA_3D_CMD_NOP;
574 if (rel->res->id == -1)
575 *addr = SVGA_3D_CMD_NOP;
581 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
582 struct vmw_sw_context *sw_context,
583 SVGA3dCmdHeader *header)
588 static int vmw_cmd_ok(struct vmw_private *dev_priv,
589 struct vmw_sw_context *sw_context,
590 SVGA3dCmdHeader *header)
596 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
599 * @sw_context: Pointer to the software context.
601 * Note that since vmware's command submission currently is protected by the
602 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
603 * only a single thread at once will attempt this.
605 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
609 ret = vmw_validation_res_reserve(sw_context->ctx, true);
613 if (sw_context->dx_query_mob) {
614 struct vmw_buffer_object *expected_dx_query_mob;
616 expected_dx_query_mob =
617 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
618 if (expected_dx_query_mob &&
619 expected_dx_query_mob != sw_context->dx_query_mob) {
628 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
629 * resource validate list unless it's already there.
631 * @dev_priv: Pointer to a device private structure.
632 * @sw_context: Pointer to the software context.
633 * @res_type: Resource type.
634 * @dirty: Whether to change dirty status.
635 * @converter: User-space visisble type specific information.
636 * @id_loc: Pointer to the location in the command buffer currently being parsed
637 * from where the user-space resource id handle is located.
638 * @p_val: Pointer to pointer to resource validalidation node. Populated on
642 vmw_cmd_res_check(struct vmw_private *dev_priv,
643 struct vmw_sw_context *sw_context,
644 enum vmw_res_type res_type,
646 const struct vmw_user_resource_conv *converter,
648 struct vmw_resource **p_res)
650 struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
651 struct vmw_resource *res;
657 if (*id_loc == SVGA3D_INVALID_ID) {
658 if (res_type == vmw_res_context) {
659 VMW_DEBUG_USER("Illegal context invalid id.\n");
665 if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
668 vmw_validation_res_set_dirty(sw_context->ctx,
669 rcache->private, dirty);
671 unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
673 ret = vmw_validation_preload_res(sw_context->ctx, size);
677 res = vmw_user_resource_noref_lookup_handle
678 (dev_priv, sw_context->fp->tfile, *id_loc, converter);
680 VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
681 (unsigned int) *id_loc);
685 ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
686 if (unlikely(ret != 0))
689 if (rcache->valid && rcache->res == res) {
690 rcache->valid_handle = true;
691 rcache->handle = *id_loc;
695 ret = vmw_resource_relocation_add(sw_context, res,
696 vmw_ptr_diff(sw_context->buf_start,
706 * vmw_rebind_dx_query - Rebind DX query associated with the context
708 * @ctx_res: context the query belongs to
710 * This function assumes binding_mutex is held.
712 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
714 struct vmw_private *dev_priv = ctx_res->dev_priv;
715 struct vmw_buffer_object *dx_query_mob;
716 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
718 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
720 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
723 cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
727 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
728 cmd->header.size = sizeof(cmd->body);
729 cmd->body.cid = ctx_res->id;
730 cmd->body.mobid = dx_query_mob->base.mem.start;
731 vmw_fifo_commit(dev_priv, sizeof(*cmd));
733 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
739 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
742 * @sw_context: Pointer to the software context.
744 * Rebind context binding points that have been scrubbed because of eviction.
746 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
748 struct vmw_ctx_validation_info *val;
751 list_for_each_entry(val, &sw_context->ctx_list, head) {
752 ret = vmw_binding_rebind_all(val->cur);
753 if (unlikely(ret != 0)) {
754 if (ret != -ERESTARTSYS)
755 VMW_DEBUG_USER("Failed to rebind context.\n");
759 ret = vmw_rebind_all_dx_query(val->ctx);
761 VMW_DEBUG_USER("Failed to rebind queries.\n");
770 * vmw_view_bindings_add - Add an array of view bindings to a context binding
773 * @sw_context: The execbuf state used for this command.
774 * @view_type: View type for the bindings.
775 * @binding_type: Binding type for the bindings.
776 * @shader_slot: The shader slot to user for the bindings.
777 * @view_ids: Array of view ids to be bound.
778 * @num_views: Number of view ids in @view_ids.
779 * @first_slot: The binding slot to be used for the first view id in @view_ids.
781 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
782 enum vmw_view_type view_type,
783 enum vmw_ctx_binding_type binding_type,
785 uint32 view_ids[], u32 num_views,
788 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
794 for (i = 0; i < num_views; ++i) {
795 struct vmw_ctx_bindinfo_view binding;
796 struct vmw_resource *view = NULL;
798 if (view_ids[i] != SVGA3D_INVALID_ID) {
799 view = vmw_view_id_val_add(sw_context, view_type,
802 VMW_DEBUG_USER("View not found.\n");
803 return PTR_ERR(view);
806 binding.bi.ctx = ctx_node->ctx;
807 binding.bi.res = view;
808 binding.bi.bt = binding_type;
809 binding.shader_slot = shader_slot;
810 binding.slot = first_slot + i;
811 vmw_binding_add(ctx_node->staged, &binding.bi,
812 shader_slot, binding.slot);
819 * vmw_cmd_cid_check - Check a command header for valid context information.
821 * @dev_priv: Pointer to a device private structure.
822 * @sw_context: Pointer to the software context.
823 * @header: A command header with an embedded user-space context handle.
825 * Convenience function: Call vmw_cmd_res_check with the user-space context
826 * handle embedded in @header.
828 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
829 struct vmw_sw_context *sw_context,
830 SVGA3dCmdHeader *header)
832 VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
833 container_of(header, typeof(*cmd), header);
835 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
836 VMW_RES_DIRTY_SET, user_context_converter,
841 * vmw_execbuf_info_from_res - Get the private validation metadata for a
842 * recently validated resource
844 * @sw_context: Pointer to the command submission context
847 * The resource pointed to by @res needs to be present in the command submission
848 * context's resource cache and hence the last resource of that type to be
849 * processed by the validation code.
851 * Return: a pointer to the private metadata of the resource, or NULL if it
854 static struct vmw_ctx_validation_info *
855 vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
856 struct vmw_resource *res)
858 struct vmw_res_cache_entry *rcache =
859 &sw_context->res_cache[vmw_res_type(res)];
861 if (rcache->valid && rcache->res == res)
862 return rcache->private;
868 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
869 struct vmw_sw_context *sw_context,
870 SVGA3dCmdHeader *header)
872 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
873 struct vmw_resource *ctx;
874 struct vmw_resource *res;
877 cmd = container_of(header, typeof(*cmd), header);
879 if (cmd->body.type >= SVGA3D_RT_MAX) {
880 VMW_DEBUG_USER("Illegal render target type %u.\n",
881 (unsigned int) cmd->body.type);
885 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
886 VMW_RES_DIRTY_SET, user_context_converter,
887 &cmd->body.cid, &ctx);
888 if (unlikely(ret != 0))
891 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
892 VMW_RES_DIRTY_SET, user_surface_converter,
893 &cmd->body.target.sid, &res);
897 if (dev_priv->has_mob) {
898 struct vmw_ctx_bindinfo_view binding;
899 struct vmw_ctx_validation_info *node;
901 node = vmw_execbuf_info_from_res(sw_context, ctx);
905 binding.bi.ctx = ctx;
906 binding.bi.res = res;
907 binding.bi.bt = vmw_ctx_binding_rt;
908 binding.slot = cmd->body.type;
909 vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
915 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
916 struct vmw_sw_context *sw_context,
917 SVGA3dCmdHeader *header)
919 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
922 cmd = container_of(header, typeof(*cmd), header);
924 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
925 VMW_RES_DIRTY_NONE, user_surface_converter,
926 &cmd->body.src.sid, NULL);
930 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
931 VMW_RES_DIRTY_SET, user_surface_converter,
932 &cmd->body.dest.sid, NULL);
935 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
936 struct vmw_sw_context *sw_context,
937 SVGA3dCmdHeader *header)
939 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
942 cmd = container_of(header, typeof(*cmd), header);
943 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
944 VMW_RES_DIRTY_NONE, user_surface_converter,
945 &cmd->body.src, NULL);
949 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
950 VMW_RES_DIRTY_SET, user_surface_converter,
951 &cmd->body.dest, NULL);
954 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
955 struct vmw_sw_context *sw_context,
956 SVGA3dCmdHeader *header)
958 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
961 cmd = container_of(header, typeof(*cmd), header);
962 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
963 VMW_RES_DIRTY_NONE, user_surface_converter,
964 &cmd->body.srcSid, NULL);
968 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
969 VMW_RES_DIRTY_SET, user_surface_converter,
970 &cmd->body.dstSid, NULL);
973 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
974 struct vmw_sw_context *sw_context,
975 SVGA3dCmdHeader *header)
977 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
980 cmd = container_of(header, typeof(*cmd), header);
981 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
982 VMW_RES_DIRTY_NONE, user_surface_converter,
983 &cmd->body.src.sid, NULL);
984 if (unlikely(ret != 0))
987 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
988 VMW_RES_DIRTY_SET, user_surface_converter,
989 &cmd->body.dest.sid, NULL);
992 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
993 struct vmw_sw_context *sw_context,
994 SVGA3dCmdHeader *header)
996 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
997 container_of(header, typeof(*cmd), header);
999 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1000 VMW_RES_DIRTY_NONE, user_surface_converter,
1001 &cmd->body.srcImage.sid, NULL);
1004 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1005 struct vmw_sw_context *sw_context,
1006 SVGA3dCmdHeader *header)
1008 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1009 container_of(header, typeof(*cmd), header);
1011 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1012 VMW_RES_DIRTY_NONE, user_surface_converter,
1013 &cmd->body.sid, NULL);
1017 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1019 * @dev_priv: The device private structure.
1020 * @new_query_bo: The new buffer holding query results.
1021 * @sw_context: The software context used for this command submission.
1023 * This function checks whether @new_query_bo is suitable for holding query
1024 * results, and if another buffer currently is pinned for query results. If so,
1025 * the function prepares the state of @sw_context for switching pinned buffers
1026 * after successful submission of the current command batch.
1028 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1029 struct vmw_buffer_object *new_query_bo,
1030 struct vmw_sw_context *sw_context)
1032 struct vmw_res_cache_entry *ctx_entry =
1033 &sw_context->res_cache[vmw_res_context];
1036 BUG_ON(!ctx_entry->valid);
1037 sw_context->last_query_ctx = ctx_entry->res;
1039 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1041 if (unlikely(new_query_bo->base.num_pages > 4)) {
1042 VMW_DEBUG_USER("Query buffer too large.\n");
1046 if (unlikely(sw_context->cur_query_bo != NULL)) {
1047 sw_context->needs_post_query_barrier = true;
1048 ret = vmw_validation_add_bo(sw_context->ctx,
1049 sw_context->cur_query_bo,
1050 dev_priv->has_mob, false);
1051 if (unlikely(ret != 0))
1054 sw_context->cur_query_bo = new_query_bo;
1056 ret = vmw_validation_add_bo(sw_context->ctx,
1057 dev_priv->dummy_query_bo,
1058 dev_priv->has_mob, false);
1059 if (unlikely(ret != 0))
1067 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1069 * @dev_priv: The device private structure.
1070 * @sw_context: The software context used for this command submission batch.
1072 * This function will check if we're switching query buffers, and will then,
1073 * issue a dummy occlusion query wait used as a query barrier. When the fence
1074 * object following that query wait has signaled, we are sure that all preceding
1075 * queries have finished, and the old query buffer can be unpinned. However,
1076 * since both the new query buffer and the old one are fenced with that fence,
1077 * we can do an asynchronus unpin now, and be sure that the old query buffer
1078 * won't be moved until the fence has signaled.
1080 * As mentioned above, both the new - and old query buffers need to be fenced
1081 * using a sequence emitted *after* calling this function.
1083 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1084 struct vmw_sw_context *sw_context)
1087 * The validate list should still hold references to all
1090 if (sw_context->needs_post_query_barrier) {
1091 struct vmw_res_cache_entry *ctx_entry =
1092 &sw_context->res_cache[vmw_res_context];
1093 struct vmw_resource *ctx;
1096 BUG_ON(!ctx_entry->valid);
1097 ctx = ctx_entry->res;
1099 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1101 if (unlikely(ret != 0))
1102 VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1105 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1106 if (dev_priv->pinned_bo) {
1107 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1108 vmw_bo_unreference(&dev_priv->pinned_bo);
1111 if (!sw_context->needs_post_query_barrier) {
1112 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1115 * We pin also the dummy_query_bo buffer so that we
1116 * don't need to validate it when emitting dummy queries
1117 * in context destroy paths.
1119 if (!dev_priv->dummy_query_bo_pinned) {
1120 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1122 dev_priv->dummy_query_bo_pinned = true;
1125 BUG_ON(sw_context->last_query_ctx == NULL);
1126 dev_priv->query_cid = sw_context->last_query_ctx->id;
1127 dev_priv->query_cid_valid = true;
1128 dev_priv->pinned_bo =
1129 vmw_bo_reference(sw_context->cur_query_bo);
1135 * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
1138 * @dev_priv: Pointer to a device private structure.
1139 * @sw_context: The software context used for this command batch validation.
1140 * @id: Pointer to the user-space handle to be translated.
1141 * @vmw_bo_p: Points to a location that, on successful return will carry a
1142 * non-reference-counted pointer to the buffer object identified by the
1143 * user-space handle in @id.
1145 * This function saves information needed to translate a user-space buffer
1146 * handle to a MOB id. The translation does not take place immediately, but
1147 * during a call to vmw_apply_relocations().
1149 * This function builds a relocation list and a list of buffers to validate. The
1150 * former needs to be freed using either vmw_apply_relocations() or
1151 * vmw_free_relocations(). The latter needs to be freed using
1152 * vmw_clear_validations.
1154 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1155 struct vmw_sw_context *sw_context,
1157 struct vmw_buffer_object **vmw_bo_p)
1159 struct vmw_buffer_object *vmw_bo;
1160 uint32_t handle = *id;
1161 struct vmw_relocation *reloc;
1164 vmw_validation_preload_bo(sw_context->ctx);
1165 vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1166 if (IS_ERR(vmw_bo)) {
1167 VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1168 return PTR_ERR(vmw_bo);
1171 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1172 vmw_user_bo_noref_release();
1173 if (unlikely(ret != 0))
1176 reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1180 reloc->mob_loc = id;
1181 reloc->vbo = vmw_bo;
1184 list_add_tail(&reloc->head, &sw_context->bo_relocations);
1190 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
1191 * to a valid SVGAGuestPtr
1193 * @dev_priv: Pointer to a device private structure.
1194 * @sw_context: The software context used for this command batch validation.
1195 * @ptr: Pointer to the user-space handle to be translated.
1196 * @vmw_bo_p: Points to a location that, on successful return will carry a
1197 * non-reference-counted pointer to the DMA buffer identified by the user-space
1200 * This function saves information needed to translate a user-space buffer
1201 * handle to a valid SVGAGuestPtr. The translation does not take place
1202 * immediately, but during a call to vmw_apply_relocations().
1204 * This function builds a relocation list and a list of buffers to validate.
1205 * The former needs to be freed using either vmw_apply_relocations() or
1206 * vmw_free_relocations(). The latter needs to be freed using
1207 * vmw_clear_validations.
1209 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1210 struct vmw_sw_context *sw_context,
1212 struct vmw_buffer_object **vmw_bo_p)
1214 struct vmw_buffer_object *vmw_bo;
1215 uint32_t handle = ptr->gmrId;
1216 struct vmw_relocation *reloc;
1219 vmw_validation_preload_bo(sw_context->ctx);
1220 vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1221 if (IS_ERR(vmw_bo)) {
1222 VMW_DEBUG_USER("Could not find or use GMR region.\n");
1223 return PTR_ERR(vmw_bo);
1226 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1227 vmw_user_bo_noref_release();
1228 if (unlikely(ret != 0))
1231 reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1235 reloc->location = ptr;
1236 reloc->vbo = vmw_bo;
1238 list_add_tail(&reloc->head, &sw_context->bo_relocations);
1244 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1246 * @dev_priv: Pointer to a device private struct.
1247 * @sw_context: The software context used for this command submission.
1248 * @header: Pointer to the command header in the command stream.
1250 * This function adds the new query into the query COTABLE
1252 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1253 struct vmw_sw_context *sw_context,
1254 SVGA3dCmdHeader *header)
1256 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1257 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1258 struct vmw_resource *cotable_res;
1264 cmd = container_of(header, typeof(*cmd), header);
1266 if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
1267 cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1270 cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1271 ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1277 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1279 * @dev_priv: Pointer to a device private struct.
1280 * @sw_context: The software context used for this command submission.
1281 * @header: Pointer to the command header in the command stream.
1283 * The query bind operation will eventually associate the query ID with its
1284 * backing MOB. In this function, we take the user mode MOB ID and use
1285 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1287 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1288 struct vmw_sw_context *sw_context,
1289 SVGA3dCmdHeader *header)
1291 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1292 struct vmw_buffer_object *vmw_bo;
1295 cmd = container_of(header, typeof(*cmd), header);
1298 * Look up the buffer pointed to by q.mobid, put it on the relocation
1299 * list so its kernel mode MOB ID can be filled in later
1301 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1307 sw_context->dx_query_mob = vmw_bo;
1308 sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1313 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1315 * @dev_priv: Pointer to a device private struct.
1316 * @sw_context: The software context used for this command submission.
1317 * @header: Pointer to the command header in the command stream.
1319 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1320 struct vmw_sw_context *sw_context,
1321 SVGA3dCmdHeader *header)
1323 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1324 container_of(header, typeof(*cmd), header);
1326 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1327 VMW_RES_DIRTY_SET, user_context_converter,
1328 &cmd->body.cid, NULL);
1332 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1334 * @dev_priv: Pointer to a device private struct.
1335 * @sw_context: The software context used for this command submission.
1336 * @header: Pointer to the command header in the command stream.
1338 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1339 struct vmw_sw_context *sw_context,
1340 SVGA3dCmdHeader *header)
1342 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1343 container_of(header, typeof(*cmd), header);
1345 if (unlikely(dev_priv->has_mob)) {
1346 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1348 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1350 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1351 gb_cmd.header.size = cmd->header.size;
1352 gb_cmd.body.cid = cmd->body.cid;
1353 gb_cmd.body.type = cmd->body.type;
1355 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1356 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1359 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1360 VMW_RES_DIRTY_SET, user_context_converter,
1361 &cmd->body.cid, NULL);
1365 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1367 * @dev_priv: Pointer to a device private struct.
1368 * @sw_context: The software context used for this command submission.
1369 * @header: Pointer to the command header in the command stream.
1371 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1372 struct vmw_sw_context *sw_context,
1373 SVGA3dCmdHeader *header)
1375 struct vmw_buffer_object *vmw_bo;
1376 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1379 cmd = container_of(header, typeof(*cmd), header);
1380 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1381 if (unlikely(ret != 0))
1384 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1386 if (unlikely(ret != 0))
1389 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1395 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1397 * @dev_priv: Pointer to a device private struct.
1398 * @sw_context: The software context used for this command submission.
1399 * @header: Pointer to the command header in the command stream.
1401 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1402 struct vmw_sw_context *sw_context,
1403 SVGA3dCmdHeader *header)
1405 struct vmw_buffer_object *vmw_bo;
1406 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1409 cmd = container_of(header, typeof(*cmd), header);
1410 if (dev_priv->has_mob) {
1411 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1413 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1415 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1416 gb_cmd.header.size = cmd->header.size;
1417 gb_cmd.body.cid = cmd->body.cid;
1418 gb_cmd.body.type = cmd->body.type;
1419 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1420 gb_cmd.body.offset = cmd->body.guestResult.offset;
1422 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1423 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1426 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1427 if (unlikely(ret != 0))
1430 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1431 &cmd->body.guestResult, &vmw_bo);
1432 if (unlikely(ret != 0))
1435 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1441 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1443 * @dev_priv: Pointer to a device private struct.
1444 * @sw_context: The software context used for this command submission.
1445 * @header: Pointer to the command header in the command stream.
1447 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1448 struct vmw_sw_context *sw_context,
1449 SVGA3dCmdHeader *header)
1451 struct vmw_buffer_object *vmw_bo;
1452 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1455 cmd = container_of(header, typeof(*cmd), header);
1456 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1457 if (unlikely(ret != 0))
1460 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1462 if (unlikely(ret != 0))
1469 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1471 * @dev_priv: Pointer to a device private struct.
1472 * @sw_context: The software context used for this command submission.
1473 * @header: Pointer to the command header in the command stream.
1475 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1476 struct vmw_sw_context *sw_context,
1477 SVGA3dCmdHeader *header)
1479 struct vmw_buffer_object *vmw_bo;
1480 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1483 cmd = container_of(header, typeof(*cmd), header);
1484 if (dev_priv->has_mob) {
1485 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1487 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1489 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1490 gb_cmd.header.size = cmd->header.size;
1491 gb_cmd.body.cid = cmd->body.cid;
1492 gb_cmd.body.type = cmd->body.type;
1493 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1494 gb_cmd.body.offset = cmd->body.guestResult.offset;
1496 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1497 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1500 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1501 if (unlikely(ret != 0))
1504 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1505 &cmd->body.guestResult, &vmw_bo);
1506 if (unlikely(ret != 0))
1512 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1513 struct vmw_sw_context *sw_context,
1514 SVGA3dCmdHeader *header)
1516 struct vmw_buffer_object *vmw_bo = NULL;
1517 struct vmw_surface *srf = NULL;
1518 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1520 SVGA3dCmdSurfaceDMASuffix *suffix;
1524 cmd = container_of(header, typeof(*cmd), header);
1525 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1526 header->size - sizeof(*suffix));
1528 /* Make sure device and verifier stays in sync. */
1529 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1530 VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1534 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1535 &cmd->body.guest.ptr, &vmw_bo);
1536 if (unlikely(ret != 0))
1539 /* Make sure DMA doesn't cross BO boundaries. */
1540 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1541 if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1542 VMW_DEBUG_USER("Invalid DMA offset.\n");
1546 bo_size -= cmd->body.guest.ptr.offset;
1547 if (unlikely(suffix->maximumOffset > bo_size))
1548 suffix->maximumOffset = bo_size;
1550 dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1551 VMW_RES_DIRTY_SET : 0;
1552 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1553 dirty, user_surface_converter,
1554 &cmd->body.host.sid, NULL);
1555 if (unlikely(ret != 0)) {
1556 if (unlikely(ret != -ERESTARTSYS))
1557 VMW_DEBUG_USER("could not find surface for DMA.\n");
1561 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1563 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1568 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1569 struct vmw_sw_context *sw_context,
1570 SVGA3dCmdHeader *header)
1572 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1573 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1574 (unsigned long)header + sizeof(*cmd));
1575 SVGA3dPrimitiveRange *range;
1580 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1581 if (unlikely(ret != 0))
1584 cmd = container_of(header, typeof(*cmd), header);
1585 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1587 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1588 VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1592 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1593 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1595 user_surface_converter,
1596 &decl->array.surfaceId, NULL);
1597 if (unlikely(ret != 0))
1601 maxnum = (header->size - sizeof(cmd->body) -
1602 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1603 if (unlikely(cmd->body.numRanges > maxnum)) {
1604 VMW_DEBUG_USER("Illegal number of index ranges.\n");
1608 range = (SVGA3dPrimitiveRange *) decl;
1609 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1610 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1612 user_surface_converter,
1613 &range->indexArray.surfaceId, NULL);
1614 if (unlikely(ret != 0))
1620 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1621 struct vmw_sw_context *sw_context,
1622 SVGA3dCmdHeader *header)
1624 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1625 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1626 ((unsigned long) header + header->size + sizeof(header));
1627 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1628 ((unsigned long) header + sizeof(*cmd));
1629 struct vmw_resource *ctx;
1630 struct vmw_resource *res;
1633 cmd = container_of(header, typeof(*cmd), header);
1635 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1636 VMW_RES_DIRTY_SET, user_context_converter,
1637 &cmd->body.cid, &ctx);
1638 if (unlikely(ret != 0))
1641 for (; cur_state < last_state; ++cur_state) {
1642 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1645 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1646 VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1647 (unsigned int) cur_state->stage);
1651 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1653 user_surface_converter,
1654 &cur_state->value, &res);
1655 if (unlikely(ret != 0))
1658 if (dev_priv->has_mob) {
1659 struct vmw_ctx_bindinfo_tex binding;
1660 struct vmw_ctx_validation_info *node;
1662 node = vmw_execbuf_info_from_res(sw_context, ctx);
1666 binding.bi.ctx = ctx;
1667 binding.bi.res = res;
1668 binding.bi.bt = vmw_ctx_binding_tex;
1669 binding.texture_stage = cur_state->stage;
1670 vmw_binding_add(node->staged, &binding.bi, 0,
1671 binding.texture_stage);
1678 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1679 struct vmw_sw_context *sw_context,
1682 struct vmw_buffer_object *vmw_bo;
1686 SVGAFifoCmdDefineGMRFB body;
1689 return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1694 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1697 * @dev_priv: Pointer to a device private struct.
1698 * @sw_context: The software context being used for this batch.
1699 * @val_node: The validation node representing the resource.
1700 * @buf_id: Pointer to the user-space backup buffer handle in the command
1702 * @backup_offset: Offset of backup into MOB.
1704 * This function prepares for registering a switch of backup buffers in the
1705 * resource metadata just prior to unreserving. It's basically a wrapper around
1706 * vmw_cmd_res_switch_backup with a different interface.
1708 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1709 struct vmw_sw_context *sw_context,
1710 struct vmw_resource *res, uint32_t *buf_id,
1711 unsigned long backup_offset)
1713 struct vmw_buffer_object *vbo;
1717 info = vmw_execbuf_info_from_res(sw_context, res);
1721 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1725 vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1731 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1733 * @dev_priv: Pointer to a device private struct.
1734 * @sw_context: The software context being used for this batch.
1735 * @res_type: The resource type.
1736 * @converter: Information about user-space binding for this resource type.
1737 * @res_id: Pointer to the user-space resource handle in the command stream.
1738 * @buf_id: Pointer to the user-space backup buffer handle in the command
1740 * @backup_offset: Offset of backup into MOB.
1742 * This function prepares for registering a switch of backup buffers in the
1743 * resource metadata just prior to unreserving. It's basically a wrapper around
1744 * vmw_cmd_res_switch_backup with a different interface.
1746 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1747 struct vmw_sw_context *sw_context,
1748 enum vmw_res_type res_type,
1749 const struct vmw_user_resource_conv
1750 *converter, uint32_t *res_id, uint32_t *buf_id,
1751 unsigned long backup_offset)
1753 struct vmw_resource *res;
1756 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1757 VMW_RES_DIRTY_NONE, converter, res_id, &res);
1761 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1766 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1768 * @dev_priv: Pointer to a device private struct.
1769 * @sw_context: The software context being used for this batch.
1770 * @header: Pointer to the command header in the command stream.
1772 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1773 struct vmw_sw_context *sw_context,
1774 SVGA3dCmdHeader *header)
1776 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1777 container_of(header, typeof(*cmd), header);
1779 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1780 user_surface_converter, &cmd->body.sid,
1781 &cmd->body.mobid, 0);
1785 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1787 * @dev_priv: Pointer to a device private struct.
1788 * @sw_context: The software context being used for this batch.
1789 * @header: Pointer to the command header in the command stream.
1791 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1792 struct vmw_sw_context *sw_context,
1793 SVGA3dCmdHeader *header)
1795 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1796 container_of(header, typeof(*cmd), header);
1798 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1799 VMW_RES_DIRTY_NONE, user_surface_converter,
1800 &cmd->body.image.sid, NULL);
1804 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1806 * @dev_priv: Pointer to a device private struct.
1807 * @sw_context: The software context being used for this batch.
1808 * @header: Pointer to the command header in the command stream.
1810 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1811 struct vmw_sw_context *sw_context,
1812 SVGA3dCmdHeader *header)
1814 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1815 container_of(header, typeof(*cmd), header);
1817 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1819 &cmd->body.sid, NULL);
1823 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1825 * @dev_priv: Pointer to a device private struct.
1826 * @sw_context: The software context being used for this batch.
1827 * @header: Pointer to the command header in the command stream.
1829 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1830 struct vmw_sw_context *sw_context,
1831 SVGA3dCmdHeader *header)
1833 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1834 container_of(header, typeof(*cmd), header);
1836 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1837 VMW_RES_DIRTY_NONE, user_surface_converter,
1838 &cmd->body.image.sid, NULL);
1842 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1845 * @dev_priv: Pointer to a device private struct.
1846 * @sw_context: The software context being used for this batch.
1847 * @header: Pointer to the command header in the command stream.
1849 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1850 struct vmw_sw_context *sw_context,
1851 SVGA3dCmdHeader *header)
1853 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1854 container_of(header, typeof(*cmd), header);
1856 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1857 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1858 &cmd->body.sid, NULL);
1862 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1865 * @dev_priv: Pointer to a device private struct.
1866 * @sw_context: The software context being used for this batch.
1867 * @header: Pointer to the command header in the command stream.
1869 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1870 struct vmw_sw_context *sw_context,
1871 SVGA3dCmdHeader *header)
1873 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1874 container_of(header, typeof(*cmd), header);
1876 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1877 VMW_RES_DIRTY_NONE, user_surface_converter,
1878 &cmd->body.image.sid, NULL);
1882 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1885 * @dev_priv: Pointer to a device private struct.
1886 * @sw_context: The software context being used for this batch.
1887 * @header: Pointer to the command header in the command stream.
1889 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1890 struct vmw_sw_context *sw_context,
1891 SVGA3dCmdHeader *header)
1893 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1894 container_of(header, typeof(*cmd), header);
1896 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1897 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1898 &cmd->body.sid, NULL);
1902 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1904 * @dev_priv: Pointer to a device private struct.
1905 * @sw_context: The software context being used for this batch.
1906 * @header: Pointer to the command header in the command stream.
1908 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1909 struct vmw_sw_context *sw_context,
1910 SVGA3dCmdHeader *header)
1912 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1915 struct vmw_resource *ctx;
1917 cmd = container_of(header, typeof(*cmd), header);
1919 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1920 VMW_RES_DIRTY_SET, user_context_converter,
1921 &cmd->body.cid, &ctx);
1922 if (unlikely(ret != 0))
1925 if (unlikely(!dev_priv->has_mob))
1928 size = cmd->header.size - sizeof(cmd->body);
1929 ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1930 cmd->body.shid, cmd + 1, cmd->body.type,
1931 size, &sw_context->staged_cmd_res);
1932 if (unlikely(ret != 0))
1935 return vmw_resource_relocation_add(sw_context, NULL,
1936 vmw_ptr_diff(sw_context->buf_start,
1942 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1944 * @dev_priv: Pointer to a device private struct.
1945 * @sw_context: The software context being used for this batch.
1946 * @header: Pointer to the command header in the command stream.
1948 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1949 struct vmw_sw_context *sw_context,
1950 SVGA3dCmdHeader *header)
1952 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1954 struct vmw_resource *ctx;
1956 cmd = container_of(header, typeof(*cmd), header);
1958 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1959 VMW_RES_DIRTY_SET, user_context_converter,
1960 &cmd->body.cid, &ctx);
1961 if (unlikely(ret != 0))
1964 if (unlikely(!dev_priv->has_mob))
1967 ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1968 cmd->body.type, &sw_context->staged_cmd_res);
1969 if (unlikely(ret != 0))
1972 return vmw_resource_relocation_add(sw_context, NULL,
1973 vmw_ptr_diff(sw_context->buf_start,
1979 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1981 * @dev_priv: Pointer to a device private struct.
1982 * @sw_context: The software context being used for this batch.
1983 * @header: Pointer to the command header in the command stream.
1985 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1986 struct vmw_sw_context *sw_context,
1987 SVGA3dCmdHeader *header)
1989 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1990 struct vmw_ctx_bindinfo_shader binding;
1991 struct vmw_resource *ctx, *res = NULL;
1992 struct vmw_ctx_validation_info *ctx_info;
1995 cmd = container_of(header, typeof(*cmd), header);
1997 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
1998 VMW_DEBUG_USER("Illegal shader type %u.\n",
1999 (unsigned int) cmd->body.type);
2003 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2004 VMW_RES_DIRTY_SET, user_context_converter,
2005 &cmd->body.cid, &ctx);
2006 if (unlikely(ret != 0))
2009 if (!dev_priv->has_mob)
2012 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2014 * This is the compat shader path - Per device guest-backed
2015 * shaders, but user-space thinks it's per context host-
2018 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2019 cmd->body.shid, cmd->body.type);
2021 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2022 VMW_RES_DIRTY_NONE);
2023 if (unlikely(ret != 0))
2026 ret = vmw_resource_relocation_add
2028 vmw_ptr_diff(sw_context->buf_start,
2030 vmw_res_rel_normal);
2031 if (unlikely(ret != 0))
2036 if (IS_ERR_OR_NULL(res)) {
2037 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2039 user_shader_converter, &cmd->body.shid,
2041 if (unlikely(ret != 0))
2045 ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2049 binding.bi.ctx = ctx;
2050 binding.bi.res = res;
2051 binding.bi.bt = vmw_ctx_binding_shader;
2052 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2053 vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2059 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2061 * @dev_priv: Pointer to a device private struct.
2062 * @sw_context: The software context being used for this batch.
2063 * @header: Pointer to the command header in the command stream.
2065 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2066 struct vmw_sw_context *sw_context,
2067 SVGA3dCmdHeader *header)
2069 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2072 cmd = container_of(header, typeof(*cmd), header);
2074 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2075 VMW_RES_DIRTY_SET, user_context_converter,
2076 &cmd->body.cid, NULL);
2077 if (unlikely(ret != 0))
2080 if (dev_priv->has_mob)
2081 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2087 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2089 * @dev_priv: Pointer to a device private struct.
2090 * @sw_context: The software context being used for this batch.
2091 * @header: Pointer to the command header in the command stream.
2093 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2094 struct vmw_sw_context *sw_context,
2095 SVGA3dCmdHeader *header)
2097 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2098 container_of(header, typeof(*cmd), header);
2100 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2101 user_shader_converter, &cmd->body.shid,
2102 &cmd->body.mobid, cmd->body.offsetInBytes);
2106 * vmw_cmd_dx_set_single_constant_buffer - Validate
2107 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2109 * @dev_priv: Pointer to a device private struct.
2110 * @sw_context: The software context being used for this batch.
2111 * @header: Pointer to the command header in the command stream.
2114 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2115 struct vmw_sw_context *sw_context,
2116 SVGA3dCmdHeader *header)
2118 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2119 struct vmw_resource *res = NULL;
2120 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2121 struct vmw_ctx_bindinfo_cb binding;
2127 cmd = container_of(header, typeof(*cmd), header);
2128 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2129 VMW_RES_DIRTY_NONE, user_surface_converter,
2130 &cmd->body.sid, &res);
2131 if (unlikely(ret != 0))
2134 binding.bi.ctx = ctx_node->ctx;
2135 binding.bi.res = res;
2136 binding.bi.bt = vmw_ctx_binding_cb;
2137 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2138 binding.offset = cmd->body.offsetInBytes;
2139 binding.size = cmd->body.sizeInBytes;
2140 binding.slot = cmd->body.slot;
2142 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2143 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2144 VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2145 (unsigned int) cmd->body.type,
2146 (unsigned int) binding.slot);
2150 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2157 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2160 * @dev_priv: Pointer to a device private struct.
2161 * @sw_context: The software context being used for this batch.
2162 * @header: Pointer to the command header in the command stream.
2164 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2165 struct vmw_sw_context *sw_context,
2166 SVGA3dCmdHeader *header)
2168 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2169 container_of(header, typeof(*cmd), header);
2170 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2171 sizeof(SVGA3dShaderResourceViewId);
2173 if ((u64) cmd->body.startView + (u64) num_sr_view >
2174 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2175 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2176 VMW_DEBUG_USER("Invalid shader binding.\n");
2180 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2182 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2183 (void *) &cmd[1], num_sr_view,
2184 cmd->body.startView);
2188 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2190 * @dev_priv: Pointer to a device private struct.
2191 * @sw_context: The software context being used for this batch.
2192 * @header: Pointer to the command header in the command stream.
2194 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2195 struct vmw_sw_context *sw_context,
2196 SVGA3dCmdHeader *header)
2198 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2199 struct vmw_resource *res = NULL;
2200 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2201 struct vmw_ctx_bindinfo_shader binding;
2207 cmd = container_of(header, typeof(*cmd), header);
2209 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2210 cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2211 VMW_DEBUG_USER("Illegal shader type %u.\n",
2212 (unsigned int) cmd->body.type);
2216 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2217 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2219 VMW_DEBUG_USER("Could not find shader for binding.\n");
2220 return PTR_ERR(res);
2223 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2224 VMW_RES_DIRTY_NONE);
2229 binding.bi.ctx = ctx_node->ctx;
2230 binding.bi.res = res;
2231 binding.bi.bt = vmw_ctx_binding_dx_shader;
2232 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2234 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2240 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2243 * @dev_priv: Pointer to a device private struct.
2244 * @sw_context: The software context being used for this batch.
2245 * @header: Pointer to the command header in the command stream.
2247 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2248 struct vmw_sw_context *sw_context,
2249 SVGA3dCmdHeader *header)
2251 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2252 struct vmw_ctx_bindinfo_vb binding;
2253 struct vmw_resource *res;
2255 SVGA3dCmdHeader header;
2256 SVGA3dCmdDXSetVertexBuffers body;
2257 SVGA3dVertexBuffer buf[];
2264 cmd = container_of(header, typeof(*cmd), header);
2265 num = (cmd->header.size - sizeof(cmd->body)) /
2266 sizeof(SVGA3dVertexBuffer);
2267 if ((u64)num + (u64)cmd->body.startBuffer >
2268 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2269 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2273 for (i = 0; i < num; i++) {
2274 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2276 user_surface_converter,
2277 &cmd->buf[i].sid, &res);
2278 if (unlikely(ret != 0))
2281 binding.bi.ctx = ctx_node->ctx;
2282 binding.bi.bt = vmw_ctx_binding_vb;
2283 binding.bi.res = res;
2284 binding.offset = cmd->buf[i].offset;
2285 binding.stride = cmd->buf[i].stride;
2286 binding.slot = i + cmd->body.startBuffer;
2288 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2295 * vmw_cmd_dx_ia_set_vertex_buffers - Validate
2296 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2298 * @dev_priv: Pointer to a device private struct.
2299 * @sw_context: The software context being used for this batch.
2300 * @header: Pointer to the command header in the command stream.
2302 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2303 struct vmw_sw_context *sw_context,
2304 SVGA3dCmdHeader *header)
2306 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2307 struct vmw_ctx_bindinfo_ib binding;
2308 struct vmw_resource *res;
2309 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2315 cmd = container_of(header, typeof(*cmd), header);
2316 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2317 VMW_RES_DIRTY_NONE, user_surface_converter,
2318 &cmd->body.sid, &res);
2319 if (unlikely(ret != 0))
2322 binding.bi.ctx = ctx_node->ctx;
2323 binding.bi.res = res;
2324 binding.bi.bt = vmw_ctx_binding_ib;
2325 binding.offset = cmd->body.offset;
2326 binding.format = cmd->body.format;
2328 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2334 * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2337 * @dev_priv: Pointer to a device private struct.
2338 * @sw_context: The software context being used for this batch.
2339 * @header: Pointer to the command header in the command stream.
2341 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2342 struct vmw_sw_context *sw_context,
2343 SVGA3dCmdHeader *header)
2345 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2346 container_of(header, typeof(*cmd), header);
2347 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2348 sizeof(SVGA3dRenderTargetViewId);
2351 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2352 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2356 ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2357 0, &cmd->body.depthStencilViewId, 1, 0);
2361 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2362 vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2367 * vmw_cmd_dx_clear_rendertarget_view - Validate
2368 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2370 * @dev_priv: Pointer to a device private struct.
2371 * @sw_context: The software context being used for this batch.
2372 * @header: Pointer to the command header in the command stream.
2374 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2375 struct vmw_sw_context *sw_context,
2376 SVGA3dCmdHeader *header)
2378 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2379 container_of(header, typeof(*cmd), header);
2381 return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
2382 cmd->body.renderTargetViewId));
2386 * vmw_cmd_dx_clear_rendertarget_view - Validate
2387 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2389 * @dev_priv: Pointer to a device private struct.
2390 * @sw_context: The software context being used for this batch.
2391 * @header: Pointer to the command header in the command stream.
2393 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2394 struct vmw_sw_context *sw_context,
2395 SVGA3dCmdHeader *header)
2397 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2398 container_of(header, typeof(*cmd), header);
2400 return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
2401 cmd->body.depthStencilViewId));
2404 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2405 struct vmw_sw_context *sw_context,
2406 SVGA3dCmdHeader *header)
2408 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2409 struct vmw_resource *srf;
2410 struct vmw_resource *res;
2411 enum vmw_view_type view_type;
2414 * This is based on the fact that all affected define commands have the
2415 * same initial command body layout.
2418 SVGA3dCmdHeader header;
2426 view_type = vmw_view_cmd_to_type(header->id);
2427 if (view_type == vmw_view_max)
2430 cmd = container_of(header, typeof(*cmd), header);
2431 if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2432 VMW_DEBUG_USER("Invalid surface id.\n");
2435 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2436 VMW_RES_DIRTY_NONE, user_surface_converter,
2438 if (unlikely(ret != 0))
2441 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2442 ret = vmw_cotable_notify(res, cmd->defined_id);
2443 if (unlikely(ret != 0))
2446 return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2447 cmd->defined_id, header,
2448 header->size + sizeof(*header),
2449 &sw_context->staged_cmd_res);
2453 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2455 * @dev_priv: Pointer to a device private struct.
2456 * @sw_context: The software context being used for this batch.
2457 * @header: Pointer to the command header in the command stream.
2459 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2460 struct vmw_sw_context *sw_context,
2461 SVGA3dCmdHeader *header)
2463 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2464 struct vmw_ctx_bindinfo_so binding;
2465 struct vmw_resource *res;
2467 SVGA3dCmdHeader header;
2468 SVGA3dCmdDXSetSOTargets body;
2469 SVGA3dSoTarget targets[];
2476 cmd = container_of(header, typeof(*cmd), header);
2477 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2479 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2480 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2484 for (i = 0; i < num; i++) {
2485 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2487 user_surface_converter,
2488 &cmd->targets[i].sid, &res);
2489 if (unlikely(ret != 0))
2492 binding.bi.ctx = ctx_node->ctx;
2493 binding.bi.res = res;
2494 binding.bi.bt = vmw_ctx_binding_so,
2495 binding.offset = cmd->targets[i].offset;
2496 binding.size = cmd->targets[i].sizeInBytes;
2499 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2505 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2506 struct vmw_sw_context *sw_context,
2507 SVGA3dCmdHeader *header)
2509 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2510 struct vmw_resource *res;
2512 * This is based on the fact that all affected define commands have
2513 * the same initial command body layout.
2516 SVGA3dCmdHeader header;
2519 enum vmw_so_type so_type;
2525 so_type = vmw_so_cmd_to_type(header->id);
2526 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2527 cmd = container_of(header, typeof(*cmd), header);
2528 ret = vmw_cotable_notify(res, cmd->defined_id);
2534 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2537 * @dev_priv: Pointer to a device private struct.
2538 * @sw_context: The software context being used for this batch.
2539 * @header: Pointer to the command header in the command stream.
2541 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2542 struct vmw_sw_context *sw_context,
2543 SVGA3dCmdHeader *header)
2546 SVGA3dCmdHeader header;
2548 SVGA3dCmdDXReadbackSubResource r_body;
2549 SVGA3dCmdDXInvalidateSubResource i_body;
2550 SVGA3dCmdDXUpdateSubResource u_body;
2551 SVGA3dSurfaceId sid;
2555 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2556 offsetof(typeof(*cmd), sid));
2557 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2558 offsetof(typeof(*cmd), sid));
2559 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2560 offsetof(typeof(*cmd), sid));
2562 cmd = container_of(header, typeof(*cmd), header);
2564 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2565 VMW_RES_DIRTY_NONE, user_surface_converter,
2569 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2570 struct vmw_sw_context *sw_context,
2571 SVGA3dCmdHeader *header)
2573 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2582 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2583 * resource for removal.
2585 * @dev_priv: Pointer to a device private struct.
2586 * @sw_context: The software context being used for this batch.
2587 * @header: Pointer to the command header in the command stream.
2589 * Check that the view exists, and if it was not created using this command
2590 * batch, conditionally make this command a NOP.
2592 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2593 struct vmw_sw_context *sw_context,
2594 SVGA3dCmdHeader *header)
2596 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2598 SVGA3dCmdHeader header;
2599 union vmw_view_destroy body;
2600 } *cmd = container_of(header, typeof(*cmd), header);
2601 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2602 struct vmw_resource *view;
2608 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2609 &sw_context->staged_cmd_res, &view);
2614 * If the view wasn't created during this command batch, it might
2615 * have been removed due to a context swapout, so add a
2616 * relocation to conditionally make this command a NOP to avoid
2619 return vmw_resource_relocation_add(sw_context, view,
2620 vmw_ptr_diff(sw_context->buf_start,
2622 vmw_res_rel_cond_nop);
2626 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2628 * @dev_priv: Pointer to a device private struct.
2629 * @sw_context: The software context being used for this batch.
2630 * @header: Pointer to the command header in the command stream.
2632 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2633 struct vmw_sw_context *sw_context,
2634 SVGA3dCmdHeader *header)
2636 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2637 struct vmw_resource *res;
2638 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2639 container_of(header, typeof(*cmd), header);
2645 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2646 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2650 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2651 cmd->body.shaderId, cmd->body.type,
2652 &sw_context->staged_cmd_res);
2656 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2658 * @dev_priv: Pointer to a device private struct.
2659 * @sw_context: The software context being used for this batch.
2660 * @header: Pointer to the command header in the command stream.
2662 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2663 struct vmw_sw_context *sw_context,
2664 SVGA3dCmdHeader *header)
2666 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2667 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2668 container_of(header, typeof(*cmd), header);
2674 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2675 &sw_context->staged_cmd_res);
2681 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2683 * @dev_priv: Pointer to a device private struct.
2684 * @sw_context: The software context being used for this batch.
2685 * @header: Pointer to the command header in the command stream.
2687 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2688 struct vmw_sw_context *sw_context,
2689 SVGA3dCmdHeader *header)
2691 struct vmw_resource *ctx;
2692 struct vmw_resource *res;
2693 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2694 container_of(header, typeof(*cmd), header);
2697 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2698 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2700 user_context_converter, &cmd->body.cid,
2705 struct vmw_ctx_validation_info *ctx_node =
2706 VMW_GET_CTX_NODE(sw_context);
2711 ctx = ctx_node->ctx;
2714 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2716 VMW_DEBUG_USER("Could not find shader to bind.\n");
2717 return PTR_ERR(res);
2720 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2721 VMW_RES_DIRTY_NONE);
2723 VMW_DEBUG_USER("Error creating resource validation node.\n");
2727 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2729 cmd->body.offsetInBytes);
2733 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2735 * @dev_priv: Pointer to a device private struct.
2736 * @sw_context: The software context being used for this batch.
2737 * @header: Pointer to the command header in the command stream.
2739 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2740 struct vmw_sw_context *sw_context,
2741 SVGA3dCmdHeader *header)
2743 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2744 container_of(header, typeof(*cmd), header);
2746 return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
2747 cmd->body.shaderResourceViewId));
2751 * vmw_cmd_dx_transfer_from_buffer - Validate
2752 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2754 * @dev_priv: Pointer to a device private struct.
2755 * @sw_context: The software context being used for this batch.
2756 * @header: Pointer to the command header in the command stream.
2758 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2759 struct vmw_sw_context *sw_context,
2760 SVGA3dCmdHeader *header)
2762 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2763 container_of(header, typeof(*cmd), header);
2766 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2767 VMW_RES_DIRTY_NONE, user_surface_converter,
2768 &cmd->body.srcSid, NULL);
2772 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2773 VMW_RES_DIRTY_SET, user_surface_converter,
2774 &cmd->body.destSid, NULL);
2778 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2780 * @dev_priv: Pointer to a device private struct.
2781 * @sw_context: The software context being used for this batch.
2782 * @header: Pointer to the command header in the command stream.
2784 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2785 struct vmw_sw_context *sw_context,
2786 SVGA3dCmdHeader *header)
2788 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2789 container_of(header, typeof(*cmd), header);
2791 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2794 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2795 VMW_RES_DIRTY_SET, user_surface_converter,
2796 &cmd->body.surface.sid, NULL);
2799 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2800 struct vmw_sw_context *sw_context,
2801 void *buf, uint32_t *size)
2803 uint32_t size_remaining = *size;
2806 cmd_id = ((uint32_t *)buf)[0];
2808 case SVGA_CMD_UPDATE:
2809 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2811 case SVGA_CMD_DEFINE_GMRFB:
2812 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2814 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2815 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2817 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2818 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2821 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
2825 if (*size > size_remaining) {
2826 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
2831 if (unlikely(!sw_context->kernel)) {
2832 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
2836 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2837 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2842 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2843 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2844 false, false, false),
2845 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2846 false, false, false),
2847 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2848 true, false, false),
2849 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2850 true, false, false),
2851 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2852 true, false, false),
2853 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2854 false, false, false),
2855 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2856 false, false, false),
2857 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2858 true, false, false),
2859 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2860 true, false, false),
2861 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2862 true, false, false),
2863 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2864 &vmw_cmd_set_render_target_check, true, false, false),
2865 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2866 true, false, false),
2867 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2868 true, false, false),
2869 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2870 true, false, false),
2871 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2872 true, false, false),
2873 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2874 true, false, false),
2875 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2876 true, false, false),
2877 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2878 true, false, false),
2879 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2880 false, false, false),
2881 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2882 true, false, false),
2883 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2884 true, false, false),
2885 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2886 true, false, false),
2887 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2888 true, false, false),
2889 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2890 true, false, false),
2891 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2892 true, false, false),
2893 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2894 true, false, false),
2895 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2896 true, false, false),
2897 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2898 true, false, false),
2899 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2900 true, false, false),
2901 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2902 &vmw_cmd_blt_surf_screen_check, false, false, false),
2903 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2904 false, false, false),
2905 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2906 false, false, false),
2907 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2908 false, false, false),
2909 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2910 false, false, false),
2911 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2912 false, false, false),
2913 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
2914 false, false, false),
2915 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
2916 false, false, false),
2917 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2918 false, false, false),
2919 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2920 false, false, false),
2921 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2922 false, false, false),
2923 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2924 false, false, false),
2925 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2926 false, false, false),
2927 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2928 false, false, false),
2929 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2930 false, false, true),
2931 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2932 false, false, true),
2933 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2934 false, false, true),
2935 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2936 false, false, true),
2937 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
2938 false, false, true),
2939 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2940 false, false, true),
2941 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2942 false, false, true),
2943 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2944 false, false, true),
2945 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2947 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2948 false, false, true),
2949 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2951 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2952 &vmw_cmd_update_gb_surface, true, false, true),
2953 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2954 &vmw_cmd_readback_gb_image, true, false, true),
2955 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2956 &vmw_cmd_readback_gb_surface, true, false, true),
2957 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2958 &vmw_cmd_invalidate_gb_image, true, false, true),
2959 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2960 &vmw_cmd_invalidate_gb_surface, true, false, true),
2961 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2962 false, false, true),
2963 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2964 false, false, true),
2965 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2966 false, false, true),
2967 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2968 false, false, true),
2969 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2970 false, false, true),
2971 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2972 false, false, true),
2973 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2975 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2976 false, false, true),
2977 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2978 false, false, false),
2979 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2981 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2983 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2985 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2987 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
2989 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2990 false, false, true),
2991 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2992 false, false, true),
2993 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2994 false, false, true),
2995 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2996 false, false, true),
2997 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2998 false, false, true),
2999 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3000 false, false, true),
3001 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3002 false, false, true),
3003 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3004 false, false, true),
3005 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3006 false, false, true),
3007 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3008 false, false, true),
3009 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3011 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3012 false, false, true),
3013 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3014 false, false, true),
3015 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3016 false, false, true),
3017 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3018 false, false, true),
3021 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3022 false, false, true),
3023 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3024 false, false, true),
3025 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3026 false, false, true),
3027 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3028 false, false, true),
3029 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3030 false, false, true),
3031 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3032 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3033 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3034 &vmw_cmd_dx_set_shader_res, true, false, true),
3035 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3037 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3039 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3041 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3043 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3045 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3046 &vmw_cmd_dx_cid_check, true, false, true),
3047 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3049 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3050 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3051 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3052 &vmw_cmd_dx_set_index_buffer, true, false, true),
3053 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3054 &vmw_cmd_dx_set_rendertargets, true, false, true),
3055 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3057 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3058 &vmw_cmd_dx_cid_check, true, false, true),
3059 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3060 &vmw_cmd_dx_cid_check, true, false, true),
3061 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3063 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3065 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3067 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3068 &vmw_cmd_dx_cid_check, true, false, true),
3069 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3071 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3073 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3075 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3077 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3079 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3081 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3082 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3083 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3084 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3085 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3087 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3089 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3090 &vmw_cmd_dx_check_subresource, true, false, true),
3091 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3092 &vmw_cmd_dx_check_subresource, true, false, true),
3093 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3094 &vmw_cmd_dx_check_subresource, true, false, true),
3095 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3096 &vmw_cmd_dx_view_define, true, false, true),
3097 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3098 &vmw_cmd_dx_view_remove, true, false, true),
3099 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3100 &vmw_cmd_dx_view_define, true, false, true),
3101 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3102 &vmw_cmd_dx_view_remove, true, false, true),
3103 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3104 &vmw_cmd_dx_view_define, true, false, true),
3105 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3106 &vmw_cmd_dx_view_remove, true, false, true),
3107 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3108 &vmw_cmd_dx_so_define, true, false, true),
3109 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3110 &vmw_cmd_dx_cid_check, true, false, true),
3111 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3112 &vmw_cmd_dx_so_define, true, false, true),
3113 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3114 &vmw_cmd_dx_cid_check, true, false, true),
3115 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3116 &vmw_cmd_dx_so_define, true, false, true),
3117 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3118 &vmw_cmd_dx_cid_check, true, false, true),
3119 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3120 &vmw_cmd_dx_so_define, true, false, true),
3121 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3122 &vmw_cmd_dx_cid_check, true, false, true),
3123 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3124 &vmw_cmd_dx_so_define, true, false, true),
3125 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3126 &vmw_cmd_dx_cid_check, true, false, true),
3127 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3128 &vmw_cmd_dx_define_shader, true, false, true),
3129 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3130 &vmw_cmd_dx_destroy_shader, true, false, true),
3131 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3132 &vmw_cmd_dx_bind_shader, true, false, true),
3133 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3134 &vmw_cmd_dx_so_define, true, false, true),
3135 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3136 &vmw_cmd_dx_cid_check, true, false, true),
3137 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3139 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3140 &vmw_cmd_dx_set_so_targets, true, false, true),
3141 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3142 &vmw_cmd_dx_cid_check, true, false, true),
3143 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3144 &vmw_cmd_dx_cid_check, true, false, true),
3145 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3146 &vmw_cmd_buffer_copy_check, true, false, true),
3147 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3148 &vmw_cmd_pred_copy_check, true, false, true),
3149 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3150 &vmw_cmd_dx_transfer_from_buffer,
3152 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3156 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3158 u32 cmd_id = ((u32 *) buf)[0];
3160 if (cmd_id >= SVGA_CMD_MAX) {
3161 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3162 const struct vmw_cmd_entry *entry;
3164 *size = header->size + sizeof(SVGA3dCmdHeader);
3165 cmd_id = header->id;
3166 if (cmd_id >= SVGA_3D_CMD_MAX)
3169 cmd_id -= SVGA_3D_CMD_BASE;
3170 entry = &vmw_cmd_entries[cmd_id];
3171 *cmd = entry->cmd_name;
3176 case SVGA_CMD_UPDATE:
3177 *cmd = "SVGA_CMD_UPDATE";
3178 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3180 case SVGA_CMD_DEFINE_GMRFB:
3181 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3182 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3184 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3185 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3186 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3188 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3189 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3190 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3201 static int vmw_cmd_check(struct vmw_private *dev_priv,
3202 struct vmw_sw_context *sw_context, void *buf,
3206 uint32_t size_remaining = *size;
3207 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3209 const struct vmw_cmd_entry *entry;
3210 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3212 cmd_id = ((uint32_t *)buf)[0];
3213 /* Handle any none 3D commands */
3214 if (unlikely(cmd_id < SVGA_CMD_MAX))
3215 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3218 cmd_id = header->id;
3219 *size = header->size + sizeof(SVGA3dCmdHeader);
3221 cmd_id -= SVGA_3D_CMD_BASE;
3222 if (unlikely(*size > size_remaining))
3225 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3228 entry = &vmw_cmd_entries[cmd_id];
3229 if (unlikely(!entry->func))
3232 if (unlikely(!entry->user_allow && !sw_context->kernel))
3233 goto out_privileged;
3235 if (unlikely(entry->gb_disable && gb))
3238 if (unlikely(entry->gb_enable && !gb))
3241 ret = entry->func(dev_priv, sw_context, header);
3242 if (unlikely(ret != 0)) {
3243 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3244 cmd_id + SVGA_3D_CMD_BASE, ret);
3250 VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3251 cmd_id + SVGA_3D_CMD_BASE);
3254 VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3255 cmd_id + SVGA_3D_CMD_BASE);
3258 VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3259 cmd_id + SVGA_3D_CMD_BASE);
3262 VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3263 cmd_id + SVGA_3D_CMD_BASE);
3267 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3268 struct vmw_sw_context *sw_context, void *buf,
3271 int32_t cur_size = size;
3274 sw_context->buf_start = buf;
3276 while (cur_size > 0) {
3278 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3279 if (unlikely(ret != 0))
3281 buf = (void *)((unsigned long) buf + size);
3285 if (unlikely(cur_size != 0)) {
3286 VMW_DEBUG_USER("Command verifier out of sync.\n");
3293 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3295 /* Memory is validation context memory, so no need to free it */
3296 INIT_LIST_HEAD(&sw_context->bo_relocations);
3299 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3301 struct vmw_relocation *reloc;
3302 struct ttm_buffer_object *bo;
3304 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3305 bo = &reloc->vbo->base;
3306 switch (bo->mem.mem_type) {
3308 reloc->location->offset += bo->offset;
3309 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3312 reloc->location->gmrId = bo->mem.start;
3315 *reloc->mob_loc = bo->mem.start;
3321 vmw_free_relocations(sw_context);
3324 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3327 if (likely(sw_context->cmd_bounce_size >= size))
3330 if (sw_context->cmd_bounce_size == 0)
3331 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3333 while (sw_context->cmd_bounce_size < size) {
3334 sw_context->cmd_bounce_size =
3335 PAGE_ALIGN(sw_context->cmd_bounce_size +
3336 (sw_context->cmd_bounce_size >> 1));
3339 vfree(sw_context->cmd_bounce);
3340 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3342 if (sw_context->cmd_bounce == NULL) {
3343 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3344 sw_context->cmd_bounce_size = 0;
3352 * vmw_execbuf_fence_commands - create and submit a command stream fence
3354 * Creates a fence object and submits a command stream marker.
3355 * If this fails for some reason, We sync the fifo and return NULL.
3356 * It is then safe to fence buffers with a NULL pointer.
3358 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
3359 * userspace handle if @p_handle is not NULL, otherwise not.
3362 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3363 struct vmw_private *dev_priv,
3364 struct vmw_fence_obj **p_fence,
3369 bool synced = false;
3371 /* p_handle implies file_priv. */
3372 BUG_ON(p_handle != NULL && file_priv == NULL);
3374 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3375 if (unlikely(ret != 0)) {
3376 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3380 if (p_handle != NULL)
3381 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3382 sequence, p_fence, p_handle);
3384 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3386 if (unlikely(ret != 0 && !synced)) {
3387 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3388 false, VMW_FENCE_WAIT_TIMEOUT);
3396 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3398 * @dev_priv: Pointer to a vmw_private struct.
3399 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3400 * @ret: Return value from fence object creation.
3401 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3402 * the information should be copied.
3403 * @fence: Pointer to the fenc object.
3404 * @fence_handle: User-space fence handle.
3405 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
3406 * @sync_file: Only used to clean up in case of an error in this function.
3408 * This function copies fence information to user-space. If copying fails, the
3409 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3410 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3411 * will hopefully be detected.
3413 * Also if copying fails, user-space will be unable to signal the fence object
3414 * so we wait for it immediately, and then unreference the user-space reference.
3417 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3418 struct vmw_fpriv *vmw_fp, int ret,
3419 struct drm_vmw_fence_rep __user *user_fence_rep,
3420 struct vmw_fence_obj *fence, uint32_t fence_handle,
3421 int32_t out_fence_fd, struct sync_file *sync_file)
3423 struct drm_vmw_fence_rep fence_rep;
3425 if (user_fence_rep == NULL)
3428 memset(&fence_rep, 0, sizeof(fence_rep));
3430 fence_rep.error = ret;
3431 fence_rep.fd = out_fence_fd;
3433 BUG_ON(fence == NULL);
3435 fence_rep.handle = fence_handle;
3436 fence_rep.seqno = fence->base.seqno;
3437 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3438 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3442 * copy_to_user errors will be detected by user space not seeing
3443 * fence_rep::error filled in. Typically user-space would have pre-set
3444 * that member to -EFAULT.
3446 ret = copy_to_user(user_fence_rep, &fence_rep,
3450 * User-space lost the fence object. We need to sync and unreference the
3453 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3455 fput(sync_file->file);
3457 if (fence_rep.fd != -1) {
3458 put_unused_fd(fence_rep.fd);
3462 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3464 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3465 (void) vmw_fence_obj_wait(fence, false, false,
3466 VMW_FENCE_WAIT_TIMEOUT);
3471 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3473 * @dev_priv: Pointer to a device private structure.
3474 * @kernel_commands: Pointer to the unpatched command batch.
3475 * @command_size: Size of the unpatched command batch.
3476 * @sw_context: Structure holding the relocation lists.
3478 * Side effects: If this function returns 0, then the command batch pointed to
3479 * by @kernel_commands will have been modified.
3481 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3482 void *kernel_commands, u32 command_size,
3483 struct vmw_sw_context *sw_context)
3487 if (sw_context->dx_ctx_node)
3488 cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3489 sw_context->dx_ctx_node->ctx->id);
3491 cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3496 vmw_apply_relocations(sw_context);
3497 memcpy(cmd, kernel_commands, command_size);
3498 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3499 vmw_resource_relocations_free(&sw_context->res_relocations);
3500 vmw_fifo_commit(dev_priv, command_size);
3506 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3507 * command buffer manager.
3509 * @dev_priv: Pointer to a device private structure.
3510 * @header: Opaque handle to the command buffer allocation.
3511 * @command_size: Size of the unpatched command batch.
3512 * @sw_context: Structure holding the relocation lists.
3514 * Side effects: If this function returns 0, then the command buffer represented
3515 * by @header will have been modified.
3517 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3518 struct vmw_cmdbuf_header *header,
3520 struct vmw_sw_context *sw_context)
3522 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3524 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3527 vmw_apply_relocations(sw_context);
3528 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3529 vmw_resource_relocations_free(&sw_context->res_relocations);
3530 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3536 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3537 * submission using a command buffer.
3539 * @dev_priv: Pointer to a device private structure.
3540 * @user_commands: User-space pointer to the commands to be submitted.
3541 * @command_size: Size of the unpatched command batch.
3542 * @header: Out parameter returning the opaque pointer to the command buffer.
3544 * This function checks whether we can use the command buffer manager for
3545 * submission and if so, creates a command buffer of suitable size and copies
3546 * the user data into that buffer.
3548 * On successful return, the function returns a pointer to the data in the
3549 * command buffer and *@header is set to non-NULL.
3551 * If command buffers could not be used, the function will return the value of
3552 * @kernel_commands on function call. That value may be NULL. In that case, the
3553 * value of *@header will be set to NULL.
3555 * If an error is encountered, the function will return a pointer error value.
3556 * If the function is interrupted by a signal while sleeping, it will return
3557 * -ERESTARTSYS casted to a pointer error value.
3559 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3560 void __user *user_commands,
3561 void *kernel_commands, u32 command_size,
3562 struct vmw_cmdbuf_header **header)
3568 if (command_size > SVGA_CB_MAX_SIZE) {
3569 VMW_DEBUG_USER("Command buffer is too large.\n");
3570 return ERR_PTR(-EINVAL);
3573 if (!dev_priv->cman || kernel_commands)
3574 return kernel_commands;
3576 /* If possible, add a little space for fencing. */
3577 cmdbuf_size = command_size + 512;
3578 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3579 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3581 if (IS_ERR(kernel_commands))
3582 return kernel_commands;
3584 ret = copy_from_user(kernel_commands, user_commands, command_size);
3586 VMW_DEBUG_USER("Failed copying commands.\n");
3587 vmw_cmdbuf_header_free(*header);
3589 return ERR_PTR(-EFAULT);
3592 return kernel_commands;
3595 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3596 struct vmw_sw_context *sw_context,
3599 struct vmw_resource *res;
3603 if (handle == SVGA3D_INVALID_ID)
3606 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3607 ret = vmw_validation_preload_res(sw_context->ctx, size);
3611 res = vmw_user_resource_noref_lookup_handle
3612 (dev_priv, sw_context->fp->tfile, handle,
3613 user_context_converter);
3615 VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
3616 (unsigned int) handle);
3617 return PTR_ERR(res);
3620 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
3621 if (unlikely(ret != 0))
3624 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
3625 sw_context->man = vmw_context_res_man(res);
3630 int vmw_execbuf_process(struct drm_file *file_priv,
3631 struct vmw_private *dev_priv,
3632 void __user *user_commands, void *kernel_commands,
3633 uint32_t command_size, uint64_t throttle_us,
3634 uint32_t dx_context_handle,
3635 struct drm_vmw_fence_rep __user *user_fence_rep,
3636 struct vmw_fence_obj **out_fence, uint32_t flags)
3638 struct vmw_sw_context *sw_context = &dev_priv->ctx;
3639 struct vmw_fence_obj *fence = NULL;
3640 struct vmw_cmdbuf_header *header;
3641 uint32_t handle = 0;
3643 int32_t out_fence_fd = -1;
3644 struct sync_file *sync_file = NULL;
3645 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3647 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3649 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3650 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3651 if (out_fence_fd < 0) {
3652 VMW_DEBUG_USER("Failed to get a fence fd.\n");
3653 return out_fence_fd;
3658 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3662 goto out_free_fence_fd;
3665 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3666 kernel_commands, command_size,
3668 if (IS_ERR(kernel_commands)) {
3669 ret = PTR_ERR(kernel_commands);
3670 goto out_free_fence_fd;
3673 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3676 goto out_free_header;
3679 sw_context->kernel = false;
3680 if (kernel_commands == NULL) {
3681 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3682 if (unlikely(ret != 0))
3685 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3687 if (unlikely(ret != 0)) {
3689 VMW_DEBUG_USER("Failed copying commands.\n");
3693 kernel_commands = sw_context->cmd_bounce;
3694 } else if (!header) {
3695 sw_context->kernel = true;
3698 sw_context->fp = vmw_fpriv(file_priv);
3699 INIT_LIST_HEAD(&sw_context->ctx_list);
3700 sw_context->cur_query_bo = dev_priv->pinned_bo;
3701 sw_context->last_query_ctx = NULL;
3702 sw_context->needs_post_query_barrier = false;
3703 sw_context->dx_ctx_node = NULL;
3704 sw_context->dx_query_mob = NULL;
3705 sw_context->dx_query_ctx = NULL;
3706 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3707 INIT_LIST_HEAD(&sw_context->res_relocations);
3708 INIT_LIST_HEAD(&sw_context->bo_relocations);
3710 if (sw_context->staged_bindings)
3711 vmw_binding_state_reset(sw_context->staged_bindings);
3713 if (!sw_context->res_ht_initialized) {
3714 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3715 if (unlikely(ret != 0))
3718 sw_context->res_ht_initialized = true;
3721 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3722 sw_context->ctx = &val_ctx;
3723 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3724 if (unlikely(ret != 0))
3727 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3729 if (unlikely(ret != 0))
3732 ret = vmw_resources_reserve(sw_context);
3733 if (unlikely(ret != 0))
3736 ret = vmw_validation_bo_reserve(&val_ctx, true);
3737 if (unlikely(ret != 0))
3740 ret = vmw_validation_bo_validate(&val_ctx, true);
3741 if (unlikely(ret != 0))
3744 ret = vmw_validation_res_validate(&val_ctx, true);
3745 if (unlikely(ret != 0))
3748 vmw_validation_drop_ht(&val_ctx);
3750 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3751 if (unlikely(ret != 0)) {
3756 if (dev_priv->has_mob) {
3757 ret = vmw_rebind_contexts(sw_context);
3758 if (unlikely(ret != 0))
3759 goto out_unlock_binding;
3763 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3764 command_size, sw_context);
3766 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3770 mutex_unlock(&dev_priv->binding_mutex);
3774 vmw_query_bo_switch_commit(dev_priv, sw_context);
3775 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
3776 (user_fence_rep) ? &handle : NULL);
3778 * This error is harmless, because if fence submission fails,
3779 * vmw_fifo_send_fence will sync. The error will be propagated to
3780 * user-space in @fence_rep
3783 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3785 vmw_execbuf_bindings_commit(sw_context, false);
3786 vmw_bind_dx_query_mob(sw_context);
3787 vmw_validation_res_unreserve(&val_ctx, false);
3789 vmw_validation_bo_fence(sw_context->ctx, fence);
3791 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3792 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
3795 * If anything fails here, give up trying to export the fence and do a
3796 * sync since the user mode will not be able to sync the fence itself.
3797 * This ensures we are still functionally correct.
3799 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3801 sync_file = sync_file_create(&fence->base);
3803 VMW_DEBUG_USER("Sync file create failed for fence\n");
3804 put_unused_fd(out_fence_fd);
3807 (void) vmw_fence_obj_wait(fence, false, false,
3808 VMW_FENCE_WAIT_TIMEOUT);
3810 /* Link the fence with the FD created earlier */
3811 fd_install(out_fence_fd, sync_file->file);
3815 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
3816 user_fence_rep, fence, handle, out_fence_fd,
3819 /* Don't unreference when handing fence out */
3820 if (unlikely(out_fence != NULL)) {
3823 } else if (likely(fence != NULL)) {
3824 vmw_fence_obj_unreference(&fence);
3827 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
3828 mutex_unlock(&dev_priv->cmdbuf_mutex);
3831 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3832 * in resource destruction paths.
3834 vmw_validation_unref_lists(&val_ctx);
3839 mutex_unlock(&dev_priv->binding_mutex);
3841 vmw_validation_bo_backoff(&val_ctx);
3843 vmw_execbuf_bindings_commit(sw_context, true);
3844 vmw_validation_res_unreserve(&val_ctx, true);
3845 vmw_resource_relocations_free(&sw_context->res_relocations);
3846 vmw_free_relocations(sw_context);
3847 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3848 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3850 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
3851 vmw_validation_drop_ht(&val_ctx);
3852 WARN_ON(!list_empty(&sw_context->ctx_list));
3853 mutex_unlock(&dev_priv->cmdbuf_mutex);
3856 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3857 * in resource destruction paths.
3859 vmw_validation_unref_lists(&val_ctx);
3862 vmw_cmdbuf_header_free(header);
3864 if (out_fence_fd >= 0)
3865 put_unused_fd(out_fence_fd);
3871 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3873 * @dev_priv: The device private structure.
3875 * This function is called to idle the fifo and unpin the query buffer if the
3876 * normal way to do this hits an error, which should typically be extremely
3879 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3881 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
3883 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
3884 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3885 if (dev_priv->dummy_query_bo_pinned) {
3886 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3887 dev_priv->dummy_query_bo_pinned = false;
3893 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3896 * @dev_priv: The device private structure.
3897 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3898 * query barrier that flushes all queries touching the current buffer pointed to
3899 * by @dev_priv->pinned_bo
3901 * This function should be used to unpin the pinned query bo, or as a query
3902 * barrier when we need to make sure that all queries have finished before the
3903 * next fifo command. (For example on hardware context destructions where the
3904 * hardware may otherwise leak unfinished queries).
3906 * This function does not return any failure codes, but make attempts to do safe
3907 * unpinning in case of errors.
3909 * The function will synchronize on the previous query barrier, and will thus
3910 * not finish until that barrier has executed.
3912 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3913 * calling this function.
3915 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3916 struct vmw_fence_obj *fence)
3919 struct vmw_fence_obj *lfence = NULL;
3920 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3922 if (dev_priv->pinned_bo == NULL)
3925 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3928 goto out_no_reserve;
3930 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3933 goto out_no_reserve;
3935 ret = vmw_validation_bo_reserve(&val_ctx, false);
3937 goto out_no_reserve;
3939 if (dev_priv->query_cid_valid) {
3940 BUG_ON(fence != NULL);
3941 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
3944 dev_priv->query_cid_valid = false;
3947 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3948 if (dev_priv->dummy_query_bo_pinned) {
3949 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3950 dev_priv->dummy_query_bo_pinned = false;
3952 if (fence == NULL) {
3953 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
3957 vmw_validation_bo_fence(&val_ctx, fence);
3959 vmw_fence_obj_unreference(&lfence);
3961 vmw_validation_unref_lists(&val_ctx);
3962 vmw_bo_unreference(&dev_priv->pinned_bo);
3967 vmw_validation_bo_backoff(&val_ctx);
3969 vmw_validation_unref_lists(&val_ctx);
3970 vmw_execbuf_unpin_panic(dev_priv);
3971 vmw_bo_unreference(&dev_priv->pinned_bo);
3975 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
3977 * @dev_priv: The device private structure.
3979 * This function should be used to unpin the pinned query bo, or as a query
3980 * barrier when we need to make sure that all queries have finished before the
3981 * next fifo command. (For example on hardware context destructions where the
3982 * hardware may otherwise leak unfinished queries).
3984 * This function does not return any failure codes, but make attempts to do safe
3985 * unpinning in case of errors.
3987 * The function will synchronize on the previous query barrier, and will thus
3988 * not finish until that barrier has executed.
3990 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
3992 mutex_lock(&dev_priv->cmdbuf_mutex);
3993 if (dev_priv->query_cid_valid)
3994 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3995 mutex_unlock(&dev_priv->cmdbuf_mutex);
3998 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
3999 struct drm_file *file_priv, size_t size)
4001 struct vmw_private *dev_priv = vmw_priv(dev);
4002 struct drm_vmw_execbuf_arg arg;
4004 static const size_t copy_offset[] = {
4005 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4006 sizeof(struct drm_vmw_execbuf_arg)};
4007 struct dma_fence *in_fence = NULL;
4009 if (unlikely(size < copy_offset[0])) {
4010 VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
4015 if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4019 * Extend the ioctl argument while maintaining backwards compatibility:
4020 * We take different code paths depending on the value of arg.version.
4022 if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4023 arg.version == 0)) {
4024 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4028 if (arg.version > 1 &&
4029 copy_from_user(&arg.context_handle,
4030 (void __user *) (data + copy_offset[0]),
4031 copy_offset[arg.version - 1] - copy_offset[0]) != 0)
4034 switch (arg.version) {
4036 arg.context_handle = (uint32_t) -1;
4043 /* If imported a fence FD from elsewhere, then wait on it */
4044 if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4045 in_fence = sync_file_get_fence(arg.imported_fence_fd);
4048 VMW_DEBUG_USER("Cannot get imported fence\n");
4052 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4057 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4058 if (unlikely(ret != 0))
4061 ret = vmw_execbuf_process(file_priv, dev_priv,
4062 (void __user *)(unsigned long)arg.commands,
4063 NULL, arg.command_size, arg.throttle_us,
4065 (void __user *)(unsigned long)arg.fence_rep,
4068 ttm_read_unlock(&dev_priv->reservation_sem);
4069 if (unlikely(ret != 0))
4072 vmw_kms_cursor_post_execbuf(dev_priv);
4076 dma_fence_put(in_fence);