// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <drm/drm_syncobj.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>

#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

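/* The V3D_READ/V3D_WRITE/V3D_CORE_WRITE register accessors and the
 * wait_for() polling helper used throughout this file are assumed to
 * come from v3d_drv.h/v3d_regs.h; wait_for()'s second argument is a
 * timeout in milliseconds.
 */
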
static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
        /* Set OVRTMUOUT, which means that the texture sampler uniform
         * configuration's tmu output type field is used, instead of
         * using the hardware default behavior based on the texture
         * type. If you want the default behavior, you can still put
         * "2" in the indirect texture state's output_type field.
         */
        if (v3d->ver < 40)
                V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

        /* Whenever we flush the L2T cache, we always want to flush
         * the whole thing.
         */
        V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
        V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
        v3d_init_core(v3d, 0);
}

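/* v3d_init_hw_state() runs both at v3d_gem_init() time and again from
 * v3d_reset_v3d() below, since a reset wipes the state programmed in
 * v3d_init_core().
 */
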
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
        V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

        if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
                      (V3D_GMP_STATUS_RD_COUNT_MASK |
                       V3D_GMP_STATUS_WR_COUNT_MASK |
                       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
                DRM_ERROR("Failed to wait for safe GMP shutdown\n");
        }
}

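/* v3d_idle_axi() quiesces the GMP (the GPU's memory-protection block)
 * so no AXI transactions are outstanding before a power-down; as the
 * XXX note in v3d_reset() says, a plain reset doesn't need this.
 */
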
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
        if (v3d->ver >= 41)
                return;

        V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

        if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
                      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
                     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
                DRM_ERROR("Failed to wait for safe GCA shutdown\n");
        }
}

static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
        int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

        if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
                V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
                                 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
                V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

                /* GFXH-1383: The SW_INIT may cause a stray write to address 0
                 * of the unit, so reset it to its power-on value here.
                 */
                V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
        } else {
                WARN_ON_ONCE(V3D_GET_FIELD(version,
                                           V3D_TOP_GR_BRIDGE_MAJOR) != 7);
                V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
                                 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
                V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
        }
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
        if (v3d->reset)
                reset_control_reset(v3d->reset);
        else
                v3d_reset_by_bridge(v3d);

        v3d_init_hw_state(v3d);
}

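/* A reset controller from the device tree (v3d->reset) is used when
 * present; the direct bridge-register pokes above are the fallback for
 * SoCs that don't expose the V3D reset line through the reset
 * framework.
 */
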
108 v3d_reset(struct v3d_dev *v3d)
110 struct drm_device *dev = &v3d->drm;
112 DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
113 DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
114 V3D_CORE_READ(0, V3D_ERR_STAT));
115 trace_v3d_reset_begin(dev);
117 /* XXX: only needed for safe powerdown, not reset. */
119 v3d_idle_axi(v3d, 0);
124 v3d_mmu_set_page_table(v3d);
127 trace_v3d_reset_end(dev);
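/* Callers are expected to serialize resets: the job timeout handlers
 * in v3d_sched.c take v3d->reset_lock (initialized in v3d_gem_init())
 * around this function.
 */
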
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
        if (v3d->ver < 41) {
                u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

                V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
                              gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

                if (v3d->ver < 33) {
                        V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
                                      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
                }
        }
}

/* Invalidates the (read-only) L2C cache. This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
        if (v3d->ver > 32)
                return;

        V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
                       V3D_L2CACTL_L2CCLR |
                       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
        /* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
         * need to wait for completion before dispatching the job --
         * L2T accesses will be stalled until the flush has completed.
         * However, we do need to make sure we don't try to trigger a
         * new flush while the L2_CLEAN queue is trying to
         * synchronously clean after a job.
         */
        mutex_lock(&v3d->cache_clean_lock);
        V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
                       V3D_L2TCACTL_L2TFLS |
                       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
        mutex_unlock(&v3d->cache_clean_lock);
}

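/* Note the asymmetry with v3d_clean_caches() below: a flush
 * (invalidate) may complete asynchronously because L2T accesses stall
 * until it finishes, while a clean must be waited on synchronously
 * before job completion is signaled.
 */
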
/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion. So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
void
v3d_clean_caches(struct v3d_dev *v3d)
{
        struct drm_device *dev = &v3d->drm;
        int core = 0;

        trace_v3d_cache_clean_begin(dev);

        V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
        if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
                       V3D_L2TCACTL_L2TFLS), 100)) {
                DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
        }

        mutex_lock(&v3d->cache_clean_lock);
        V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
                       V3D_L2TCACTL_L2TFLS |
                       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));

        if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
                       V3D_L2TCACTL_L2TFLS), 100)) {
                DRM_ERROR("Timeout waiting for L2T clean\n");
        }

        mutex_unlock(&v3d->cache_clean_lock);

        trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches. These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
        V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
                       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
                       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
                       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
                       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
        /* Invalidate the caches from the outside in. That way if
         * another CL's concurrent use of nearby memory were to pull
         * an invalidated cacheline back in, we wouldn't leave stale
         * data in the inner cache.
         */
        v3d_flush_l3(v3d);
        v3d_invalidate_l2c(v3d, 0);
        v3d_flush_l2t(v3d, 0);
        v3d_invalidate_slices(v3d, 0);
}

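/* This is called from the scheduler's run_job paths (see v3d_sched.c)
 * before each job is dispatched, so the job observes up-to-date memory
 * rather than stale cachelines from a previous job.
 */
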
/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list). They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
                         struct ww_acquire_ctx *acquire_ctx)
{
        int i, ret;

        ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
        if (ret)
                return ret;

        for (i = 0; i < job->bo_count; i++) {
                ret = drm_gem_fence_array_add_implicit(&job->deps,
                                                       job->bo[i], true);
                if (ret) {
                        drm_gem_unlock_reservations(job->bo, job->bo_count,
                                                    acquire_ctx);
                        return ret;
                }
        }

        return 0;
}

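/* drm_gem_lock_reservations() handles the ww-mutex acquire/backoff
 * dance across the whole BO array, and drm_gem_fence_array_add_implicit()
 * (write=true) pulls each BO's implicit fences into job->deps, so the
 * scheduler won't start this job until prior users of the BOs finish.
 */
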
/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_lookup_bos(struct drm_device *dev,
               struct drm_file *file_priv,
               struct v3d_job *job,
               u64 bo_handles,
               u32 bo_count)
{
        u32 *handles;
        int ret = 0;
        int i;

        job->bo_count = bo_count;

        if (!job->bo_count) {
                /* See comment on bo_index for why we have to check
                 * this.
                 */
                DRM_DEBUG("Rendering requires BOs\n");
                return -EINVAL;
        }

        job->bo = kvmalloc_array(job->bo_count,
                                 sizeof(struct drm_gem_cma_object *),
                                 GFP_KERNEL | __GFP_ZERO);
        if (!job->bo) {
                DRM_DEBUG("Failed to allocate validated BO pointers\n");
                return -ENOMEM;
        }

        handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                DRM_DEBUG("Failed to allocate incoming GEM handles\n");
                goto fail;
        }

        if (copy_from_user(handles,
                           (void __user *)(uintptr_t)bo_handles,
                           job->bo_count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto fail;
        }

        spin_lock(&file_priv->table_lock);
        for (i = 0; i < job->bo_count; i++) {
                struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
                                                     handles[i]);
                if (!bo) {
                        DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
                                  i, handles[i]);
                        ret = -ENOENT;
                        spin_unlock(&file_priv->table_lock);
                        goto fail;
                }
                drm_gem_object_get(bo);
                job->bo[i] = bo;
        }
        spin_unlock(&file_priv->table_lock);

fail:
        kvfree(handles);
        return ret;
}

static void
v3d_job_free(struct kref *ref)
{
        struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
        unsigned long index;
        struct dma_fence *fence;
        int i;

        for (i = 0; i < job->bo_count; i++) {
                if (job->bo[i])
                        drm_gem_object_put_unlocked(job->bo[i]);
        }
        kvfree(job->bo);

        xa_for_each(&job->deps, index, fence) {
                dma_fence_put(fence);
        }
        xa_destroy(&job->deps);

        dma_fence_put(job->irq_fence);
        dma_fence_put(job->done_fence);

        pm_runtime_mark_last_busy(job->v3d->dev);
        pm_runtime_put_autosuspend(job->v3d->dev);

        kfree(job);
}

static void
v3d_render_job_free(struct kref *ref)
{
        struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
                                                  base.refcount);
        struct v3d_bo *bo, *save;

        list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
                drm_gem_object_put_unlocked(&bo->base.base);
        }

        v3d_job_free(ref);
}

void v3d_job_put(struct v3d_job *job)
{
        kref_put(&job->refcount, job->free);
}

int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        int ret;
        struct drm_v3d_wait_bo *args = data;
        ktime_t start = ktime_get();
        u64 delta_ns;
        unsigned long timeout_jiffies =
                nsecs_to_jiffies_timeout(args->timeout_ns);

        if (args->pad != 0)
                return -EINVAL;

        ret = drm_gem_reservation_object_wait(file_priv, args->handle,
                                              true, timeout_jiffies);

        /* Decrement the user's timeout, in case we got interrupted
         * such that the ioctl will be restarted.
         */
        delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
        if (delta_ns < args->timeout_ns)
                args->timeout_ns -= delta_ns;
        else
                args->timeout_ns = 0;

        /* Asked to wait beyond the jiffie/scheduler precision? */
        if (ret == -ETIME && args->timeout_ns)
                ret = -EAGAIN;

        return ret;
}

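/* Returning -EAGAIN (rather than -ETIME) with the decremented
 * timeout_ns lets userspace restart the ioctl without waiting past its
 * original deadline; libdrm's drmIoctl() retries on EAGAIN.
 */
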
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
             struct v3d_job *job, void (*free)(struct kref *ref),
             u32 in_sync)
{
        struct dma_fence *in_fence = NULL;
        int ret;

        job->v3d = v3d;
        job->free = free;

        ret = pm_runtime_get_sync(v3d->dev);
        if (ret < 0)
                return ret;

        xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

        ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
        if (ret == -EINVAL)
                goto fail;

        ret = drm_gem_fence_array_add(&job->deps, in_fence);
        if (ret)
                goto fail;

        kref_init(&job->refcount);

        return 0;
fail:
        xa_destroy(&job->deps);
        pm_runtime_put_autosuspend(v3d->dev);
        return ret;
}

static int
v3d_push_job(struct v3d_file_priv *v3d_priv,
             struct v3d_job *job, enum v3d_queue queue)
{
        int ret;

        ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
                                 v3d_priv);
        if (ret)
                return ret;

        job->done_fence = dma_fence_get(&job->base.s_fence->finished);

        /* put by scheduler job completion */
        kref_get(&job->refcount);

        drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);

        return 0;
}

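/* Callers hold v3d->sched_lock across v3d_push_job() calls so that
 * paired jobs (bin+render, or CSD+cache-clean) are pushed back-to-back
 * and the done_fence dependency between them is set up race-free.
 */
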
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
                                         struct v3d_job *job,
                                         struct ww_acquire_ctx *acquire_ctx,
                                         u32 out_sync,
                                         struct dma_fence *done_fence)
{
        struct drm_syncobj *sync_out;
        int i;

        for (i = 0; i < job->bo_count; i++) {
                /* XXX: Use shared fences for read-only objects. */
                reservation_object_add_excl_fence(job->bo[i]->resv,
                                                  job->done_fence);
        }

        drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

        /* Update the return sync object for the job */
        sync_out = drm_syncobj_find(file_priv, out_sync);
        if (sync_out) {
                drm_syncobj_replace_fence(sync_out, done_fence);
                drm_syncobj_put(sync_out);
        }
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
        struct drm_v3d_submit_cl *args = data;
        struct v3d_bin_job *bin = NULL;
        struct v3d_render_job *render;
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

        if (args->pad != 0) {
                DRM_INFO("pad must be zero: %d\n", args->pad);
                return -EINVAL;
        }

        render = kcalloc(1, sizeof(*render), GFP_KERNEL);
        if (!render)
                return -ENOMEM;

        render->start = args->rcl_start;
        render->end = args->rcl_end;
        INIT_LIST_HEAD(&render->unref_list);

        ret = v3d_job_init(v3d, file_priv, &render->base,
                           v3d_render_job_free, args->in_sync_rcl);
        if (ret) {
                kfree(render);
                return ret;
        }

        if (args->bcl_start != args->bcl_end) {
                bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
                if (!bin) {
                        v3d_job_put(&render->base);
                        return -ENOMEM;
                }

                ret = v3d_job_init(v3d, file_priv, &bin->base,
                                   v3d_job_free, args->in_sync_bcl);
                if (ret) {
                        v3d_job_put(&render->base);
                        kfree(bin);
                        return ret;
                }

                bin->start = args->bcl_start;
                bin->end = args->bcl_end;
                bin->qma = args->qma;
                bin->qms = args->qms;
                bin->qts = args->qts;
                bin->render = render;
        }

        ret = v3d_lookup_bos(dev, file_priv, &render->base,
                             args->bo_handles, args->bo_handle_count);
        if (ret)
                goto fail;

        ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx);
        if (ret)
                goto fail;

        mutex_lock(&v3d->sched_lock);
        if (bin) {
                ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
                if (ret)
                        goto fail_unreserve;

                ret = drm_gem_fence_array_add(&render->base.deps,
                                              dma_fence_get(bin->base.done_fence));
                if (ret)
                        goto fail_unreserve;
        }

        ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
        if (ret)
                goto fail_unreserve;
        mutex_unlock(&v3d->sched_lock);

        v3d_attach_fences_and_unlock_reservation(file_priv,
                                                 &render->base,
                                                 &acquire_ctx,
                                                 args->out_sync,
                                                 render->base.done_fence);

        if (bin)
                v3d_job_put(&bin->base);
        v3d_job_put(&render->base);

        return 0;

fail_unreserve:
        mutex_unlock(&v3d->sched_lock);
        drm_gem_unlock_reservations(render->base.bo,
                                    render->base.bo_count, &acquire_ctx);
fail:
        if (bin)
                v3d_job_put(&bin->base);
        v3d_job_put(&render->base);

        return ret;
}

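/* Userspace-side sketch (illustrative only; the variable names are
 * hypothetical, and real submissions come from Mesa's v3d driver):
 *
 *      struct drm_v3d_submit_cl submit = {
 *              .bcl_start = bcl_start, .bcl_end = bcl_end,
 *              .rcl_start = rcl_start, .rcl_end = rcl_end,
 *              .bo_handles = (uintptr_t)bo_handles,
 *              .bo_handle_count = bo_handle_count,
 *              .out_sync = out_syncobj,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);
 */
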
/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
        struct drm_v3d_submit_tfu *args = data;
        struct v3d_tfu_job *job;
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

        job = kcalloc(1, sizeof(*job), GFP_KERNEL);
        if (!job)
                return -ENOMEM;

        ret = v3d_job_init(v3d, file_priv, &job->base,
                           v3d_job_free, args->in_sync);
        if (ret) {
                kfree(job);
                return ret;
        }

        job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
                               sizeof(*job->base.bo), GFP_KERNEL);
        if (!job->base.bo) {
                v3d_job_put(&job->base);
                return -ENOMEM;
        }

        job->args = *args;

        spin_lock(&file_priv->table_lock);
        for (job->base.bo_count = 0;
             job->base.bo_count < ARRAY_SIZE(args->bo_handles);
             job->base.bo_count++) {
                struct drm_gem_object *bo;

                if (!args->bo_handles[job->base.bo_count])
                        break;

                bo = idr_find(&file_priv->object_idr,
                              args->bo_handles[job->base.bo_count]);
                if (!bo) {
                        DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
                                  job->base.bo_count,
                                  args->bo_handles[job->base.bo_count]);
                        ret = -ENOENT;
                        spin_unlock(&file_priv->table_lock);
                        goto fail;
                }
                drm_gem_object_get(bo);
                job->base.bo[job->base.bo_count] = bo;
        }
        spin_unlock(&file_priv->table_lock);

        ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
        if (ret)
                goto fail;

        mutex_lock(&v3d->sched_lock);
        ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
        if (ret)
                goto fail_unreserve;
        mutex_unlock(&v3d->sched_lock);

        v3d_attach_fences_and_unlock_reservation(file_priv,
                                                 &job->base, &acquire_ctx,
                                                 args->out_sync,
                                                 job->base.done_fence);

        v3d_job_put(&job->base);

        return 0;

fail_unreserve:
        mutex_unlock(&v3d->sched_lock);
        drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
                                    &acquire_ctx);
fail:
        v3d_job_put(&job->base);

        return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
        struct drm_v3d_submit_csd *args = data;
        struct v3d_csd_job *job;
        struct v3d_job *clean_job;
        struct ww_acquire_ctx acquire_ctx;
        int ret;

        trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

        if (!v3d_has_csd(v3d)) {
                DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
                return -EINVAL;
        }

        job = kcalloc(1, sizeof(*job), GFP_KERNEL);
        if (!job)
                return -ENOMEM;

        ret = v3d_job_init(v3d, file_priv, &job->base,
                           v3d_job_free, args->in_sync);
        if (ret) {
                kfree(job);
                return ret;
        }

        clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
        if (!clean_job) {
                v3d_job_put(&job->base);
                return -ENOMEM;
        }

        ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
        if (ret) {
                v3d_job_put(&job->base);
                kfree(clean_job);
                return ret;
        }

        job->args = *args;

        ret = v3d_lookup_bos(dev, file_priv, clean_job,
                             args->bo_handles, args->bo_handle_count);
        if (ret)
                goto fail;

        ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
        if (ret)
                goto fail;

        mutex_lock(&v3d->sched_lock);
        ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
        if (ret)
                goto fail_unreserve;

        ret = drm_gem_fence_array_add(&clean_job->deps,
                                      dma_fence_get(job->base.done_fence));
        if (ret)
                goto fail_unreserve;

        ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
        if (ret)
                goto fail_unreserve;
        mutex_unlock(&v3d->sched_lock);

        v3d_attach_fences_and_unlock_reservation(file_priv,
                                                 clean_job,
                                                 &acquire_ctx,
                                                 args->out_sync,
                                                 clean_job->done_fence);

        v3d_job_put(&job->base);
        v3d_job_put(clean_job);

        return 0;

fail_unreserve:
        mutex_unlock(&v3d->sched_lock);
        drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
                                    &acquire_ctx);
fail:
        v3d_job_put(&job->base);
        v3d_job_put(clean_job);

        return ret;
}

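/* The CSD job and the CACHE_CLEAN job are chained through
 * clean_job->deps above, so the fence userspace receives in out_sync
 * only signals after the compute dispatch has finished and its writes
 * have been cleaned out of the L2T.
 */
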
int
v3d_gem_init(struct drm_device *dev)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        u32 pt_size = 4096 * 1024;
        int ret, i;

        for (i = 0; i < V3D_MAX_QUEUES; i++)
                v3d->queue[i].fence_context = dma_fence_context_alloc(1);

        spin_lock_init(&v3d->mm_lock);
        spin_lock_init(&v3d->job_lock);
        mutex_init(&v3d->bo_lock);
        mutex_init(&v3d->reset_lock);
        mutex_init(&v3d->sched_lock);
        mutex_init(&v3d->cache_clean_lock);

        /* Note: We don't allocate address 0. Various bits of HW
         * treat 0 as special, such as the occlusion query counters
         * where 0 means "disabled".
         */
        drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

        v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
                               &v3d->pt_paddr,
                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
        if (!v3d->pt) {
                drm_mm_takedown(&v3d->mm);
                dev_err(v3d->dev,
                        "Failed to allocate page tables. "
                        "Please ensure you have CMA enabled.\n");
                return -ENOMEM;
        }

        v3d_init_hw_state(v3d);
        v3d_mmu_set_page_table(v3d);

        ret = v3d_sched_init(v3d);
        if (ret) {
                drm_mm_takedown(&v3d->mm);
                dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
                                  v3d->pt_paddr);
                return ret;
        }

        return 0;
}

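/* Sizing note: pt_size is 4MB of 4-byte page-table entries (1M
 * entries), and each entry maps a 4KB page, so the MMU covers a 4GB
 * V3D address space; drm_mm above hands out page numbers in
 * [1, 1M - 1], reserving page 0.
 */
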
void
v3d_gem_destroy(struct drm_device *dev)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);

        v3d_sched_fini(v3d);

        /* Waiting for jobs to finish would need to be done before
         * unregistering V3D.
         */
        WARN_ON(v3d->bin_job);
        WARN_ON(v3d->render_job);

        drm_mm_takedown(&v3d->mm);

        dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
}