/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
                                       unsigned mem_type, int sign)
{
        struct radeon_device *rdev = bo->rdev;
        u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

        switch (mem_type) {
        case TTM_PL_TT:
                if (sign > 0)
                        atomic64_add(size, &rdev->gtt_usage);
                else
                        atomic64_sub(size, &rdev->gtt_usage);
                break;
        case TTM_PL_VRAM:
                if (sign > 0)
                        atomic64_add(size, &rdev->vram_usage);
                else
                        atomic64_sub(size, &rdev->vram_usage);
                break;
        }
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);

        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        WARN_ON(!list_empty(&bo->va));
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

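/**
 * radeon_ttm_placement_from_domain - build TTM placements for a domain mask
 *
 * @rbo: buffer object to set the placements for
 * @domain: bitmask of RADEON_GEM_DOMAIN_VRAM, _GTT and _CPU
 *
 * Translates the radeon domain bitmask and the BO's creation flags into
 * the TTM placement array of @rbo, falling back to system memory when
 * no domain bit is set.
 */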
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0, i;

        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM) {
                /* Try placing BOs which don't need CPU access outside of the
                 * CPU accessible part of VRAM
                 */
                if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
                        rbo->placements[c].fpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                                     TTM_PL_FLAG_UNCACHED |
                                                     TTM_PL_FLAG_VRAM;
                }

                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM;
        }

        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_TT;
                }
        }

        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c) {
                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM;
        }

        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
                    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !rbo->placements[i].fpfn)
                        rbo->placements[i].lpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        rbo->placements[i].lpfn = 0;
        }
}

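/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * @rdev: radeon device the BO belongs to
 * @size: size in bytes, rounded up to a multiple of the page size
 * @byte_align: alignment of the buffer in bytes
 * @kernel: true for kernel-internal (uninterruptible) allocations
 * @domain: initial placement domain mask
 * @flags: RADEON_GEM_* creation flags
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to associate with the BO
 * @bo_ptr: receives the pointer to the new BO
 *
 * Returns 0 on success or a negative error code.
 */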
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel,
                     u32 domain, u32 flags, struct sg_table *sg,
                     struct reservation_object *resv,
                     struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
                                       RADEON_GEM_DOMAIN_GTT |
                                       RADEON_GEM_DOMAIN_CPU);

        bo->flags = flags;
        /* PCI GART is always snooped */
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

        /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
         * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
         */
        if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo->flags & RADEON_GEM_GTT_WC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, resv, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}

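/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 *
 * @bo: buffer object to map
 * @ptr: optional pointer that receives the kernel virtual address
 *
 * The mapping is cached in @bo and reused by later calls until
 * radeon_bo_kunmap() is called. Returns 0 on success or a negative
 * error code.
 */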
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

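/**
 * radeon_bo_unref - drop a reference to a buffer object
 *
 * @bo: pointer to the BO pointer, cleared by this function
 *
 * The BO is destroyed once the last reference is dropped.
 */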
void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

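/**
 * radeon_bo_pin_restricted - pin a BO within a restricted offset range
 *
 * @bo: buffer object to pin
 * @domain: domain to pin the BO into
 * @max_offset: highest GPU offset within the domain the BO may be
 * placed at, or 0 for no restriction
 * @gpu_addr: optional pointer that receives the GPU address of the BO
 *
 * Pins @bo into @domain, or just increments the pin count if the BO is
 * already pinned. Returns 0 on success or a negative error code.
 */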
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
                return -EPERM;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force pinning into visible VRAM */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
                        bo->placements[i].lpfn =
                                bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
                if (domain == RADEON_GEM_DOMAIN_VRAM)
                        bo->rdev->vram_pin_size += radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size += radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        }
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

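/**
 * radeon_bo_unpin - drop one pin reference of a buffer object
 *
 * @bo: buffer object to unpin
 *
 * Makes the BO evictable again once its pin count drops to zero.
 * Returns 0 on success or a negative error code.
 */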
int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->rdev->vram_pin_size -= radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size -= radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        }
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference_unlocked(&bo->gem_base);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* reserve PAT memory space to WC for VRAM */
        arch_io_reserve_memtype_wc(rdev->mc.aper_base,
                                   rdev->mc.aper_size);

        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
                                                      rdev->mc.aper_size);
        }
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
        arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
        u64 real_vram_size = rdev->mc.real_vram_size;
        u64 vram_usage = atomic64_read(&rdev->vram_usage);

        /* This function is based on the current VRAM usage.
         *
         * - If all of VRAM is free, allow relocating the number of bytes that
         *   is equal to 1/4 of the size of VRAM for this IB.
         *
         * - If more than one half of VRAM is occupied, only allow relocating
         *   1 MB of data for this IB.
         *
         * - From 0 to one half of used VRAM, the threshold decreases
         *   linearly.
         *         __________________
         * 1/4 of -|\               |
         * VRAM    | \              |
         *         |  \             |
         *         |   \            |
         *         |    \           |
         *         |     \          |
         *         |      \         |
         *         |       \________|1 MB
         *         |----------------|
         *    VRAM 0 %             100 %
         *         used            used
         *
         * Note: It's a threshold, not a limit. The threshold must be crossed
         * for buffer relocations to stop, so any buffer of an arbitrary size
         * can be moved as long as the threshold isn't crossed before
         * the relocation takes place. We don't want to disable buffer
         * relocations completely.
         *
         * The idea is that buffers should be placed in VRAM at creation time
         * and TTM should only do a minimum number of relocations during
         * command submission. In practice, you need to submit at least
         * a dozen IBs to move all buffers to VRAM if they are in GTT.
         *
         * Also, things can get pretty crazy under memory pressure and actual
         * VRAM usage can change a lot, so playing safe even at 50% does
         * consistently increase performance.
         */

        u64 half_vram = real_vram_size >> 1;
        u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        u64 bytes_moved_threshold = half_free_vram >> 1;
        return max(bytes_moved_threshold, 1024*1024ull);
}

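/**
 * radeon_bo_list_validate - reserve and validate a list of BOs
 *
 * @rdev: radeon device
 * @ticket: ww_mutex acquire context used for the reservation
 * @head: list of radeon_bo_list entries to validate
 * @ring: index of the ring the buffers will be used on
 *
 * Reserves all buffers on @head and validates each one into its
 * preferred domain, throttling moves with the threshold computed by
 * radeon_bo_get_threshold_for_moves() and retrying with the allowed
 * domains on failure. Returns 0 on success or a negative error code.
 */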
int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct radeon_bo_list *lobj;
        struct list_head duplicates;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

        INIT_LIST_HEAD(&duplicates);
        r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
        if (unlikely(r != 0)) {
                return r;
        }

        list_for_each_entry(lobj, head, tv.head) {
                struct radeon_bo *bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
                        u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
                         * Note that this allows moving at least one buffer of
                         * any size, because it doesn't take the current "bo"
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
                        if ((allowed & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
                                domain = current_domain;
                        }

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo, allowed);

                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                        bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
                                       initial_bytes_moved;

                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS &&
                                    domain != lobj->allowed_domains) {
                                        domain = lobj->allowed_domains;
                                        goto retry;
                                }
                                ttm_eu_backoff_reservation(ticket, head);
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }

        list_for_each_entry(lobj, &duplicates, tv.head) {
                lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
                lobj->tiling_flags = lobj->robj->tiling_flags;
        }

        return 0;
}

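/**
 * radeon_bo_get_surface_reg - assign a surface register to a tiled BO
 *
 * @bo: buffer object to program a surface register for; must be reserved
 *
 * Picks a free surface register, stealing one from an unpinned BO when
 * none is free, and programs it with the BO's tiling parameters.
 * Returns 0 on success or -ENOMEM when no register can be stolen.
 */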
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

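/**
 * radeon_bo_set_tiling_flags - set tiling flags and pitch of a BO
 *
 * @bo: buffer object to modify
 * @tiling_flags: RADEON_TILING_* flags to apply
 * @pitch: pitch of the buffer
 *
 * On evergreen and newer the bank width/height, macro tile aspect and
 * tile split fields are validated first. Returns 0 on success or
 * -EINVAL for invalid tiling parameters.
 */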
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

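/**
 * radeon_bo_check_tiling - keep the surface register in sync with the BO
 *
 * @bo: buffer object to check
 * @has_moved: true if the BO has just changed placement
 * @force_drop: true to unconditionally release the surface register
 *
 * A surface register is only held while a surface-tiled BO resides in
 * VRAM; it is dropped when the BO leaves VRAM and (re)acquired when it
 * enters it. Returns 0 on success or a negative error code.
 */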
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        if (!force_drop)
                lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

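/**
 * radeon_bo_move_notify - TTM move callback for radeon BOs
 *
 * @bo: TTM buffer object that is about to move
 * @new_mem: new placement of the BO, NULL on destruction
 *
 * Drops the surface register, invalidates VM mappings of the BO and
 * updates the per-domain memory usage statistics.
 */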
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;

        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
        radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

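/**
 * radeon_bo_fault_reserve_notify - TTM fault callback for radeon BOs
 *
 * @bo: TTM buffer object that was faulted in by the CPU
 *
 * Called with the BO reserved on a CPU page fault. If the BO lies
 * outside the CPU visible part of VRAM, it is moved into visible VRAM,
 * falling back to GTT when that fails. Returns 0 on success or a
 * negative error code.
 */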
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (rbo->pin_count > 0)
                return -EINVAL;

        /* hurrah the memory is not visible! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < rbo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
                        rbo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &rbo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > rdev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

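/**
 * radeon_bo_wait - wait for all fences on a buffer object to signal
 *
 * @bo: buffer object to wait for
 * @mem_type: optional pointer that receives the BO's current memory type
 * @no_wait: return -EBUSY instead of blocking when the BO is busy
 *
 * Returns 0 on success or a negative error code.
 */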
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
        if (unlikely(r != 0))
                return r;
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;

        r = ttm_bo_wait(&bo->tbo, true, no_wait);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}

851
852 /**
853  * radeon_bo_fence - add fence to buffer object
854  *
855  * @bo: buffer object in question
856  * @fence: fence to add
857  * @shared: true if fence should be added shared
858  *
859  */
860 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
861                      bool shared)
862 {
863         struct reservation_object *resv = bo->tbo.resv;
864
865         if (shared)
866                 reservation_object_add_shared_fence(resv, &fence->base);
867         else
868                 reservation_object_add_excl_fence(resv, &fence->base);
869 }