/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
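
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller, e.g. the fbdev setup code, allocates from stolen and falls
 * back to an ordinary shmemfs-backed object when stolen is unavailable
 * or exhausted:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(dev_priv, size);
 *	if (obj == NULL)
 *		obj = i915_gem_object_create_shmem(dev_priv, size);
 *
 * Failure here is expected and non-fatal: stolen may be disabled
 * entirely (see i915_gem_init_stolen()).
 */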

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = I915_READ(PGTBL_CTL);
		/*
		 * On gen4 the upper bits of the GTT base address live in a
		 * separate field of PGTBL_CTL and have to be merged back in.
		 */
		if (IS_GEN(dev_priv, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;
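
		/*
		 * Worked example (illustrative numbers): with stolen at
		 * [0x3f800000, 0x40000000) and a 512KiB GTT at 0x3ff80000,
		 * stolen[0] shrinks to [0x3f800000, 0x3ff80000) and
		 * stolen[1] becomes empty, so we keep the chunk below the
		 * GTT and lose only the pages the GTT itself occupies.
		 */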

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN(dev_priv, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}

	return 0;
}

void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
				CTG_STOLEN_RESERVED :
				ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
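	/*
	 * E.g. (illustrative numbers) with stolen_top at 0x80000000 and a
	 * 1MiB reserved size, the reserved range works out to
	 * [0x7ff00000, 0x80000000).
	 */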
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 11:
	default:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start of the reservation.
	 */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;
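
	/*
	 * Worked example (illustrative numbers): 64MiB of stolen with an
	 * 8MiB reservation at the top gives reserved_total = 8MiB, so the
	 * allocator below manages the remaining 56MiB.
	 */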

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;
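
	/*
	 * Note: no struct page is ever set on this sg entry, so consumers
	 * must use only the dma address and length, never sg_page().
	 */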
	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/*
	 * To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	mutex_lock(&ggtt->vm.mutex);
	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}