/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

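/*
 * Sketch of that reuse (simplified; the actual call sites live in the
 * display/fbdev code, not in this file): the BIOS framebuffer is wrapped
 * in a GEM object via i915_gem_object_create_stolen_for_preallocated()
 * using the stolen offset and size the BIOS programmed, while anything
 * else simply asks for fresh space with i915_gem_object_create_stolen().
 */
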
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
	 */
	if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0,
						    ggtt->stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

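/*
 * Example (a sketch only, not a call site from this file): a user wanting
 * a raw chunk of stolen memory without a GEM object wrapper would do
 * roughly
 *
 *	struct drm_mm_node node = {};
 *
 *	if (i915_gem_stolen_insert_node(dev_priv, &node, size, 4096) == 0) {
 *		use dev_priv->mm.stolen_base + node.start ...
 *		i915_gem_stolen_remove_node(dev_priv, &node);
 *	}
 *
 * where "size" is whatever the caller needs; the helpers above take care
 * of serialising against other stolen allocations via stolen_lock.
 */
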
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		u32 bsm;

		pci_read_config_dword(pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev)) {
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_845G(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32)ggtt->stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    ggtt->stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)ggtt->stolen_size);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	unsigned long stolen_top = dev_priv->mm.stolen_base +
				   ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	unsigned long stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long reserved_total, reserved_base = 0, reserved_size;
	unsigned long stolen_top;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev))
			g4x_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) ||
		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;

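	/*
	 * Worked example with made-up numbers: given 64MB of stolen memory
	 * and a reserved area that starts 8MB below stolen_top,
	 * reserved_total is 8MB and stolen_usable_size becomes 56MB; the
	 * allocator below is then initialised over [0, 56MB).
	 */
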
	/*
	 * Basic memrange allocator for stolen space.
	 *
	 * TODO: Notice that some platforms require us to not use the first page
	 * of the stolen memory but their BIOSes may still put the framebuffer
	 * on the first page. So we don't reserve this page for now because of
	 * that. Our current solution is to just prevent new nodes from being
	 * inserted on the first page - see the check we have at
	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
	 * problem later.
	 */
	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);

	return 0;
}

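/*
 * Note that a zero return from i915_gem_init_stolen() does not by itself
 * mean stolen memory is usable: in the vGPU/DMAR/no-BSM paths above the
 * function returns 0 without ever calling drm_mm_init(), and the
 * drm_mm_initialized() checks in the helpers then turn every later stolen
 * allocation into a harmless no-op.
 */
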
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > ggtt->stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

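/*
 * Stolen objects build their sg_table at creation time (see
 * _i915_gem_object_create_stolen() below) and keep it pinned for their
 * whole lifetime, so the regular get_pages hook should never run; hence
 * the BUG() in the stub that follows.
 */
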
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev->struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;

	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err;
	}

	vma->pages = obj->pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
	obj->bind_count++;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err:
	i915_gem_object_put(obj);
	return NULL;
}