Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_ggtt.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_ggtt.c	430
1 file changed, 62 insertions(+), 368 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 57c97554393b..5263dda7f8d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -3,12 +3,14 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/agp_backend.h>
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <drm/i915_drm.h>
+#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
@@ -20,9 +22,6 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -104,7 +103,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- if (!intel_vtd_active())
+ if (!intel_vtd_active(i915))
return false;
if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
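intel_vtd_active() now takes the device so the check can be per-device rather than relying on global intel_iommu state. A minimal sketch of such a helper, assuming device_iommu_mapped() as the backing query (illustrative only; the real definition lives elsewhere in the driver and may differ):

static inline bool intel_vtd_active(struct drm_i915_private *i915)
{
	if (device_iommu_mapped(i915->drm.dev))
		return true;

	/* Running as a guest, assume the host is enforcing VT-d. */
	return i915_run_as_guest();
}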
@@ -116,17 +115,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
return false;
}
-void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to suspend the mappings for
+ *
+ * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ */
+void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
int open;
- mutex_lock(&ggtt->vm.mutex);
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+
+ mutex_lock(&vm->mutex);
/* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
+ open = atomic_xchg(&vm->open, 0);
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
@@ -139,11 +147,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt)
}
}
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->invalidate(ggtt);
- atomic_set(&ggtt->vm.open, open);
+ vm->clear_range(vm, 0, vm->total);
- mutex_unlock(&ggtt->vm.mutex);
+ atomic_set(&vm->open, open);
+
+ mutex_unlock(&vm->mutex);
+}
+
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+{
+ i915_ggtt_suspend_vm(&ggtt->vm);
+ ggtt->invalidate(ggtt);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
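Factoring the VM walk out of i915_ggtt_suspend() lets any address space that maps objects directly to HW reuse it; DPT VMs are the intended second user. A hypothetical caller sketch, assuming an intel_framebuffer whose dpt_vm member points at its DPT address space (names modelled on the DPT code this export targets, but illustrative here):

static void dpt_suspend_sketch(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* Suspend DPT mappings the same way as the GGTT's. */
		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}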
@@ -192,7 +206,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
- gen8_pte_t pte = addr | _PAGE_PRESENT;
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
if (flags & PTE_LM)
pte |= GEN12_GGTT_PTE_LM;
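gen8_ggtt_pte_encode() simply ORs the present bit into the page-aligned DMA address, plus the LM bit when the backing page lives in device-local memory. A worked example (the address is made up; the constants are the PTE bits defined alongside the GTT code):

	dma_addr_t addr = 0x1234000;	/* 4K-aligned DMA address */
	gen8_pte_t pte = gen8_ggtt_pte_encode(addr, I915_CACHE_NONE, PTE_LM);
	/* pte == 0x1234000 | GEN8_PAGE_PRESENT | GEN12_GGTT_PTE_LM */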
@@ -875,21 +889,6 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-int ggtt_set_pages(struct i915_vma *vma)
-{
- int ret;
-
- GEM_BUG_ON(vma->pages);
-
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@@ -924,6 +923,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
@@ -950,8 +950,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
@@ -1077,6 +1075,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = nop_clear_range;
if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
@@ -1100,8 +1099,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
@@ -1129,6 +1126,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
if (needs_idle_maps(i915)) {
drm_notice(&i915->drm,
@@ -1145,8 +1143,6 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
drm_notice(&i915->drm,
@@ -1212,11 +1208,11 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
int ret;
- ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915));
if (ret)
return ret;
- if (intel_vtd_active())
+ if (intel_vtd_active(i915))
drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
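to_gt(i915) replaces direct &i915->gt dereferences so that call sites survive the move to multiple GTs per device. A sketch of what the accessor amounts to at this stage (assumption: the primary GT is still embedded in drm_i915_private):

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt;
}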
@@ -1253,368 +1249,66 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
-void i915_ggtt_resume(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to restore the mappings for
+ *
+ * Restore the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ *
+ * Returns %true if the mapping was restored for any object that was in a
+ * write domain before suspend.
+ */
+bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
- bool flush = false;
+ bool write_domain_objs = false;
int open;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+ vm->clear_range(vm, 0, vm->total);
/* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
+ open = atomic_xchg(&vm->open, 0);
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
unsigned int was_bound =
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- vma->ops->bind_vma(&ggtt->vm, NULL, vma,
+ vma->ops->bind_vma(vm, NULL, vma,
obj ? obj->cache_level : 0,
was_bound);
if (obj) { /* only used during resume => exclusive access */
- flush |= fetch_and_zero(&obj->write_domain);
+ write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
}
- atomic_set(&ggtt->vm.open, open);
- ggtt->invalidate(ggtt);
-
- if (flush)
- wbinvd_on_all_cpus();
-
- if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
- setup_private_pat(ggtt->vm.gt->uncore);
-
- intel_ggtt_restore_fences(ggtt);
-}
-
-static struct scatterlist *
-rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int column, row;
- unsigned int src_idx;
-
- for (column = 0; column < width; column++) {
- unsigned int left;
-
- src_idx = src_stride * (height - 1) + column + offset;
- for (row = 0; row < height; row++) {
- st->nents++;
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
- sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) =
- i915_gem_object_get_dma_address(obj, src_idx);
- sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= src_stride;
- }
-
- left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_rotation_info_size(rot_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
- sg = rotate_pages(obj, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].src_stride,
- rot_info->plane[i].dst_stride,
- st, sg);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width,
- rot_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj,
- unsigned int offset, unsigned int alignment_pad,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int row;
-
- if (!width || !height)
- return sg;
-
- if (alignment_pad) {
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, alignment_pad * 4096, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = alignment_pad * 4096;
- sg = sg_next(sg);
- }
-
- for (row = 0; row < height; row++) {
- unsigned int left = width * I915_GTT_PAGE_SIZE;
-
- while (left) {
- dma_addr_t addr;
- unsigned int length;
-
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
-
- addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
- length = min(left, length);
-
- st->nents++;
-
- sg_set_page(sg, NULL, length, 0);
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = length;
- sg = sg_next(sg);
-
- offset += length / I915_GTT_PAGE_SIZE;
- left -= length;
- }
-
- offset += src_stride - width;
-
- left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_remap_pages(struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_remapped_info_size(rem_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int gtt_offset = 0;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- unsigned int alignment_pad = 0;
-
- if (rem_info->plane_alignment)
- alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
-
- sg = remap_pages(obj,
- rem_info->plane[i].offset, alignment_pad,
- rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
- st, sg);
-
- gtt_offset += alignment_pad +
- rem_info->plane[i].dst_stride * rem_info->plane[i].height;
- }
-
- i915_sg_trim(st);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
+ atomic_set(&vm->open, open);
- drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width,
- rem_info->plane[0].height, size);
-
- return ERR_PTR(ret);
+ return write_domain_objs;
}
-static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
-{
- struct sg_table *st;
- struct scatterlist *sg, *iter;
- unsigned int count = view->partial.size;
- unsigned int offset;
- int ret = -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
-
- sg = st->sgl;
- st->nents = 0;
- do {
- unsigned int len;
-
- len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
-
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
-
- return st;
- }
-
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
- return ERR_PTR(ret);
-}
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma)
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
- int ret;
+ bool flush;
- /*
- * The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
- * must be the vma->pages. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
- switch (vma->ggtt_view.type) {
- default:
- GEM_BUG_ON(vma->ggtt_view.type);
- fallthrough;
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
+ flush = i915_ggtt_resume_vm(&ggtt->vm);
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
+ ggtt->invalidate(ggtt);
- case I915_GGTT_VIEW_REMAPPED:
- vma->pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
- break;
+ if (flush)
+ wbinvd_on_all_cpus();
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- }
+ if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
+ setup_private_pat(ggtt->vm.gt->uncore);
- ret = 0;
- if (IS_ERR(vma->pages)) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- drm_err(&vma->vm->i915->drm,
- "Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
+ intel_ggtt_restore_fences(ggtt);
}
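On resume the boolean return propagates the flush decision to the caller: the GGTT wrapper above follows a %true return with wbinvd_on_all_cpus(), and only the GGTT path reloads the PAT and restores the fences. A hypothetical DPT counterpart to the suspend sketch earlier (fb->dpt_vm again assumed; the return value can be ignored when no CPU write domains are involved):

static void dpt_resume_sketch(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}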