author     Rob Herring <robh@kernel.org>  2019-07-26 16:09:43 -0600
committer  Rob Herring <robh@kernel.org>  2019-08-12 14:21:37 -0600
commit     187d2929206e6b098312c174ea873e4cedf5420d (patch)
tree       c0679df7d1c566543b748fadaf5e00a67bcf46c1 /drivers/gpu/drm/panfrost/panfrost_gem.h
parent     b31bdd1389fc765c07ab3d5b341092cb16807d29 (diff)
drm/panfrost: Add support for GPU heap allocations
The midgard/bifrost GPUs need to allocate GPU heap memory which is
allocated on GPU page faults and not pinned in memory. The vendor driver
calls this functionality GROW_ON_GPF.

This implementation assumes that BOs allocated with the
PANFROST_BO_NOEXEC flag are never mmapped or exported. Both of those may
actually work, but I'm unsure if there's some interaction there. It
would cause the whole object to be pinned in memory which would defeat
the point of this.

On faults, we map in 2MB at a time in order to utilize huge pages (if
enabled). Currently, once we've mapped pages in, they are only unmapped
if the BO is freed. Once we add shrinker support, we can unmap pages
with the shrinker.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Boris Brezillon <boris.brezillon@collabora.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190808222200.13176-9-robh@kernel.org
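[Editor's note] As a rough illustration of how userspace would request such an on-demand heap BO, here is a minimal sketch. It assumes the PANFROST_BO_HEAP and PANFROST_BO_NOEXEC UAPI flags from this series, the create_heap_bo() helper name is made up, and the exact include path for the UAPI header depends on the build setup:

#include <stdint.h>

#include <xf86drm.h>

#include "panfrost_drm.h"	/* panfrost UAPI header; install path may vary */

/*
 * Illustrative sketch, not part of the patch: create a 64 MB heap BO.
 * Only the GPU VA range is reserved up front; physical pages are
 * allocated and mapped 2MB at a time as the GPU faults on them.
 */
static int create_heap_bo(int fd, uint64_t *gpu_va, uint32_t *handle)
{
	struct drm_panfrost_create_bo create = {
		.size = 64 * 1024 * 1024,
		/* Heap BOs are not executable and are never mmapped. */
		.flags = PANFROST_BO_NOEXEC | PANFROST_BO_HEAP,
	};
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create);
	if (ret)
		return ret;

	*gpu_va = create.offset;
	*handle = create.handle;
	return 0;
}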
Diffstat (limited to 'drivers/gpu/drm/panfrost/panfrost_gem.h')
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h  8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index d4c7aa1790a7..e10f58316915 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -9,10 +9,12 @@
 
 struct panfrost_gem_object {
 	struct drm_gem_shmem_object base;
+	struct sg_table *sgts;
 
 	struct drm_mm_node node;
 	bool is_mapped		:1;
 	bool noexec		:1;
+	bool is_heap		:1;
 };
 
 static inline
@@ -21,6 +23,12 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
+static inline
+struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+{
+	return container_of(node, struct panfrost_gem_object, node);
+}
+
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
 
 struct drm_gem_object *
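[Editor's note] For context, a sketch of how the new drm_mm_node_to_panfrost_bo() helper pairs with the fault handling added elsewhere in this series. This is not the code from panfrost_mmu.c: the addr_to_heap_bo() name is made up, and it assumes the driver's drm_mm allocator tracks GPU VA in page-sized units:

#include <linux/types.h>
#include <linux/mm.h>

#include <drm/drm_mm.h>

#include "panfrost_gem.h"

/*
 * Illustrative sketch: given a faulting GPU address, find the drm_mm
 * node that covers it and recover the owning BO with the new helper.
 * Only heap BOs may be grown on fault; everything else is a real fault.
 */
static struct panfrost_gem_object *
addr_to_heap_bo(struct drm_mm *mm, u64 addr)
{
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;

	drm_mm_for_each_node(node, mm) {
		if (offset >= node->start &&
		    offset < node->start + node->size) {
			struct panfrost_gem_object *bo =
				drm_mm_node_to_panfrost_bo(node);

			return bo->is_heap ? bo : NULL;
		}
	}

	return NULL;
}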