path: root/lib/i915/intel_memory_region.c
author    Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>    2022-05-09 16:55:41 +0200
committer Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>    2022-05-10 08:33:46 +0200
commit    93fa4422bd91c47fbc5699efa3725287a7754699 (patch)
tree      9139af283af4f21d30aa5b68505cb2dfde7a8d6f /lib/i915/intel_memory_region.c
parent    fe3c194ba90615e2402461348357c88a23864275 (diff)
lib/intel_memory_region: Use separate context for probing offset and alignment
Probing alignment/offset (A/O) in the default context works properly only
when there are no other processes competing for the same vm space. To avoid
the risk that a single probe is issued on an offset already in use by
another process, use a dedicated context for this purpose where possible.

In other words, when forking occurs before the A/O cache is filled (it is
subject to COW), each child will exercise A/O probing individually. Sharing
the default context then risks probing an offset which is in flight in
another child, so children can observe different A/O values. Such behavior
is not allowed, as the allocator infrastructure requires the same type,
strategy and alignment on a single vm. We expect coherent A/O across
children, so we use a separate context to meet this requirement.

v2: on old gens which have no logical contexts, use the default context
v3: add the missing condition for context destroy (Matt)

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Fixes: https://gitlab.freedesktop.org/drm/intel/-/issues/5729
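
[Editor's illustration] To make the mechanism concrete, below is a minimal
sketch of the pattern the patch applies, not code from the patch itself:
probe_in_private_ctx() is a hypothetical name and the includes are
approximate. __gem_context_create() leaves ctx at 0 when logical contexts
are unavailable, so execbuf falls back to the default context and the
destroy is skipped.

#include <stdint.h>
#include <string.h>

#include "drmtest.h"
#include "ioctl_wrappers.h"
#include "i915/gem_context.h"

/* Hypothetical helper illustrating the dedicated-context pattern. */
static void probe_in_private_ctx(int i915)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 eb;
	uint32_t ctx = 0;

	/*
	 * Return value deliberately ignored: on old gens without logical
	 * contexts this fails and ctx stays 0, i.e. the default context
	 * is used (the v2 fallback).
	 */
	__gem_context_create(i915, &ctx);

	memset(&obj, 0, sizeof(obj));
	memset(&eb, 0, sizeof(eb));
	eb.buffers_ptr = to_user_pointer(&obj);
	eb.buffer_count = 1;
	eb.rsvd1 = ctx;	/* run the probe on the private vm, if any */

	/* ... create obj, pin it at candidate offsets, call execbuf ... */

	/* Destroy only a context we actually created (the v3 fix). */
	if (ctx)
		gem_context_destroy(i915, ctx);
}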
Diffstat (limited to 'lib/i915/intel_memory_region.c')
-rw-r--r--  lib/i915/intel_memory_region.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/lib/i915/intel_memory_region.c b/lib/i915/intel_memory_region.c
index 593f4bed..6bf6aab1 100644
--- a/lib/i915/intel_memory_region.c
+++ b/lib/i915/intel_memory_region.c
@@ -630,7 +630,7 @@ uint64_t gem_detect_min_start_offset_for_region(int i915, uint32_t region)
 	struct drm_i915_gem_execbuffer2 eb;
 	uint64_t start_offset = 0;
 	uint64_t bb_size = PAGE_SIZE;
-	uint32_t *batch;
+	uint32_t *batch, ctx = 0;
 	uint16_t devid = intel_get_drm_devid(i915);
 	struct cache_entry *entry, *newentry;
 
@@ -640,12 +640,16 @@ uint64_t gem_detect_min_start_offset_for_region(int i915, uint32_t region)
 		goto out;
 	pthread_mutex_unlock(&cache_mutex);
 
+	/* Use separate context if possible to avoid offset overlapping */
+	__gem_context_create(i915, &ctx);
+
 	memset(&obj, 0, sizeof(obj));
 	memset(&eb, 0, sizeof(eb));
 	eb.buffers_ptr = to_user_pointer(&obj);
 	eb.buffer_count = 1;
 	eb.flags = I915_EXEC_DEFAULT;
+	eb.rsvd1 = ctx;
 
 	igt_assert(__gem_create_in_memory_regions(i915, &obj.handle, &bb_size, region) == 0);
 	obj.flags = EXEC_OBJECT_PINNED;
 
@@ -670,6 +674,8 @@ uint64_t gem_detect_min_start_offset_for_region(int i915, uint32_t region)
 		igt_assert(start_offset <= 1ull << 48);
 	}
 	gem_close(i915, obj.handle);
+	if (ctx)
+		gem_context_destroy(i915, ctx);
 
 	newentry = malloc(sizeof(*newentry));
 	if (!newentry)
@@ -770,7 +776,7 @@ uint64_t gem_detect_min_alignment_for_regions(int i915,
 	struct drm_i915_gem_execbuffer2 eb;
 	uint64_t min_alignment = PAGE_SIZE;
 	uint64_t bb_size = PAGE_SIZE, obj_size = PAGE_SIZE;
-	uint32_t *batch;
+	uint32_t *batch, ctx = 0;
 	uint16_t devid = intel_get_drm_devid(i915);
 	struct cache_entry *entry, *newentry;
 
@@ -780,6 +786,9 @@ uint64_t gem_detect_min_alignment_for_regions(int i915,
 		goto out;
 	pthread_mutex_unlock(&cache_mutex);
 
+	/* Use separate context if possible to avoid offset overlapping */
+	__gem_context_create(i915, &ctx);
+
 	memset(obj, 0, sizeof(obj));
 	memset(&eb, 0, sizeof(eb));
 
@@ -787,6 +796,7 @@ uint64_t gem_detect_min_alignment_for_regions(int i915,
 	eb.buffers_ptr = to_user_pointer(obj);
 	eb.buffer_count = ARRAY_SIZE(obj);
 	eb.flags = I915_EXEC_BATCH_FIRST | I915_EXEC_DEFAULT;
+	eb.rsvd1 = ctx;
 
 	igt_assert(__gem_create_in_memory_regions(i915, &obj[0].handle,
 						  &bb_size, region1) == 0);
@@ -815,6 +825,8 @@ uint64_t gem_detect_min_alignment_for_regions(int i915,
 	gem_close(i915, obj[0].handle);
 	gem_close(i915, obj[1].handle);
+	if (ctx)
+		gem_context_destroy(i915, ctx);
 
 	newentry = malloc(sizeof(*newentry));
 	if (!newentry)
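
[Editor's illustration] For reference, a hedged usage sketch of the two
helpers this patch touches; after the fix, the cached values are expected
to be coherent across forked children. report_smem_limits() is a
hypothetical wrapper; REGION_SMEM, drm_open_driver() and igt_info() are
existing IGT helpers, and the includes are approximate.

#include <stdint.h>
#include <unistd.h>

#include "drmtest.h"
#include "igt_core.h"
#include "i915/intel_memory_region.h"

/* Hypothetical wrapper: query and print the probed limits for smem. */
static void report_smem_limits(void)
{
	int i915 = drm_open_driver(DRIVER_INTEL);
	uint64_t offset, alignment;

	offset = gem_detect_min_start_offset_for_region(i915, REGION_SMEM);
	alignment = gem_detect_min_alignment_for_regions(i915, REGION_SMEM,
							 REGION_SMEM);

	igt_info("smem: min start offset 0x%llx, min alignment 0x%llx\n",
		 (unsigned long long)offset, (unsigned long long)alignment);

	close(i915);
}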