author     Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>   2021-03-15 17:05:40 +0100
committer  Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>   2021-04-13 15:44:38 +0200
commit     f3886af97f9c77b74186bf4cf5f94e13065e0ead (patch)
tree       16beefde65d9d77416a5442bd40e9035e8089e47
parent     1e0c26584a1f23f63fbb71fededba0109d005e69 (diff)
lib/intel_allocator_reloc: Add reloc allocator
Relocations won't be available on discrete hardware, so IGT needs allocator support. Tests that must cover all generations would otherwise have to diverge their code paths with conditional constructs, which in the long term becomes cumbersome and confusing. To avoid that, add a pseudo-reloc allocator whose main task is simply to return incremented offsets. This way tests can skip the mentioned conditionals and just acquire offsets from the allocator.

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Petri Latvala <petri.latvala@intel.com>
-rw-r--r--   lib/intel_allocator_reloc.c   190
1 file changed, 190 insertions(+), 0 deletions(-)
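For context, a minimal sketch of how a test could consume the new backend, assuming the allocator frontend from lib/intel_allocator.h (intel_allocator_open(), intel_allocator_alloc(), intel_allocator_free(), intel_allocator_close() and the INTEL_ALLOCATOR_RELOC type); those helpers are not part of this patch, which only adds the reloc strategy itself.

/*
 * Usage sketch (assumption): the intel_allocator frontend API and the
 * INTEL_ALLOCATOR_RELOC type come from lib/intel_allocator.h and are not
 * added by this patch.
 */
#include "igt.h"
#include "intel_allocator.h"

static void reloc_allocator_example(int fd)
{
	uint32_t handle = gem_create(fd, 4096);
	uint64_t ahnd, offset;

	/* Open an allocator handle backed by the reloc (incrementing) strategy. */
	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);

	/* Offsets are handed out sequentially, starting from the 256 KiB bias. */
	offset = intel_allocator_alloc(ahnd, handle, 4096, 0);
	igt_debug("handle %u -> offset 0x%" PRIx64 "\n", handle, offset);

	intel_allocator_free(ahnd, handle);
	intel_allocator_close(ahnd);
	gem_close(fd, handle);
}

Because the reloc backend only increments offsets, the same test code can run unchanged on platforms with relocations and on discrete parts without them.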
diff --git a/lib/intel_allocator_reloc.c b/lib/intel_allocator_reloc.c
new file mode 100644
index 00000000..abf9c30c
--- /dev/null
+++ b/lib/intel_allocator_reloc.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include "igt.h"
+#include "igt_x86.h"
+#include "igt_rand.h"
+#include "intel_allocator.h"
+
+struct intel_allocator *intel_allocator_reloc_create(int fd);
+
+struct intel_allocator_reloc {
+ uint64_t bias;
+ uint32_t prng;
+ uint64_t gtt_size;
+ uint64_t start;
+ uint64_t end;
+ uint64_t offset;
+
+ /* statistics */
+ uint64_t allocated_objects;
+};
+
+static uint64_t get_bias(int fd)
+{
+ (void) fd;
+
+ return 256 << 10;
+}
+
+static void intel_allocator_reloc_get_address_range(struct intel_allocator *ial,
+ uint64_t *startp,
+ uint64_t *endp)
+{
+ struct intel_allocator_reloc *ialr = ial->priv;
+
+ if (startp)
+ *startp = ialr->start;
+
+ if (endp)
+ *endp = ialr->end;
+}
+
+static uint64_t intel_allocator_reloc_alloc(struct intel_allocator *ial,
+ uint32_t handle, uint64_t size,
+ uint64_t alignment)
+{
+ struct intel_allocator_reloc *ialr = ial->priv;
+ uint64_t offset, aligned_offset;
+
+ (void) handle;
+
+ alignment = max(alignment, 4096);
+ aligned_offset = ALIGN(ialr->offset, alignment);
+
+ /* Check we won't exceed end */
+ if (aligned_offset + size > ialr->end)
+ aligned_offset = ALIGN(ialr->start, alignment);
+
+ offset = aligned_offset;
+ ialr->offset = offset + size;
+ ialr->allocated_objects++;
+
+ return offset;
+}
+
+static bool intel_allocator_reloc_free(struct intel_allocator *ial,
+ uint32_t handle)
+{
+ struct intel_allocator_reloc *ialr = ial->priv;
+
+ (void) handle;
+
+ ialr->allocated_objects--;
+
+ return false;
+}
+
+static bool intel_allocator_reloc_is_allocated(struct intel_allocator *ial,
+ uint32_t handle, uint64_t size,
+ uint64_t offset)
+{
+ (void) ial;
+ (void) handle;
+ (void) size;
+ (void) offset;
+
+ return false;
+}
+
+static void intel_allocator_reloc_destroy(struct intel_allocator *ial)
+{
+ igt_assert(ial);
+
+ free(ial->priv);
+ free(ial);
+}
+
+static bool intel_allocator_reloc_reserve(struct intel_allocator *ial,
+ uint32_t handle,
+ uint64_t start, uint64_t end)
+{
+ (void) ial;
+ (void) handle;
+ (void) start;
+ (void) end;
+
+ return false;
+}
+
+static bool intel_allocator_reloc_unreserve(struct intel_allocator *ial,
+ uint32_t handle,
+ uint64_t start, uint64_t end)
+{
+ (void) ial;
+ (void) handle;
+ (void) start;
+ (void) end;
+
+ return false;
+}
+
+static bool intel_allocator_reloc_is_reserved(struct intel_allocator *ial,
+ uint64_t start, uint64_t end)
+{
+ (void) ial;
+ (void) start;
+ (void) end;
+
+ return false;
+}
+
+static void intel_allocator_reloc_print(struct intel_allocator *ial, bool full)
+{
+ struct intel_allocator_reloc *ialr = ial->priv;
+
+ (void) full;
+
+ igt_info("<ial: %p, fd: %d> allocated objects: %" PRIx64 "\n",
+ ial, ial->fd, ialr->allocated_objects);
+}
+
+static bool intel_allocator_reloc_is_empty(struct intel_allocator *ial)
+{
+ struct intel_allocator_reloc *ialr = ial->priv;
+
+ return !ialr->allocated_objects;
+}
+
+#define RESERVED 4096
+struct intel_allocator *intel_allocator_reloc_create(int fd)
+{
+ struct intel_allocator *ial;
+ struct intel_allocator_reloc *ialr;
+
+ igt_debug("Using reloc allocator\n");
+ ial = calloc(1, sizeof(*ial));
+ igt_assert(ial);
+
+ ial->fd = fd;
+ ial->get_address_range = intel_allocator_reloc_get_address_range;
+ ial->alloc = intel_allocator_reloc_alloc;
+ ial->free = intel_allocator_reloc_free;
+ ial->is_allocated = intel_allocator_reloc_is_allocated;
+ ial->reserve = intel_allocator_reloc_reserve;
+ ial->unreserve = intel_allocator_reloc_unreserve;
+ ial->is_reserved = intel_allocator_reloc_is_reserved;
+ ial->destroy = intel_allocator_reloc_destroy;
+ ial->print = intel_allocator_reloc_print;
+ ial->is_empty = intel_allocator_reloc_is_empty;
+
+ ialr = ial->priv = calloc(1, sizeof(*ialr));
+ igt_assert(ial->priv);
+ ialr->prng = (uint32_t) to_user_pointer(ial);
+ ialr->gtt_size = gem_aperture_size(fd);
+ igt_debug("Gtt size: %" PRId64 "\n", ialr->gtt_size);
+ if (!gem_uses_full_ppgtt(fd))
+ ialr->gtt_size /= 2;
+
+ ialr->bias = ialr->offset = get_bias(fd);
+ ialr->start = ialr->bias;
+ ialr->end = ialr->gtt_size - RESERVED;
+
+ ialr->allocated_objects = 0;
+
+ return ial;
+}