summaryrefslogtreecommitdiff
path: root/lib/i915/gem_engine_topology.c
diff options
context:
space:
mode:
authorAndi Shyti <andi.shyti@intel.com>2019-03-28 17:09:48 +0200
committerAndi Shyti <andi.shyti@intel.com>2019-05-13 01:58:57 +0300
commit7d439890b2b9deda54dd2929bb1591f9791eed28 (patch)
tree2808a2b75b86590cbbe39bd8e026fc3b180b90f8 /lib/i915/gem_engine_topology.c
parent364a71b13ffd73607563dbb5a015a59622f942bf (diff)
lib/i915: add gem_engine_topology library and for_each loop definition
The gem_engine_topology library is a set of functions that interface with the query and getparam/setparam ioctls. The library's access point is the 'intel_init_engine_list()' function that, everytime is called, generates the list of active engines and returns them in a 'struct intel_engine_data'. The structure contains only the engines that are actively present in the GPU. The function can work in both the cases that the query and getparam ioctls are implemented or not by the running kernel. In case they are implemented, a query is made to the driver to fetch the list of active engines. In case they are not implemented, the list is taken from the 'intel_execution_engines2' array and stored only after checking their presence. The gem_engine_topology library provides some iteration helpers: - intel_get_current_engine(): provides the current engine in the iteration. - intel_get_current_physical_engine(): provides the current physical engine, if the current engine is a virtual engine, it moves forward until it finds a physical engine. - intel_next_engine() it just increments the counter so that it points to the next engine. Extend the 'for_each_engine_class_instance' so that it can loop using the new 'intel_init_engine_list()' and rename it to 'for_each_context_engine'. Move '__for_each_engine_class_instance' to gem_engine_topology.h and rename it to '__for_each_static_engine'. Update accordingly tests/perf_pmu.c to use correctly the new for_each loops. Signed-off-by: Andi Shyti <andi.shyti@intel.com>
Diffstat (limited to 'lib/i915/gem_engine_topology.c')
-rw-r--r--lib/i915/gem_engine_topology.c282
1 files changed, 282 insertions, 0 deletions
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
new file mode 100644
index 00000000..7a4a1760
--- /dev/null
+++ b/lib/i915/gem_engine_topology.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "drmtest.h"
+#include "ioctl_wrappers.h"
+
+#include "i915/gem_engine_topology.h"
+
+#define DEFINE_CONTEXT_PARAM(e__, p__, c__, N__) \
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(e__, N__); \
+ struct drm_i915_gem_context_param p__ = { \
+ .param = I915_CONTEXT_PARAM_ENGINES, \
+ .ctx_id = c__, \
+ .size = SIZEOF_CTX_PARAM, \
+ .value = to_user_pointer(&e__), \
+ }
+
+/* Issue DRM_IOCTL_I915_QUERY; return 0 on success, -errno on failure. */
+static int __gem_query(int fd, struct drm_i915_query *q)
+{
+	int ret = igt_ioctl(fd, DRM_IOCTL_I915_QUERY, q) ? -errno : 0;
+
+	/* Clear errno so callers never observe a stale value. */
+	errno = 0;
+	return ret;
+}
+
+/* Query the i915 driver, asserting that the ioctl succeeds. */
+static void gem_query(int fd, struct drm_i915_query *q)
+{
+	igt_assert_eq(__gem_query(fd, q), 0);
+}
+
+/*
+ * Fetch DRM_I915_QUERY_ENGINE_INFO into 'query_engines' (a buffer of
+ * 'length' bytes) via a single-item i915 query.
+ */
+static void query_engines(int fd,
+			  struct drm_i915_query_engine_info *query_engines,
+			  int length)
+{
+	struct drm_i915_query_item item = {
+		.query_id = DRM_I915_QUERY_ENGINE_INFO,
+		.length = length,
+		.data_ptr = to_user_pointer(query_engines),
+	};
+	struct drm_i915_query query = {
+		.items_ptr = to_user_pointer(&item),
+		.num_items = 1,
+	};
+
+	gem_query(fd, &query);
+}
+
+/*
+ * Write the discovered engine list into the I915_CONTEXT_PARAM_ENGINES
+ * payload pointed at by 'param' and bind the map to the context via
+ * set_param.
+ */
+static void ctx_map_engines(int fd, struct intel_engine_data *ed,
+			    struct drm_i915_gem_context_param *param)
+{
+	struct i915_context_param_engines *engines =
+			(struct i915_context_param_engines *) param->value;
+	int i = 0;
+
+	/* Copy each engine's class/instance pair into the uAPI array. */
+	for (typeof(engines->engines[0]) *p =
+			&engines->engines[0];
+			i < ed->nengines; i++, p++) {
+		p->engine_class = ed->engines[i].class;
+		p->engine_instance = ed->engines[i].instance;
+	}
+
+	/* Shrink the param size to cover only the 'i' entries filled in. */
+	param->size = offsetof(typeof(*engines), engines[i]);
+	engines->extensions = 0;
+
+	gem_context_set_param(fd, param);
+}
+
+/*
+ * Fill in 'e2' from a (class, instance, flags) triple.  A class of
+ * I915_ENGINE_CLASS_INVALID marks a virtual engine; otherwise the name
+ * is looked up in the static intel_execution_engines2 table, falling
+ * back to "unknown" for engines the table does not list.
+ */
+static void init_engine(struct intel_execution_engine2 *e2,
+			int class, int instance, uint64_t flags)
+{
+	const struct intel_execution_engine2 *__e2;
+	static const char *unknown_name = "unknown",
+			  *virtual_name = "virtual";
+
+	e2->class    = class;
+	e2->instance = instance;
+	e2->flags    = flags;
+
+	/* engine is a virtual engine */
+	if (class == I915_ENGINE_CLASS_INVALID) {
+		e2->name = virtual_name;
+		e2->is_virtual = true;
+		return;
+	}
+
+	/* Scan the static table; the sentinel entry has a NULL name. */
+	__for_each_static_engine(__e2)
+		if (__e2->class == class && __e2->instance == instance)
+			break;
+
+	if (__e2->name) {
+		e2->name = __e2->name;
+	} else {
+		/* Terminate the warning line like every other log message. */
+		igt_warn("found unknown engine (%d, %d)\n", class, instance);
+		e2->name = unknown_name;
+	}
+
+	/* just to remark it */
+	e2->is_virtual = false;
+}
+
+/*
+ * Ask the kernel for the active engine list and store it in 'ed'.
+ * The raw query result is decoded through a stack buffer aliased as a
+ * drm_i915_query_engine_info.
+ */
+static void query_engine_list(int fd, struct intel_engine_data *ed)
+{
+	uint8_t buff[SIZEOF_QUERY] = { };
+	struct drm_i915_query_engine_info *query_engine =
+			(struct drm_i915_query_engine_info *) buff;
+	int i;
+
+	query_engines(fd, query_engine, SIZEOF_QUERY);
+
+	/* The engine's position in the map doubles as its flags value. */
+	for (i = 0; i < query_engine->num_engines; i++)
+		init_engine(&ed->engines[i],
+			    query_engine->engines[i].engine.engine_class,
+			    query_engine->engines[i].engine.engine_instance, i);
+
+	ed->nengines = query_engine->num_engines;
+}
+
+/*
+ * Return the engine at the current iteration position, or NULL once the
+ * iterator is exhausted.  Checking 'n >= nengines' first also covers an
+ * empty engine list: the original order returned a pointer to the
+ * uninitialized engines[0] entry when nengines == 0.
+ */
+struct intel_execution_engine2 *
+intel_get_current_engine(struct intel_engine_data *ed)
+{
+	if (ed->n >= ed->nengines)
+		ed->current_engine = NULL;
+	else if (!ed->n)
+		ed->current_engine = &ed->engines[0];
+
+	return ed->current_engine;
+}
+
+/*
+ * Advance the iterator one step; past the last engine, 'n' saturates at
+ * nengines and current_engine becomes NULL.
+ */
+void intel_next_engine(struct intel_engine_data *ed)
+{
+	ed->n++;
+	if (ed->n >= ed->nengines) {
+		ed->n = ed->nengines;
+		ed->current_engine = NULL;
+	} else {
+		ed->current_engine = &ed->engines[ed->n];
+	}
+}
+
+/*
+ * Return the current engine, skipping over virtual engines until a
+ * physical one (or the end of the list) is reached.
+ *
+ * Bug fix: 'e' must be re-fetched after every intel_next_engine() call.
+ * The original loop evaluated intel_get_current_engine() only once, so
+ * if the current engine was virtual the condition kept testing that same
+ * stale pointer and the loop never terminated.
+ */
+struct intel_execution_engine2 *
+intel_get_current_physical_engine(struct intel_engine_data *ed)
+{
+	struct intel_execution_engine2 *e;
+
+	for (e = intel_get_current_engine(ed);
+	     e && e->is_virtual;
+	     e = intel_get_current_engine(ed))
+		intel_next_engine(ed);
+
+	return e;
+}
+
+/*
+ * Read the context's engine map.  Returns:
+ *   -ENODEV  when only listing subtests (no device access wanted),
+ *   -1       when the kernel lacks I915_CONTEXT_PARAM_ENGINES, so the
+ *            caller must fall back to the static engine list,
+ *    0       on success, with p->size rewritten from a byte count to
+ *            the number of engines in the map (0 = default map).
+ */
+static int gem_topology_get_param(int fd,
+				  struct drm_i915_gem_context_param *p)
+{
+	if (igt_only_list_subtests())
+		return -ENODEV;
+
+	if (__gem_context_get_param(fd, p))
+		return -1; /* using default engine map */
+
+	if (!p->size)
+		return 0;
+
+	/* bytes -> engine count: strip the header, divide by entry size */
+	p->size = (p->size - sizeof(struct i915_context_param_engines)) /
+		(offsetof(struct i915_context_param_engines,
+			engines[1]) -
+		sizeof(struct i915_context_param_engines));
+
+	igt_assert_f(p->size <= GEM_MAX_ENGINES, "unsupported engine count\n");
+
+	return 0;
+}
+
+/*
+ * Build the list of active engines for context 'ctx_id'.
+ *
+ * Three paths:
+ *  1. Kernel lacks the engines context param: probe each entry of the
+ *     static intel_execution_engines2 table with gem_has_ring().
+ *  2. Param exists but the context uses the default map (size == 0):
+ *     query the kernel for the physical engines and bind them to the
+ *     context via ctx_map_engines().
+ *  3. Context already has an engine map: decode it entry by entry.
+ */
+struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id)
+{
+	DEFINE_CONTEXT_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
+	struct intel_engine_data engine_data = { };
+	int i;
+
+	if (gem_topology_get_param(fd, &param)) {
+		/* if kernel does not support engine/context mapping */
+		const struct intel_execution_engine2 *e2;
+
+		igt_debug("using pre-allocated engine list\n");
+
+		__for_each_static_engine(e2) {
+			struct intel_execution_engine2 *__e2 =
+				&engine_data.engines[engine_data.nengines];
+
+			if (!igt_only_list_subtests()) {
+				__e2->flags = gem_class_instance_to_eb_flags(fd,
+						e2->class, e2->instance);
+
+				/* skip engines absent from this GPU */
+				if (!gem_has_ring(fd, __e2->flags))
+					continue;
+			} else {
+				__e2->flags = -1; /* 0xfff... */
+			}
+
+			__e2->name = e2->name;
+			__e2->instance = e2->instance;
+			__e2->class = e2->class;
+			__e2->is_virtual = false;
+
+			engine_data.nengines++;
+		}
+		return engine_data;
+	}
+
+	if (!param.size) {
+		/* default map: discover physical engines and pin the map */
+		query_engine_list(fd, &engine_data);
+		ctx_map_engines(fd, &engine_data, &param);
+	} else {
+		/* param.size holds the engine count after get_param */
+		for (i = 0; i < param.size; i++)
+			init_engine(&engine_data.engines[i],
+				    engines.engines[i].engine_class,
+				    engines.engines[i].engine_instance,
+				    i);
+
+		engine_data.nengines = i;
+	}
+
+	return engine_data;
+}
+
+/*
+ * Look up the engine at index 'engine' in the context's engine map and
+ * fill 'e' with its class/instance.  Returns 0 on success, -EINVAL when
+ * 'e' is NULL, the kernel lacks the engines param, the context uses the
+ * default map, or the index is out of range.
+ */
+int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
+			      struct intel_execution_engine2 *e)
+{
+	DEFINE_CONTEXT_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
+
+	if (!e || gem_topology_get_param(fd, &param) || !param.size)
+		return -EINVAL;
+
+	/* param.size is the engine count; reject out-of-range indices
+	 * instead of reading uninitialized map entries. */
+	if (engine >= param.size)
+		return -EINVAL;
+
+	e->class = engines.engines[engine].engine_class;
+	e->instance = engines.engines[engine].engine_instance;
+
+	return 0;
+}
+
+/*
+ * If context 'ctx' still uses the default engine map, bind an explicit
+ * map containing every physical engine.  A no-op when the kernel lacks
+ * the engines param or the context already has a map.
+ */
+void gem_context_set_all_engines(int fd, uint32_t ctx)
+{
+	DEFINE_CONTEXT_PARAM(engines, param, ctx, GEM_MAX_ENGINES);
+	struct intel_engine_data engine_data = { };
+
+	if (!gem_topology_get_param(fd, &param) && !param.size) {
+		query_engine_list(fd, &engine_data);
+		ctx_map_engines(fd, &engine_data, &param);
+	}
+}
+
+/*
+ * Probe whether the running kernel supports the engines context param
+ * (and hence the engine-topology query interface).
+ */
+bool gem_has_engine_topology(int fd)
+{
+	struct drm_i915_gem_context_param p = {
+		.param = I915_CONTEXT_PARAM_ENGINES,
+	};
+
+	return __gem_context_get_param(fd, &p) == 0;
+}