Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc')
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 51 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h | 106 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h | 52 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h | 14 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h | 21 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc.c | 63 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 22 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 532 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h | 14 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 2 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 233 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 37 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2 |
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_uc.c | 14 |
15 files changed, 642 insertions, 523 deletions
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h new file mode 100644 index 000000000000..90efef8a73e4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_ACTIONS_ABI_H +#define _ABI_GUC_ACTIONS_ABI_H + +enum intel_guc_action { + INTEL_GUC_ACTION_DEFAULT = 0x0, + INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, + INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, + INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40, + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, + INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, + INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, + INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, + INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, + INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, + INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, + INTEL_GUC_ACTION_LIMIT +}; + +enum intel_guc_preempt_options { + INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, + INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, +}; + +enum intel_guc_report_status { + INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0, + INTEL_GUC_REPORT_STATUS_ACKED = 0x1, + INTEL_GUC_REPORT_STATUS_ERROR = 0x2, + INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, +}; + +enum intel_guc_sleep_state_status { + INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, + INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, + INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 +#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 +}; + +#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) +#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 +#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) +#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) + +#endif /* _ABI_GUC_ACTIONS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h new file mode 100644 index 000000000000..d38935f47ecf --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_COMMUNICATION_CTB_ABI_H +#define _ABI_GUC_COMMUNICATION_CTB_ABI_H + +#include <linux/types.h> + +/** + * DOC: CTB based communication + * + * The CTB (command transport buffer) communication between Host and GuC + * is based on u32 data stream written to the shared buffer. One buffer can + * be used to transmit data only in one direction (one-directional channel). + * + * Current status of each buffer is stored in the buffer descriptor. + * The buffer descriptor holds tail and head fields that represent the active data + * stream.
The tail field is updated by the data producer (sender), and head + * field is updated by the data consumer (receiver):: + * + * +------------+ + * | DESCRIPTOR | +=================+============+========+ + * +============+ | | MESSAGE(s) | | + * | address |--------->+=================+============+========+ + * +------------+ + * | head | ^-----head--------^ + * +------------+ + * | tail | ^---------tail-----------------^ + * +------------+ + * | size | ^---------------size--------------------^ + * +------------+ + * + * Each message in data stream starts with the single u32 treated as a header, + * followed by optional set of u32 data that makes message specific payload:: + * + * +------------+---------+---------+---------+ + * | MESSAGE | + * +------------+---------+---------+---------+ + * | msg[0] | [1] | ... | [n-1] | + * +------------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+ + * | | 0 | ... | n | + * +======+=====+=========+=========+=========+ + * | 31:16| code| | | | + * +------+-----+ | | | + * | 15:5|flags| | | | + * +------+-----+ | | | + * | 4:0| len| | | | + * +------+-----+---------+---------+---------+ + * + * ^-------------len-------------^ + * + * The message header consists of: + * + * - **len**, indicates length of the message payload (in u32) + * - **code**, indicates message code + * - **flags**, holds various bits to control message handling + */ + +/* + * Describes single command transport buffer. + * Used by both guc-master and clients. + */ +struct guc_ct_buffer_desc { + u32 addr; /* gfx address */ + u64 host_private; /* host private data */ + u32 size; /* size in bytes */ + u32 head; /* offset updated by GuC*/ + u32 tail; /* offset updated by owner */ + u32 is_in_error; /* error indicator */ + u32 reserved1; + u32 reserved2; + u32 owner; /* id of the channel owner */ + u32 owner_sub_id; /* owner-defined field for extra tracking */ + u32 reserved[5]; +} __packed; + +/* Type of command transport buffer */ +#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u +#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u + +/* + * Definition of the command transport message header (DW0) + * + * bit[4..0] message len (in dwords) + * bit[7..5] reserved + * bit[8] response (G2H only) + * bit[8] write fence to desc (H2G only) + * bit[9] write status to H2G buff (H2G only) + * bit[10] send status back via G2H (H2G only) + * bit[15..11] reserved + * bit[31..16] action code + */ +#define GUC_CT_MSG_LEN_SHIFT 0 +#define GUC_CT_MSG_LEN_MASK 0x1F +#define GUC_CT_MSG_IS_RESPONSE (1 << 8) +#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) +#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) +#define GUC_CT_MSG_SEND_STATUS (1 << 10) +#define GUC_CT_MSG_ACTION_SHIFT 16 +#define GUC_CT_MSG_ACTION_MASK 0xFFFF + +#endif /* _ABI_GUC_COMMUNICATION_CTB_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h new file mode 100644 index 000000000000..be066a62e9e0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_COMMUNICATION_MMIO_ABI_H +#define _ABI_GUC_COMMUNICATION_MMIO_ABI_H + +/** + * DOC: MMIO based communication + * + * The MMIO based communication between Host and GuC uses software scratch + * registers, where first register holds data treated as message header, + * and other registers are used to hold 
message payload. + * + * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, + * but no H2G command takes more than 8 parameters and the GuC FW + * itself uses an 8-element array to store the H2G message. + * + * +-----------+---------+---------+---------+ + * | MMIO[0] | MMIO[1] | ... | MMIO[n] | + * +-----------+---------+---------+---------+ + * | header | optional payload | + * +======+====+=========+=========+=========+ + * | 31:28|type| | | | + * +------+----+ | | | + * | 27:16|data| | | | + * +------+----+ | | | + * | 15:0|code| | | | + * +------+----+---------+---------+---------+ + * + * The message header consists of: + * + * - **type**, indicates message type + * - **code**, indicates message code, is specific for **type** + * - **data**, indicates message data, optional, depends on **code** + * + * The following message **types** are supported: + * + * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code + * must be provided in **code** field. Optional action specific parameters + * can be provided in remaining payload registers or **data** field. + * + * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request, + * action response status will be provided in **code** field. Optional + * response data can be returned in remaining payload registers or **data** + * field. + */ + +#define GUC_MAX_MMIO_MSG_LEN 8 + +#endif /* _ABI_GUC_COMMUNICATION_MMIO_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h new file mode 100644 index 000000000000..488b6061ee89 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_ERRORS_ABI_H +#define _ABI_GUC_ERRORS_ABI_H + +enum intel_guc_response_status { + INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, + INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, +}; + +#endif /* _ABI_GUC_ERRORS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h new file mode 100644 index 000000000000..775e21f3058c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_MESSAGES_ABI_H +#define _ABI_GUC_MESSAGES_ABI_H + +#define INTEL_GUC_MSG_TYPE_SHIFT 28 +#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) +#define INTEL_GUC_MSG_DATA_SHIFT 16 +#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) +#define INTEL_GUC_MSG_CODE_SHIFT 0 +#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) + +enum intel_guc_msg_type { + INTEL_GUC_MSG_TYPE_REQUEST = 0x0, + INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, +}; + +#endif /* _ABI_GUC_MESSAGES_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index adae04c47aab..f147cb389a20 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -60,15 +60,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc) enum forcewake_domains fw_domains = 0; unsigned int i; - if (INTEL_GEN(gt->i915) >= 11) { - guc->send_regs.base = - i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); - guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; - } else { - guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); - guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; - BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN >
SOFT_SCRATCH_COUNT); - } + GEM_BUG_ON(!guc->send_regs.base); + GEM_BUG_ON(!guc->send_regs.count); for (i = 0; i < guc->send_regs.count; i++) { fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore, @@ -96,12 +89,9 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(>->i915->runtime_pm); spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & - gt->pm_guc_events); - guc->interrupts.enabled = true; - gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); - } + WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & + gt->pm_guc_events); + gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); spin_unlock_irq(>->irq_lock); } @@ -112,7 +102,6 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(>->i915->runtime_pm); spin_lock_irq(>->irq_lock); - guc->interrupts.enabled = false; gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); @@ -134,18 +123,14 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc) static void gen11_enable_guc_interrupts(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); + u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); - - WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); - intel_uncore_write(gt->uncore, - GEN11_GUC_SG_INTR_ENABLE, events); - intel_uncore_write(gt->uncore, - GEN11_GUC_SG_INTR_MASK, ~events); - guc->interrupts.enabled = true; - } + WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_ENABLE, events); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_MASK, ~events); spin_unlock_irq(>->irq_lock); } @@ -154,7 +139,6 @@ static void gen11_disable_guc_interrupts(struct intel_guc *guc) struct intel_gt *gt = guc_to_gt(guc); spin_lock_irq(>->irq_lock); - guc->interrupts.enabled = false; intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); @@ -176,16 +160,23 @@ void intel_guc_init_early(struct intel_guc *guc) mutex_init(&guc->send_mutex); spin_lock_init(&guc->irq_lock); - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { guc->notify_reg = GEN11_GUC_HOST_INTERRUPT; guc->interrupts.reset = gen11_reset_guc_interrupts; guc->interrupts.enable = gen11_enable_guc_interrupts; guc->interrupts.disable = gen11_disable_guc_interrupts; + guc->send_regs.base = + i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); + guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; + } else { guc->notify_reg = GUC_SEND_INTERRUPT; guc->interrupts.reset = gen9_reset_guc_interrupts; guc->interrupts.enable = gen9_enable_guc_interrupts; guc->interrupts.disable = gen9_disable_guc_interrupts; + guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); + guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; + BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); } } @@ -469,22 +460,6 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, return 0; } -int intel_guc_sample_forcewake(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; - u32 action[2]; - - action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; - /* WaRsDisableCoarsePowerGating:skl,cnl */ - if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) - action[1] = 0; - else - /* bit 0 and 1 are for Render and Media domain separately */ - action[1] = GUC_FORCEWAKE_RENDER | 
GUC_FORCEWAKE_MEDIA; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - /** * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode * @guc: intel_guc structure diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index bc2ba7d0626c..4abc59f6f3cd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -33,7 +33,6 @@ struct intel_guc { unsigned int msg_enabled_mask; struct { - bool enabled; void (*reset)(struct intel_guc *guc); void (*enable)(struct intel_guc *guc); void (*disable)(struct intel_guc *guc); @@ -128,7 +127,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, const u32 *payload, u32 len); -int intel_guc_sample_forcewake(struct intel_guc *guc); int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 17526717368c..9abfbc6edbd6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -6,6 +6,7 @@ #include "gt/intel_gt.h" #include "gt/intel_lrc.h" #include "intel_guc_ads.h" +#include "intel_guc_fwif.h" #include "intel_uc.h" #include "i915_drv.h" @@ -104,7 +105,7 @@ static void guc_mapping_table_init(struct intel_gt *gt, GUC_MAX_INSTANCES_PER_CLASS; for_each_engine(engine, gt, id) { - u8 guc_class = engine->class; + u8 guc_class = engine_class_to_guc_class(engine->class); system_info->mapping_table[guc_class][engine->instance] = engine->instance; @@ -124,7 +125,7 @@ static void __guc_ads_init(struct intel_guc *guc) struct __guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; - u8 engine_class; + u8 engine_class, guc_class; /* GuC scheduling policies */ guc_policies_init(&blob->policies); @@ -140,29 +141,32 @@ static void __guc_ads_init(struct intel_guc *guc) for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { if (engine_class == OTHER_CLASS) continue; + + guc_class = engine_class_to_guc_class(engine_class); + /* * TODO: Set context pointer to default state to allow * GuC to re-init guilty contexts after internal reset. 
*/ - blob->ads.golden_context_lrca[engine_class] = 0; - blob->ads.eng_state_size[engine_class] = + blob->ads.golden_context_lrca[guc_class] = 0; + blob->ads.eng_state_size[guc_class] = intel_engine_context_size(guc_to_gt(guc), engine_class) - skipped_size; } /* System info */ - blob->system_info.engine_enabled_masks[RENDER_CLASS] = 1; - blob->system_info.engine_enabled_masks[COPY_ENGINE_CLASS] = 1; - blob->system_info.engine_enabled_masks[VIDEO_DECODE_CLASS] = VDBOX_MASK(gt); - blob->system_info.engine_enabled_masks[VIDEO_ENHANCEMENT_CLASS] = VEBOX_MASK(gt); + blob->system_info.engine_enabled_masks[GUC_RENDER_CLASS] = 1; + blob->system_info.engine_enabled_masks[GUC_BLITTER_CLASS] = 1; + blob->system_info.engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt); + blob->system_info.engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt); blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] = hweight8(gt->info.sseu.slice_mask); blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] = gt->info.vdbox_sfc_access; - if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) { + if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) { u32 distdbreg = intel_uncore_read(gt->uncore, GEN12_DIST_DBS_POPULATED); blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] = diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index fa9e048cc65f..8f7b148fef58 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -7,14 +7,62 @@ #include "intel_guc_ct.h" #include "gt/intel_gt.h" +static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) +{ + return container_of(ct, struct intel_guc, ct); +} + +static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct) +{ + return guc_to_gt(ct_to_guc(ct)); +} + +static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct) +{ + return ct_to_gt(ct)->i915; +} + +static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct) +{ + return &ct_to_i915(ct)->drm; +} + #define CT_ERROR(_ct, _fmt, ...) \ - DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__) + drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__) #ifdef CONFIG_DRM_I915_DEBUG_GUC #define CT_DEBUG(_ct, _fmt, ...) \ - DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__) + drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__) #else #define CT_DEBUG(...) do { } while (0) #endif +#define CT_PROBE_ERROR(_ct, _fmt, ...) \ + i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__) + +/** + * DOC: CTB Blob + * + * We allocate single blob to hold both CTB descriptors and buffers: + * + * +--------+-----------------------------------------------+------+ + * | offset | contents | size | + * +========+===============================================+======+ + * | 0x0000 | H2G `CTB Descriptor`_ (send) | | + * +--------+-----------------------------------------------+ 4K | + * | 0x0800 | G2H `CTB Descriptor`_ (recv) | | + * +--------+-----------------------------------------------+------+ + * | 0x1000 | H2G `CT Buffer`_ (send) | n*4K | + * | | | | + * +--------+-----------------------------------------------+------+ + * | 0x1000 | G2H `CT Buffer`_ (recv) | m*4K | + * | + n*4K | | | + * +--------+-----------------------------------------------+------+ + * + * Size of each `CT Buffer`_ must be multiple of 4K. + * As we don't expect too many messages, for now use minimum sizes. 
+ */ +#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K) +#define CTB_H2G_BUFFER_SIZE (SZ_4K) +#define CTB_G2H_BUFFER_SIZE (SZ_4K) struct ct_request { struct list_head link; @@ -24,8 +72,9 @@ struct ct_request { u32 *response_buf; }; -struct ct_incoming_request { +struct ct_incoming_msg { struct list_head link; + u32 size; u32 msg[]; }; @@ -33,6 +82,7 @@ enum { CTB_SEND = 0, CTB_RECV = 1 }; enum { CTB_OWNER_HOST = 0 }; +static void ct_receive_tasklet_func(struct tasklet_struct *t); static void ct_incoming_request_worker_func(struct work_struct *w); /** @@ -41,30 +91,13 @@ static void ct_incoming_request_worker_func(struct work_struct *w); */ void intel_guc_ct_init_early(struct intel_guc_ct *ct) { + spin_lock_init(&ct->ctbs.send.lock); + spin_lock_init(&ct->ctbs.recv.lock); spin_lock_init(&ct->requests.lock); INIT_LIST_HEAD(&ct->requests.pending); INIT_LIST_HEAD(&ct->requests.incoming); INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func); -} - -static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) -{ - return container_of(ct, struct intel_guc, ct); -} - -static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct) -{ - return guc_to_gt(ct_to_guc(ct)); -} - -static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct) -{ - return ct_to_gt(ct)->i915; -} - -static inline struct device *ct_to_dev(struct intel_guc_ct *ct) -{ - return ct_to_i915(ct)->drm.dev; + tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func); } static inline const char *guc_ct_buffer_type_to_str(u32 type) @@ -88,11 +121,22 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, desc->owner = CTB_OWNER_HOST; } -static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) +static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb, u32 cmds_addr) { - desc->head = 0; - desc->tail = 0; - desc->is_in_error = 0; + guc_ct_buffer_desc_init(ctb->desc, cmds_addr, ctb->size); +} + +static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb, + struct guc_ct_buffer_desc *desc, + u32 *cmds, u32 size) +{ + GEM_BUG_ON(size % 4); + + ctb->desc = desc; + ctb->cmds = cmds; + ctb->size = size; + + guc_ct_buffer_reset(ctb, 0); } static int guc_action_register_ct_buffer(struct intel_guc *guc, @@ -153,48 +197,42 @@ static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type) int intel_guc_ct_init(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); + struct guc_ct_buffer_desc *desc; + u32 blob_size; + u32 cmds_size; void *blob; + u32 *cmds; int err; - int i; GEM_BUG_ON(ct->vma); - /* We allocate 1 page to hold both descriptors and both buffers. - * ___________..................... - * |desc (SEND)| : - * |___________| PAGE/4 - * :___________....................: - * |desc (RECV)| : - * |___________| PAGE/4 - * :_______________________________: - * |cmds (SEND) | - * | PAGE/4 - * |_______________________________| - * |cmds (RECV) | - * | PAGE/4 - * |_______________________________| - * - * Each message can use a maximum of 32 dwords and we don't expect to - * have more than 1 in flight at any time, so we have enough space. - * Some logic further ahead will rely on the fact that there is only 1 - * page and that it is always mapped, so if the size is changed the - * other code will need updating as well. 
- */ - - err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob); + blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE; + err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob); if (unlikely(err)) { - CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err); + CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n", + blob_size, ERR_PTR(err)); return err; } - CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma)); + CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size); - /* store pointers to desc and cmds */ - for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) { - GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); - ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i; - ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; - } + /* store pointers to desc and cmds for send ctb */ + desc = blob; + cmds = blob + 2 * CTB_DESC_SIZE; + cmds_size = CTB_H2G_BUFFER_SIZE; + CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send", + ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size); + + guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size); + + /* store pointers to desc and cmds for recv ctb */ + desc = blob + CTB_DESC_SIZE; + cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE; + cmds_size = CTB_G2H_BUFFER_SIZE; + CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv", + ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size); + + guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size); return 0; } @@ -209,6 +247,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct) { GEM_BUG_ON(ct->enabled); + tasklet_kill(&ct->receive_tasklet); i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP); memset(ct, 0, sizeof(*ct)); } @@ -222,37 +261,38 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct) int intel_guc_ct_enable(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); - u32 base, cmds, size; + u32 base, cmds; + void *blob; int err; - int i; GEM_BUG_ON(ct->enabled); /* vma should be already allocated and map'ed */ GEM_BUG_ON(!ct->vma); + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj)); base = intel_guc_ggtt_offset(guc, ct->vma); - /* (re)initialize descriptors - * cmds buffers are in the second half of the blob page - */ - for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) { - GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); - cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2; - size = PAGE_SIZE / 4; - CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size); - guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size); - } + /* blob should start with send descriptor */ + blob = __px_vaddr(ct->vma->obj); + GEM_BUG_ON(blob != ct->ctbs.send.desc); + + /* (re)initialize descriptors */ + cmds = base + ptrdiff(ct->ctbs.send.cmds, blob); + guc_ct_buffer_reset(&ct->ctbs.send, cmds); + + cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob); + guc_ct_buffer_reset(&ct->ctbs.recv, cmds); /* * Register both CT buffers starting with RECV buffer. * Descriptors are in first half of the blob. 
*/ - err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV, + err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.recv.desc, blob), INTEL_GUC_CT_BUFFER_TYPE_RECV); if (unlikely(err)) goto err_out; - err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND, + err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.send.desc, blob), INTEL_GUC_CT_BUFFER_TYPE_SEND); if (unlikely(err)) goto err_deregister; @@ -264,7 +304,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) err_deregister: ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV); err_out: - CT_ERROR(ct, "Failed to open open CT channel (err=%d)\n", err); + CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err)); return err; } @@ -292,6 +332,28 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct) return ++ct->requests.last_fence; } +static void write_barrier(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_gt *gt = guc_to_gt(guc); + + if (i915_gem_object_is_lmem(guc->ct.vma->obj)) { + GEM_BUG_ON(guc->send_regs.fw_domains); + /* + * This register is used by the i915 and GuC for MMIO based + * communication. Once we are in this code CTBs are the only + * method the i915 uses to communicate with the GuC so it is + * safe to write to this register (a value of 0 is NOP for MMIO + * communication). If we ever start mixing CTBs and MMIOs a new + * register will have to be chosen. + */ + intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0); + } else { + /* wmb() sufficient for a barrier if in smem */ + wmb(); + } +} + /** * DOC: CTB Host to GuC request * @@ -313,14 +375,13 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct) static int ct_write(struct intel_guc_ct *ct, const u32 *action, u32 len /* in dwords */, - u32 fence, - bool want_response) + u32 fence) { - struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND]; + struct intel_guc_ct_buffer *ctb = &ct->ctbs.send; struct guc_ct_buffer_desc *desc = ctb->desc; u32 head = desc->head; u32 tail = desc->tail; - u32 size = desc->size; + u32 size = ctb->size; u32 used; u32 header; u32 *cmds = ctb->cmds; @@ -329,7 +390,7 @@ static int ct_write(struct intel_guc_ct *ct, if (unlikely(desc->is_in_error)) return -EPIPE; - if (unlikely(!IS_ALIGNED(head | tail | size, 4) || + if (unlikely(!IS_ALIGNED(head | tail, 4) || (tail | head) >= size)) goto corrupted; @@ -358,8 +419,7 @@ static int ct_write(struct intel_guc_ct *ct, * DW2+: action data */ header = (len << GUC_CT_MSG_LEN_SHIFT) | - (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | - (want_response ? GUC_CT_MSG_SEND_STATUS : 0) | + GUC_CT_MSG_SEND_STATUS | (action[0] << GUC_CT_MSG_ACTION_SHIFT); CT_DEBUG(ct, "writing %*ph %*ph %*ph\n", @@ -377,6 +437,12 @@ static int ct_write(struct intel_guc_ct *ct, } GEM_BUG_ON(tail > size); + /* + * make sure H2G buffer update and LRC tail update (if this triggering a + * submission) are visible before updating the descriptor tail + */ + write_barrier(ct); + /* now update desc tail (back in bytes) */ desc->tail = tail * 4; return 0; @@ -389,56 +455,6 @@ corrupted: } /** - * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update. - * @desc: buffer descriptor - * @fence: response fence - * @status: placeholder for status - * - * Guc will update CT buffer descriptor with new fence and status - * after processing the command identified by the fence. Wait for - * specified fence and then read from the descriptor status of the - * command. 
- * - * Return: - * * 0 response received (status is valid) - * * -ETIMEDOUT no response within hardcoded timeout - * * -EPROTO no response, CT buffer is in error - */ -static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc, - u32 fence, - u32 *status) -{ - int err; - - /* - * Fast commands should complete in less than 10us, so sample quickly - * up to that length of time, then switch to a slower sleep-wait loop. - * No GuC command should ever take longer than 10ms. - */ -#define done (READ_ONCE(desc->fence) == fence) - err = wait_for_us(done, 10); - if (err) - err = wait_for(done, 10); -#undef done - - if (unlikely(err)) { - DRM_ERROR("CT: fence %u failed; reported fence=%u\n", - fence, desc->fence); - - if (WARN_ON(desc->is_in_error)) { - /* Something went wrong with the messaging, try to reset - * the buffer and hope for the best - */ - guc_ct_buffer_desc_reset(desc); - err = -EPROTO; - } - } - - *status = desc->status; - return err; -} - -/** * wait_for_ct_request_update - Wait for CT request state update. * @req: pointer to pending request * @status: placeholder for status @@ -481,8 +497,6 @@ static int ct_send(struct intel_guc_ct *ct, u32 response_buf_size, u32 *status) { - struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND]; - struct guc_ct_buffer_desc *desc = ctb->desc; struct ct_request request; unsigned long flags; u32 fence; @@ -493,26 +507,28 @@ static int ct_send(struct intel_guc_ct *ct, GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); GEM_BUG_ON(!response_buf && response_buf_size); + spin_lock_irqsave(&ct->ctbs.send.lock, flags); + fence = ct_get_next_fence(ct); request.fence = fence; request.status = 0; request.response_len = response_buf_size; request.response_buf = response_buf; - spin_lock_irqsave(&ct->requests.lock, flags); + spin_lock(&ct->requests.lock); list_add_tail(&request.link, &ct->requests.pending); - spin_unlock_irqrestore(&ct->requests.lock, flags); + spin_unlock(&ct->requests.lock); + + err = ct_write(ct, action, len, fence); + + spin_unlock_irqrestore(&ct->ctbs.send.lock, flags); - err = ct_write(ct, action, len, fence, !!response_buf); if (unlikely(err)) goto unlink; intel_guc_notify(ct_to_guc(ct)); - if (response_buf) - err = wait_for_ct_request_update(&request, status); - else - err = wait_for_ctb_desc_update(desc, fence, status); + err = wait_for_ct_request_update(&request, status); if (unlikely(err)) goto unlink; @@ -547,7 +563,6 @@ unlink: int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { - struct intel_guc *guc = ct_to_guc(ct); u32 status = ~0; /* undefined */ int ret; @@ -556,8 +571,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, return -ENODEV; } - mutex_lock(&guc->send_mutex); - ret = ct_send(ct, action, len, response_buf, response_buf_size, &status); if (unlikely(ret < 0)) { CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n", @@ -567,7 +580,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, action[0], ret, ret); } - mutex_unlock(&guc->send_mutex); return ret; } @@ -586,22 +598,42 @@ static inline bool ct_header_is_response(u32 header) return !!(header & GUC_CT_MSG_IS_RESPONSE); } -static int ct_read(struct intel_guc_ct *ct, u32 *data) +static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords) { - struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV]; + struct ct_incoming_msg *msg; + + msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC); + if (msg) + msg->size = num_dwords; + return 
msg; +} + +static void ct_free_msg(struct ct_incoming_msg *msg) +{ + kfree(msg); +} + +/* + * Return: number available remaining dwords to read (0 if empty) + * or a negative error code on failure + */ +static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) +{ + struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv; struct guc_ct_buffer_desc *desc = ctb->desc; u32 head = desc->head; u32 tail = desc->tail; - u32 size = desc->size; + u32 size = ctb->size; u32 *cmds = ctb->cmds; s32 available; unsigned int len; unsigned int i; + u32 header; if (unlikely(desc->is_in_error)) return -EPIPE; - if (unlikely(!IS_ALIGNED(head | tail | size, 4) || + if (unlikely(!IS_ALIGNED(head | tail, 4) || (tail | head) >= size)) goto corrupted; @@ -612,8 +644,10 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data) /* tail == head condition indicates empty */ available = tail - head; - if (unlikely(available == 0)) - return -ENODATA; + if (unlikely(available == 0)) { + *msg = NULL; + return 0; + } /* beware of buffer wrap case */ if (unlikely(available < 0)) @@ -621,14 +655,14 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data) CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail); GEM_BUG_ON(available < 0); - data[0] = cmds[head]; + header = cmds[head]; head = (head + 1) % size; /* message len with header */ - len = ct_header_get_len(data[0]) + 1; + len = ct_header_get_len(header) + 1; if (unlikely(len > (u32)available)) { CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n", - 4, data, + 4, &header, 4 * (head + available - 1 > size ? size - head : available - 1), &cmds[head], 4 * (head + available - 1 > size ? @@ -636,14 +670,27 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data) goto corrupted; } + *msg = ct_alloc_msg(len); + if (!*msg) { + CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n", + 4, &header, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? 
+ available - 1 - size + head : 0), &cmds[0]); + return available; + } + + (*msg)->msg[0] = header; + for (i = 1; i < len; i++) { - data[i] = cmds[head]; + (*msg)->msg[i] = cmds[head]; head = (head + 1) % size; } - CT_DEBUG(ct, "received %*ph\n", 4 * len, data); + CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg); desc->head = head * 4; - return 0; + return available - len; corrupted: CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n", @@ -670,39 +717,39 @@ corrupted: * ^-----------------------len-----------------------^ */ -static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) +static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response) { - u32 header = msg[0]; + u32 header = response->msg[0]; u32 len = ct_header_get_len(header); - u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */ u32 fence; u32 status; u32 datalen; struct ct_request *req; + unsigned long flags; bool found = false; + int err = 0; GEM_BUG_ON(!ct_header_is_response(header)); - GEM_BUG_ON(!in_irq()); /* Response payload shall at least include fence and status */ if (unlikely(len < 2)) { - CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg); + CT_ERROR(ct, "Corrupted response (len %u)\n", len); return -EPROTO; } - fence = msg[1]; - status = msg[2]; + fence = response->msg[1]; + status = response->msg[2]; datalen = len - 2; /* Format of the status follows RESPONSE message */ if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) { - CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg); + CT_ERROR(ct, "Corrupted response (status %#x)\n", status); return -EPROTO; } CT_DEBUG(ct, "response fence %u status %#x\n", fence, status); - spin_lock(&ct->requests.lock); + spin_lock_irqsave(&ct->requests.lock, flags); list_for_each_entry(req, &ct->requests.pending, link) { if (unlikely(fence != req->fence)) { CT_DEBUG(ct, "request %u awaits response\n", @@ -710,58 +757,75 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) continue; } if (unlikely(datalen > req->response_len)) { - CT_ERROR(ct, "Response for %u is too long %*ph\n", - req->fence, msgsize, msg); - datalen = 0; + CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n", + req->fence, datalen, req->response_len); + datalen = min(datalen, req->response_len); + err = -EMSGSIZE; } if (datalen) - memcpy(req->response_buf, msg + 3, 4 * datalen); + memcpy(req->response_buf, response->msg + 3, 4 * datalen); req->response_len = datalen; WRITE_ONCE(req->status, status); found = true; break; } - spin_unlock(&ct->requests.lock); + spin_unlock_irqrestore(&ct->requests.lock, flags); + + if (!found) { + CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence); + return -ENOKEY; + } + + if (unlikely(err)) + return err; - if (!found) - CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg); + ct_free_msg(response); return 0; } -static void ct_process_request(struct intel_guc_ct *ct, - u32 action, u32 len, const u32 *payload) +static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request) { struct intel_guc *guc = ct_to_guc(ct); + u32 header, action, len; + const u32 *payload; int ret; + header = request->msg[0]; + payload = &request->msg[1]; + action = ct_header_get_action(header); + len = ct_header_get_len(header); + CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload); switch (action) { case INTEL_GUC_ACTION_DEFAULT: ret = intel_guc_to_host_process_recv_msg(guc, payload, len); - if (unlikely(ret)) - goto fail_unexpected; break; - default: 
-fail_unexpected: - CT_ERROR(ct, "Unexpected request %x %*ph\n", - action, 4 * len, payload); + ret = -EOPNOTSUPP; break; } + + if (unlikely(ret)) { + CT_ERROR(ct, "Failed to process request %04x (%pe)\n", + action, ERR_PTR(ret)); + return ret; + } + + ct_free_msg(request); + return 0; } static bool ct_process_incoming_requests(struct intel_guc_ct *ct) { unsigned long flags; - struct ct_incoming_request *request; - u32 header; - u32 *payload; + struct ct_incoming_msg *request; bool done; + int err; spin_lock_irqsave(&ct->requests.lock, flags); request = list_first_entry_or_null(&ct->requests.incoming, - struct ct_incoming_request, link); + struct ct_incoming_msg, link); if (request) list_del(&request->link); done = !!list_empty(&ct->requests.incoming); @@ -770,14 +834,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct) if (!request) return true; - header = request->msg[0]; - payload = &request->msg[1]; - ct_process_request(ct, - ct_header_get_action(header), - ct_header_get_len(header), - payload); + err = ct_process_request(ct, request); + if (unlikely(err)) { + CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n", + ERR_PTR(err), 4 * request->size, request->msg); + ct_free_msg(request); + } - kfree(request); return done; } @@ -810,22 +873,11 @@ static void ct_incoming_request_worker_func(struct work_struct *w) * ^-----------------------len-----------------------^ */ -static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) +static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request) { - u32 header = msg[0]; - u32 len = ct_header_get_len(header); - u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */ - struct ct_incoming_request *request; unsigned long flags; - GEM_BUG_ON(ct_header_is_response(header)); - - request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC); - if (unlikely(!request)) { - CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg); - return 0; /* XXX: -ENOMEM ? 
*/ - } - memcpy(request->msg, msg, msgsize); + GEM_BUG_ON(ct_header_is_response(request->msg[0])); spin_lock_irqsave(&ct->requests.lock, flags); list_add_tail(&request->link, &ct->requests.incoming); @@ -835,28 +887,74 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) return 0; } +static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg) +{ + u32 header = msg->msg[0]; + int err; + + if (ct_header_is_response(header)) + err = ct_handle_response(ct, msg); + else + err = ct_handle_request(ct, msg); + + if (unlikely(err)) { + CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n", + ERR_PTR(err), 4 * msg->size, msg->msg); + ct_free_msg(msg); + } +} + +/* + * Return: number available remaining dwords to read (0 if empty) + * or a negative error code on failure + */ +static int ct_receive(struct intel_guc_ct *ct) +{ + struct ct_incoming_msg *msg = NULL; + unsigned long flags; + int ret; + + spin_lock_irqsave(&ct->ctbs.recv.lock, flags); + ret = ct_read(ct, &msg); + spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags); + if (ret < 0) + return ret; + + if (msg) + ct_handle_msg(ct, msg); + + return ret; +} + +static void ct_try_receive_message(struct intel_guc_ct *ct) +{ + int ret; + + if (GEM_WARN_ON(!ct->enabled)) + return; + + ret = ct_receive(ct); + if (ret > 0) + tasklet_hi_schedule(&ct->receive_tasklet); +} + +static void ct_receive_tasklet_func(struct tasklet_struct *t) +{ + struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet); + + ct_try_receive_message(ct); +} + /* * When we're communicating with the GuC over CT, GuC uses events * to notify us about new messages being posted on the RECV buffer. */ void intel_guc_ct_event_handler(struct intel_guc_ct *ct) { - u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ - int err = 0; - if (unlikely(!ct->enabled)) { WARN(1, "Unexpected GuC event received while CT disabled!\n"); return; } - do { - err = ct_read(ct, msg); - if (err) - break; - - if (ct_header_is_response(msg[0])) - err = ct_handle_response(ct, msg); - else - err = ct_handle_request(ct, msg); - } while (!err); + ct_try_receive_message(ct); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h index 494a51a5200f..cb222f202301 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h @@ -6,6 +6,7 @@ #ifndef _INTEL_GUC_CT_H_ #define _INTEL_GUC_CT_H_ +#include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/workqueue.h> @@ -27,12 +28,16 @@ struct intel_guc; * record (command transport buffer descriptor) and the actual buffer which * holds the commands. 
* + * @lock: protects access to the commands buffer and buffer descriptor * @desc: pointer to the buffer descriptor * @cmds: pointer to the commands buffer + * @size: size of the commands buffer */ struct intel_guc_ct_buffer { + spinlock_t lock; struct guc_ct_buffer_desc *desc; u32 *cmds; + u32 size; }; @@ -45,8 +50,13 @@ struct intel_guc_ct { struct i915_vma *vma; bool enabled; - /* buffers for sending(0) and receiving(1) commands */ - struct intel_guc_ct_buffer ctbs[2]; + /* buffers for sending and receiving commands */ + struct { + struct intel_guc_ct_buffer send; + struct intel_guc_ct_buffer recv; + } ctbs; + + struct tasklet_struct receive_tasklet; struct { u32 last_fence; /* last fence used to send request */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 2270d6b3b272..76fe766ad1bc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -30,7 +30,7 @@ static void guc_prepare_xfer(struct intel_uncore *uncore) else intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - if (IS_GEN(uncore->i915, 9)) { + if (GRAPHICS_VER(uncore->i915) == 9) { /* DOP Clock Gating Enable for GuC clocks */ intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 79c560d9c0b6..e9a9d85e2aa3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -9,6 +9,13 @@ #include <linux/bits.h> #include <linux/compiler.h> #include <linux/types.h> +#include "gt/intel_engine_types.h" + +#include "abi/guc_actions_abi.h" +#include "abi/guc_errors_abi.h" +#include "abi/guc_communication_mmio_abi.h" +#include "abi/guc_communication_ctb_abi.h" +#include "abi/guc_messages_abi.h" #define GUC_CLIENT_PRIORITY_KMD_HIGH 0 #define GUC_CLIENT_PRIORITY_HIGH 1 @@ -26,6 +33,12 @@ #define GUC_VIDEO_ENGINE2 4 #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) +#define GUC_RENDER_CLASS 0 +#define GUC_VIDEO_CLASS 1 +#define GUC_VIDEOENHANCE_CLASS 2 +#define GUC_BLITTER_CLASS 3 +#define GUC_RESERVED_CLASS 4 +#define GUC_LAST_ENGINE_CLASS GUC_RESERVED_CLASS #define GUC_MAX_ENGINE_CLASSES 16 #define GUC_MAX_INSTANCES_PER_CLASS 32 @@ -123,6 +136,25 @@ #define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \ (((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT) +static inline u8 engine_class_to_guc_class(u8 class) +{ + BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS); + BUILD_BUG_ON(GUC_BLITTER_CLASS != COPY_ENGINE_CLASS); + BUILD_BUG_ON(GUC_VIDEO_CLASS != VIDEO_DECODE_CLASS); + BUILD_BUG_ON(GUC_VIDEOENHANCE_CLASS != VIDEO_ENHANCEMENT_CLASS); + GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS); + + return class; +} + +static inline u8 guc_class_to_engine_class(u8 guc_class) +{ + GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS); + GEM_BUG_ON(guc_class == GUC_RESERVED_CLASS); + + return guc_class; +} + /* Work item for submitting workloads into work queue of GuC. */ struct guc_wq_item { u32 header; @@ -207,104 +239,6 @@ struct guc_stage_desc { u64 desc_private; } __packed; -/** - * DOC: CTB based communication - * - * The CTB (command transport buffer) communication between Host and GuC - * is based on u32 data stream written to the shared buffer. One buffer can - * be used to transmit data only in one direction (one-directional channel). - * - * Current status of the each buffer is stored in the buffer descriptor. 
- * Buffer descriptor holds tail and head fields that represents active data - * stream. The tail field is updated by the data producer (sender), and head - * field is updated by the data consumer (receiver):: - * - * +------------+ - * | DESCRIPTOR | +=================+============+========+ - * +============+ | | MESSAGE(s) | | - * | address |--------->+=================+============+========+ - * +------------+ - * | head | ^-----head--------^ - * +------------+ - * | tail | ^---------tail-----------------^ - * +------------+ - * | size | ^---------------size--------------------^ - * +------------+ - * - * Each message in data stream starts with the single u32 treated as a header, - * followed by optional set of u32 data that makes message specific payload:: - * - * +------------+---------+---------+---------+ - * | MESSAGE | - * +------------+---------+---------+---------+ - * | msg[0] | [1] | ... | [n-1] | - * +------------+---------+---------+---------+ - * | MESSAGE | MESSAGE PAYLOAD | - * + HEADER +---------+---------+---------+ - * | | 0 | ... | n | - * +======+=====+=========+=========+=========+ - * | 31:16| code| | | | - * +------+-----+ | | | - * | 15:5|flags| | | | - * +------+-----+ | | | - * | 4:0| len| | | | - * +------+-----+---------+---------+---------+ - * - * ^-------------len-------------^ - * - * The message header consists of: - * - * - **len**, indicates length of the message payload (in u32) - * - **code**, indicates message code - * - **flags**, holds various bits to control message handling - */ - -/* - * Describes single command transport buffer. - * Used by both guc-master and clients. - */ -struct guc_ct_buffer_desc { - u32 addr; /* gfx address */ - u64 host_private; /* host private data */ - u32 size; /* size in bytes */ - u32 head; /* offset updated by GuC*/ - u32 tail; /* offset updated by owner */ - u32 is_in_error; /* error indicator */ - u32 fence; /* fence updated by GuC */ - u32 status; /* status updated by GuC */ - u32 owner; /* id of the channel owner */ - u32 owner_sub_id; /* owner-defined field for extra tracking */ - u32 reserved[5]; -} __packed; - -/* Type of command transport buffer */ -#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u -#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u - -/* - * Definition of the command transport message header (DW0) - * - * bit[4..0] message len (in dwords) - * bit[7..5] reserved - * bit[8] response (G2H only) - * bit[8] write fence to desc (H2G only) - * bit[9] write status to H2G buff (H2G only) - * bit[10] send status back via G2H (H2G only) - * bit[15..11] reserved - * bit[31..16] action code - */ -#define GUC_CT_MSG_LEN_SHIFT 0 -#define GUC_CT_MSG_LEN_MASK 0x1F -#define GUC_CT_MSG_IS_RESPONSE (1 << 8) -#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) -#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) -#define GUC_CT_MSG_SEND_STATUS (1 << 10) -#define GUC_CT_MSG_ACTION_SHIFT 16 -#define GUC_CT_MSG_ACTION_MASK 0xFFFF - -#define GUC_FORCEWAKE_RENDER (1 << 0) -#define GUC_FORCEWAKE_MEDIA (1 << 1) - #define GUC_POWER_UNSPECIFIED 0 #define GUC_POWER_D0 1 #define GUC_POWER_D1 2 @@ -480,120 +414,17 @@ struct guc_shared_ctx_data { struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM]; } __packed; -/** - * DOC: MMIO based communication - * - * The MMIO based communication between Host and GuC uses software scratch - * registers, where first register holds data treated as message header, - * and other registers are used to hold message payload. 
- * - * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, - * but no H2G command takes more than 8 parameters and the GuC FW - * itself uses an 8-element array to store the H2G message. - * - * +-----------+---------+---------+---------+ - * | MMIO[0] | MMIO[1] | ... | MMIO[n] | - * +-----------+---------+---------+---------+ - * | header | optional payload | - * +======+====+=========+=========+=========+ - * | 31:28|type| | | | - * +------+----+ | | | - * | 27:16|data| | | | - * +------+----+ | | | - * | 15:0|code| | | | - * +------+----+---------+---------+---------+ - * - * The message header consists of: - * - * - **type**, indicates message type - * - **code**, indicates message code, is specific for **type** - * - **data**, indicates message data, optional, depends on **code** - * - * The following message **types** are supported: - * - * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code - * must be priovided in **code** field. Optional action specific parameters - * can be provided in remaining payload registers or **data** field. - * - * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request, - * action response status will be provided in **code** field. Optional - * response data can be returned in remaining payload registers or **data** - * field. - */ - -#define GUC_MAX_MMIO_MSG_LEN 8 - -#define INTEL_GUC_MSG_TYPE_SHIFT 28 -#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) -#define INTEL_GUC_MSG_DATA_SHIFT 16 -#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) -#define INTEL_GUC_MSG_CODE_SHIFT 0 -#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) - #define __INTEL_GUC_MSG_GET(T, m) \ (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT) #define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m) #define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m) #define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m) -enum intel_guc_msg_type { - INTEL_GUC_MSG_TYPE_REQUEST = 0x0, - INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, -}; - #define __INTEL_GUC_MSG_TYPE_IS(T, m) \ (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T) #define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m) #define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m) -enum intel_guc_action { - INTEL_GUC_ACTION_DEFAULT = 0x0, - INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, - INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, - INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, - INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, - INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, - INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40, - INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, - INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, - INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, - INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, - INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005, - INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, - INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, - INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, - INTEL_GUC_ACTION_LIMIT -}; - -enum intel_guc_preempt_options { - INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, - INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, -}; - -enum intel_guc_report_status { - INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0, - INTEL_GUC_REPORT_STATUS_ACKED = 0x1, - INTEL_GUC_REPORT_STATUS_ERROR = 0x2, - INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, -}; - -enum intel_guc_sleep_state_status { - INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, - 
INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, - INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 -#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 -}; - -#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) -#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 -#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) -#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) - -enum intel_guc_response_status { - INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, - INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, -}; - #define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \ (typecheck(u32, (m)) && \ ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 335719f17490..7c8ff9792f7b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -432,32 +432,6 @@ void intel_guc_submission_fini(struct intel_guc *guc) } } -static void guc_interrupts_capture(struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT; - u32 dmask = irqs << 16 | irqs; - - GEM_BUG_ON(INTEL_GEN(gt->i915) < 11); - - /* Don't handle the ctx switch interrupt in GuC submission mode */ - intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0); - intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0); -} - -static void guc_interrupts_release(struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT; - u32 dmask = irqs << 16 | irqs; - - GEM_BUG_ON(INTEL_GEN(gt->i915) < 11); - - /* Handle ctx switch interrupts again */ - intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask); - intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask); -} - static int guc_context_alloc(struct intel_context *ce) { return lrc_alloc(ce, ce->engine); @@ -648,7 +622,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen8_emit_flush_xcs; engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs; - if (INTEL_GEN(engine->i915) >= 12) { + if (GRAPHICS_VER(engine->i915) >= 12) { engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs; engine->emit_flush = gen12_emit_flush_xcs; } @@ -670,7 +644,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) static void rcs_submission_override(struct intel_engine_cs *engine) { - switch (INTEL_GEN(engine->i915)) { + switch (GRAPHICS_VER(engine->i915)) { case 12: engine->emit_flush = gen12_emit_flush_rcs; engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; @@ -700,7 +674,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) * The setup relies on several assumptions (e.g. 
irqs always enabled) * that are only valid on gen11+ */ - GEM_BUG_ON(INTEL_GEN(i915) < 11); + GEM_BUG_ON(GRAPHICS_VER(i915) < 11); tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet); @@ -722,9 +696,6 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) void intel_guc_submission_enable(struct intel_guc *guc) { guc_stage_desc_init(guc); - - /* Take over from manual control of ELSP (execlists) */ - guc_interrupts_capture(guc_to_gt(guc)); } void intel_guc_submission_disable(struct intel_guc *guc) @@ -735,8 +706,6 @@ void intel_guc_submission_disable(struct intel_guc *guc) /* Note: By the time we're here, GuC may have already been reset */ - guc_interrupts_release(gt); - guc_stage_desc_fini(guc); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 56d2144dc6a0..fc5387b410a2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -43,7 +43,7 @@ void intel_huc_init_early(struct intel_huc *huc) intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC); - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; huc->status.mask = HUC_LOAD_SUCCESSFUL; huc->status.value = HUC_LOAD_SUCCESSFUL; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 6abb8f2dc33d..6d8b9233214e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -23,7 +23,7 @@ static void uc_expand_default_options(struct intel_uc *uc) return; /* Don't enable GuC/HuC on pre-Gen12 */ - if (INTEL_GEN(i915) < 12) { + if (GRAPHICS_VER(i915) < 12) { i915->params.enable_guc = 0; return; } @@ -467,7 +467,7 @@ static int __uc_init_hw(struct intel_uc *uc) /* WaEnableuKernelHeaderValidFix:skl */ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ - if (IS_GEN(i915, 9)) + if (GRAPHICS_VER(i915) == 9) attempts = 3; else attempts = 1; @@ -502,10 +502,6 @@ static int __uc_init_hw(struct intel_uc *uc) intel_huc_auth(huc); - ret = intel_guc_sample_forcewake(guc); - if (ret) - goto err_communication; - if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_enable(guc); @@ -529,8 +525,6 @@ static int __uc_init_hw(struct intel_uc *uc) /* * We've failed to load the firmware :( */ -err_communication: - guc_disable_communication(guc); err_log_capture: __uc_capture_load_err_log(uc); err_out: @@ -558,9 +552,6 @@ static void __uc_fini_hw(struct intel_uc *uc) if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_disable(guc); - if (guc_communication_enabled(guc)) - guc_disable_communication(guc); - __uc_sanitize(uc); } @@ -577,7 +568,6 @@ void intel_uc_reset_prepare(struct intel_uc *uc) if (!intel_guc_is_ready(guc)) return; - guc_disable_communication(guc); __uc_sanitize(uc); } |
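
The CT message header defined in guc_communication_ctb_abi.h packs the payload length, control flags and action code into one dword. After this patch ct_write() always sets GUC_CT_MSG_SEND_STATUS, so every H2G request is answered through G2H and matched by fence. A minimal, self-contained C sketch of the DW0 packing; ct_pack_header() is an illustrative helper, not driver code:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GUC_CT_MSG_LEN_SHIFT    0
    #define GUC_CT_MSG_LEN_MASK     0x1F
    #define GUC_CT_MSG_SEND_STATUS  (1 << 10)
    #define GUC_CT_MSG_ACTION_SHIFT 16
    #define GUC_CT_MSG_ACTION_MASK  0xFFFF

    /* DW0 as ct_write() builds it after this patch: payload length in
     * dwords, SEND_STATUS requesting a G2H reply, action code on top. */
    static uint32_t ct_pack_header(uint32_t action, uint32_t len)
    {
            assert(len <= GUC_CT_MSG_LEN_MASK);
            return (len << GUC_CT_MSG_LEN_SHIFT) |
                   GUC_CT_MSG_SEND_STATUS |
                   ((action & GUC_CT_MSG_ACTION_MASK) << GUC_CT_MSG_ACTION_SHIFT);
    }

    int main(void)
    {
            /* e.g. REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505 */
            uint32_t dw0 = ct_pack_header(0x4505, 4);

            printf("dw0=%#010x len=%u action=%#x\n", dw0,
                   (dw0 >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK,
                   (dw0 >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK);
            return 0;
    }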
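
The MMIO header from guc_messages_abi.h composes the same way. The INTEL_GUC_MSG_IS_RESPONSE_SUCCESS macro is cut off mid-definition in the hunk above; assuming it compares type and code against RESPONSE/SUCCESS, a standalone model looks like this (mmio_msg() and is_response_success() are hypothetical names):

    #include <stdint.h>
    #include <stdio.h>

    #define INTEL_GUC_MSG_TYPE_SHIFT 28
    #define INTEL_GUC_MSG_TYPE_MASK  (0xFu << INTEL_GUC_MSG_TYPE_SHIFT)
    #define INTEL_GUC_MSG_DATA_SHIFT 16
    #define INTEL_GUC_MSG_DATA_MASK  (0xFFFu << INTEL_GUC_MSG_DATA_SHIFT)
    #define INTEL_GUC_MSG_CODE_SHIFT 0
    #define INTEL_GUC_MSG_CODE_MASK  (0xFFFFu << INTEL_GUC_MSG_CODE_SHIFT)

    enum { MSG_TYPE_REQUEST = 0x0, MSG_TYPE_RESPONSE = 0xF };

    static uint32_t mmio_msg(uint32_t type, uint32_t data, uint32_t code)
    {
            return ((type << INTEL_GUC_MSG_TYPE_SHIFT) & INTEL_GUC_MSG_TYPE_MASK) |
                   ((data << INTEL_GUC_MSG_DATA_SHIFT) & INTEL_GUC_MSG_DATA_MASK) |
                   ((code << INTEL_GUC_MSG_CODE_SHIFT) & INTEL_GUC_MSG_CODE_MASK);
    }

    /* Success means type == RESPONSE and code == STATUS_SUCCESS (0x0),
     * everything else in the header is ignored by the check. */
    static int is_response_success(uint32_t m)
    {
            return (m & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) ==
                   ((uint32_t)MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT);
    }

    int main(void)
    {
            uint32_t m = mmio_msg(MSG_TYPE_RESPONSE, 0x12, 0x0);

            printf("msg=%#010x success=%d\n", m, is_response_success(m));
            return 0;
    }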
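
The offsets in the DOC: CTB Blob table follow from CTB_DESC_SIZE being the __packed guc_ct_buffer_desc aligned up to 2K; the 64-byte figure below is an assumption derived from the struct's fields, not a driver define. A quick numeric check:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_2K 0x800u
    #define SZ_4K 0x1000u
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    #define DESC_BYTES 64u /* sizeof(struct guc_ct_buffer_desc), __packed */

    #define CTB_DESC_SIZE       ALIGN(DESC_BYTES, SZ_2K)
    #define CTB_H2G_BUFFER_SIZE SZ_4K
    #define CTB_G2H_BUFFER_SIZE SZ_4K

    int main(void)
    {
            uint32_t send_desc = 0;
            uint32_t recv_desc = CTB_DESC_SIZE;
            uint32_t send_cmds = 2 * CTB_DESC_SIZE;
            uint32_t recv_cmds = send_cmds + CTB_H2G_BUFFER_SIZE;
            uint32_t blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
                                 CTB_G2H_BUFFER_SIZE;

            /* Expect 0x0000, 0x0800, 0x1000, 0x2000 and a 12K blob,
             * matching the DOC: CTB Blob table above. */
            printf("send desc %#x recv desc %#x send cmds %#x recv cmds %#x blob %u\n",
                   send_desc, recv_desc, send_cmds, recv_cmds, blob_size);
            return 0;
    }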
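
ct_write() and ct_read() treat cmds[] as a dword ring: the sender owns tail, the receiver owns head, and head == tail means empty, as the wrap handling in ct_read() above shows. A toy model of that arithmetic; the extra reserved dword in the space check mirrors what the unchanged ct_write() context (not shown in this hunk) does and is an assumption here:

    #include <stdint.h>
    #include <stdio.h>

    /* Occupancy: tail - head, corrected by size when the ring wrapped. */
    static uint32_t ring_used(uint32_t head, uint32_t tail, uint32_t size)
    {
            int32_t used = (int32_t)tail - (int32_t)head;

            if (used < 0)
                    used += (int32_t)size;
            return (uint32_t)used;
    }

    /* One dword stays reserved so a completely full ring can never be
     * confused with the empty (head == tail) condition. */
    static int ring_has_space(uint32_t head, uint32_t tail, uint32_t size,
                              uint32_t len)
    {
            return ring_used(head, tail, size) + len + 1 < size;
    }

    int main(void)
    {
            uint32_t size = 1024; /* CTB_H2G_BUFFER_SIZE in dwords */

            /* Wrapped case: tail already restarted from the beginning. */
            printf("used=%u ok=%d\n", ring_used(1000, 10, size),
                   ring_has_space(1000, 10, size, 8));
            return 0;
    }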
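
ct_alloc_msg() sizes each ct_incoming_msg with a C flexible array member, which is what lets the receive path queue messages for the tasklet and worker instead of parsing them off a fixed on-stack array. A userspace analogue of the allocation pattern:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ct_incoming_msg {
            uint32_t size;  /* length in dwords, header included */
            uint32_t msg[]; /* flexible array member */
    };

    /* Mirrors ct_alloc_msg(): one allocation sized for exactly this
     * message, so it can be queued and freed independently. */
    static struct ct_incoming_msg *ct_alloc_msg(uint32_t num_dwords)
    {
            struct ct_incoming_msg *msg;

            msg = malloc(sizeof(*msg) + sizeof(uint32_t) * num_dwords);
            if (msg)
                    msg->size = num_dwords;
            return msg;
    }

    int main(void)
    {
            struct ct_incoming_msg *m = ct_alloc_msg(3);

            if (!m)
                    return 1;
            m->msg[0] = 0x00010002; /* illustrative header: action 0x1, len 2 */
            m->msg[1] = 0x1;
            m->msg[2] = 0x2;
            printf("%u dwords, header %#x\n", m->size, m->msg[0]);
            free(m);
            return 0;
    }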
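
Since every send now waits via wait_for_ct_request_update(), each request parks on ct->requests.pending until a G2H response with a matching fence arrives, and ct_handle_response() walks that list to complete it. A toy model of the fence matching, with deliberately simplified, hypothetical types:

    #include <stdint.h>
    #include <stdio.h>

    struct ct_request {
            struct ct_request *next;
            uint32_t fence;
            uint32_t status;
    };

    /* Walk the pending list the way ct_handle_response() does and
     * return the request whose fence matches, if any. */
    static struct ct_request *find_request(struct ct_request *head,
                                           uint32_t fence)
    {
            struct ct_request *req;

            for (req = head; req; req = req->next)
                    if (req->fence == fence)
                            return req;
            return NULL;
    }

    int main(void)
    {
            struct ct_request a = { .fence = 1 }, b = { .fence = 2 };
            struct ct_request *req;

            a.next = &b;
            req = find_request(&a, 2);
            if (req)
                    req->status = 0xF0000000; /* RESPONSE type, SUCCESS code */
            printf("fence 2 %s\n", req ? "completed" : "unsolicited");
            return 0;
    }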
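
Finally, engine_class_to_guc_class() can be a plain pass-through only because the new GUC_*_CLASS numbering happens to match i915's engine classes; the BUILD_BUG_ONs keep that honest. The same idea in self-contained C11, with the i915 class values assumed from intel_engine_types.h:

    #include <stdint.h>
    #include <stdio.h>

    /* i915 engine class values (assumed, not defined in this patch). */
    enum {
            RENDER_CLASS            = 0,
            VIDEO_DECODE_CLASS      = 1,
            VIDEO_ENHANCEMENT_CLASS = 2,
            COPY_ENGINE_CLASS       = 3,
    };

    /* GuC ABI classes from intel_guc_fwif.h after this patch. */
    enum {
            GUC_RENDER_CLASS        = 0,
            GUC_VIDEO_CLASS         = 1,
            GUC_VIDEOENHANCE_CLASS  = 2,
            GUC_BLITTER_CLASS       = 3,
    };

    /* The conversion is the identity today; these asserts (BUILD_BUG_ON
     * in the kernel) break the build if either side is renumbered. */
    _Static_assert(GUC_RENDER_CLASS == RENDER_CLASS, "render");
    _Static_assert(GUC_VIDEO_CLASS == VIDEO_DECODE_CLASS, "video");
    _Static_assert(GUC_VIDEOENHANCE_CLASS == VIDEO_ENHANCEMENT_CLASS, "vebox");
    _Static_assert(GUC_BLITTER_CLASS == COPY_ENGINE_CLASS, "blitter");

    static uint8_t engine_class_to_guc_class(uint8_t class)
    {
            return class;
    }

    int main(void)
    {
            printf("copy engine -> guc class %u\n",
                   engine_class_to_guc_class(COPY_ENGINE_CLASS));
            return 0;
    }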