From 938b793fdede518e10c67dc1dcfba1804faf98a6 Mon Sep 17 00:00:00 2001
From: Yuri Nudelman
Date: Sun, 6 Jun 2021 10:28:51 +0300
Subject: habanalabs: expose state dump

To improve the user's ability to debug the case where a workload that
is part of executing training/inference of a topology is getting stuck,
we need to add a 'core dump' each time a CS times out.

The 'core dump' shall contain all relevant Sync Manager information and
corresponding fence values.

The most recent dumps shall be accessible via debugfs, under the
'state_dump' node. Reading from the node will provide the newest dump
available. Writing an integer value X will discard X dumps, starting
with the newest one, i.e. a subsequent read will return an older dump.

Signed-off-by: Yuri Nudelman
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 Documentation/ABI/testing/debugfs-driver-habanalabs | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index a5c28f606865..e29156511388 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -215,6 +215,17 @@ Description:    Sets the skip reset on timeout option for the device. Value of
                 "0" means device will be reset in case some CS has timed out,
                 otherwise it will not be reset.
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/state_dump
+Date:           Oct 2021
+KernelVersion:  5.15
+Contact:        ynudelman@habana.ai
+Description:    Gets the state dump occurring on a CS timeout or failure.
+                State dump is used for debug and is created each time in case
+                of a problem in a CS execution, before reset.
+                Reading from the node returns the newest state dump available.
+                Writing an integer X discards X state dumps, so that the next
+                read would return the (X+1)-st newest state dump.
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
 Date:           Mar 2020
 KernelVersion:  5.6
-- cgit v1.2.3
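As a quick illustration of the ABI entry above, the following userspace
sketch reads the newest state dump and then discards it. This example is
not part of the patch: the device index (hl0) and the discard count are
assumptions, and the debugfs mount point is taken to be the default.

/* Hedged usage sketch for the state_dump node (not from the driver). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DUMP_NODE "/sys/kernel/debug/habanalabs/hl0/state_dump"

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Reading returns the newest state dump available. */
	fd = open(DUMP_NODE, O_RDONLY);
	if (fd < 0) {
		perror("open " DUMP_NODE);
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);

	/*
	 * Writing an integer X discards the X newest dumps; with X = 1,
	 * the next read returns the second-newest dump.
	 */
	fd = open(DUMP_NODE, O_WRONLY);
	if (fd < 0) {
		perror("open " DUMP_NODE);
		return 1;
	}
	if (write(fd, "1", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}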
From 89b213657c71aa292921f72a81d405679b1ea8df Mon Sep 17 00:00:00 2001
From: Yuri Nudelman
Date: Thu, 29 Jul 2021 11:44:28 +0300
Subject: habanalabs: add userptr_lookup node in debugfs

It is useful to have the ability to see which user address was pinned
to which physical address during the initial mapping. We already have
all that info stored, but no means to search this data (which may be
quite large).

Signed-off-by: Yuri Nudelman
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 .../ABI/testing/debugfs-driver-habanalabs   |  8 +++
 drivers/misc/habanalabs/common/debugfs.c    | 72 ++++++++++++++++++++++
 drivers/misc/habanalabs/common/habanalabs.h | 19 ++++++
 drivers/misc/habanalabs/common/memory.c     | 21 +------
 4 files changed, 101 insertions(+), 19 deletions(-)

diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index e29156511388..284e2dfa61cd 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -241,6 +241,14 @@ Description:    Displays a list with information about the current user
                 pointers (user virtual addresses) that are pinned and mapped
                 to DMA addresses
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/userptr_lookup
+Date:           Aug 2021
+KernelVersion:  5.15
+Contact:        ogabbay@kernel.org
+Description:    Allows searching for specific user pointers (user virtual
+                addresses) that are pinned and mapped to DMA addresses, and
+                seeing their resolution to the specific DMA address.
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/vm
 Date:           Jan 2019
 KernelVersion:  5.1
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 6a7df57883d9..985f1f3dbd20 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -320,6 +320,77 @@ static int vm_show(struct seq_file *s, void *data)
 	return 0;
 }
 
+static int userptr_lookup_show(struct seq_file *s, void *data)
+{
+	struct hl_debugfs_entry *entry = s->private;
+	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+	struct scatterlist *sg;
+	struct hl_userptr *userptr;
+	bool first = true;
+	u64 total_npages, npages, sg_start, sg_end;
+	dma_addr_t dma_addr;
+	int i;
+
+	spin_lock(&dev_entry->userptr_spinlock);
+
+	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
+		if (dev_entry->userptr_lookup >= userptr->addr &&
+		dev_entry->userptr_lookup < userptr->addr + userptr->size) {
+			total_npages = 0;
+			for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents,
+					i) {
+				npages = hl_get_sg_info(sg, &dma_addr);
+				sg_start = userptr->addr +
+					total_npages * PAGE_SIZE;
+				sg_end = userptr->addr +
+					(total_npages + npages) * PAGE_SIZE;
+
+				if (dev_entry->userptr_lookup >= sg_start &&
+				    dev_entry->userptr_lookup < sg_end) {
+					dma_addr += (dev_entry->userptr_lookup -
+							sg_start);
+					if (first) {
+						first = false;
+						seq_puts(s, "\n");
+						seq_puts(s, " user virtual address         dma address       pid        region start     region size\n");
+						seq_puts(s, "---------------------------------------------------------------------------------------\n");
+					}
+					seq_printf(s, " 0x%-18llx  0x%-16llx  %-8u  0x%-16llx %-12llu\n",
+						dev_entry->userptr_lookup,
+						(u64)dma_addr, userptr->pid,
+						userptr->addr, userptr->size);
+				}
+				total_npages += npages;
+			}
+		}
+	}
+
+	spin_unlock(&dev_entry->userptr_spinlock);
+
+	if (!first)
+		seq_puts(s, "\n");
+
+	return 0;
+}
+
+static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	struct seq_file *s = file->private_data;
+	struct hl_debugfs_entry *entry = s->private;
+	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+	ssize_t rc;
+	u64 value;
+
+	rc = kstrtoull_from_user(buf, count, 16, &value);
+	if (rc)
+		return rc;
+
+	dev_entry->userptr_lookup = value;
+
+	return count;
+}
+
 static int mmu_show(struct seq_file *s, void *data)
 {
 	struct hl_debugfs_entry *entry = s->private;
@@ -1175,6 +1246,7 @@ static const struct hl_info_list hl_debugfs_list[] = {
 	{"command_submission_jobs", command_submission_jobs_show, NULL},
 	{"userptr", userptr_show, NULL},
 	{"vm", vm_show, NULL},
+	{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
 	{"mmu", mmu_show, mmu_asid_va_write},
 	{"engines", engines_show, NULL}
 };
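To show how the two handlers above are meant to be driven: the written
string is parsed as hex (matching the kstrtoull_from_user(..., 16, ...)
call in userptr_lookup_write), and a subsequent read triggers
userptr_lookup_show. The sketch below is a hypothetical userspace
example, not part of the patch; the device index (hl0) and the address
value are assumptions.

/* Hedged usage sketch for the userptr_lookup node (not from the driver). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define LOOKUP_NODE "/sys/kernel/debug/habanalabs/hl0/userptr_lookup"

int main(void)
{
	const char *addr = "0x7f2c4e310000";	/* user VA to resolve, in hex */
	char buf[512];
	ssize_t n;
	int fd;

	/* Set the lookup target. */
	fd = open(LOOKUP_NODE, O_WRONLY);
	if (fd < 0) {
		perror("open " LOOKUP_NODE);
		return 1;
	}
	if (write(fd, addr, strlen(addr)) < 0)
		perror("write");
	close(fd);

	/* Prints the pinning that contains the address, if any. */
	fd = open(LOOKUP_NODE, O_RDONLY);
	if (fd < 0) {
		perror("open " LOOKUP_NODE);
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}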
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index c4a482bdfb7c..1ca3a920ea6d 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -1816,6 +1816,7 @@ struct hl_debugfs_entry {
  * @state_dump_sem: protects state_dump.
  * @addr: next address to read/write from/to in read/write32.
  * @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @userptr_lookup: the target user ptr to look up on demand.
  * @mmu_asid: ASID to use while translating in mmu_show.
  * @state_dump_head: index of the latest state dump
  * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
@@ -1843,6 +1844,7 @@ struct hl_dbg_device_entry {
 	struct rw_semaphore state_dump_sem;
 	u64 addr;
 	u64 mmu_addr;
+	u64 userptr_lookup;
 	u32 mmu_asid;
 	u32 state_dump_head;
 	u8 i2c_bus;
@@ -2647,6 +2649,23 @@ struct hl_ioctl_desc {
  * Kernel module functions that can be accessed by entire module
  */
 
+/**
+ * hl_get_sg_info() - get number of pages and the DMA address from SG list.
+ * @sg: the SG list.
+ * @dma_addr: pointer to DMA address to return.
+ *
+ * Calculate the number of consecutive pages described by the SG list. Take the
+ * offset of the address in the first page, add to it the length and round it up
+ * to the number of needed pages.
+ */
+static inline u32 hl_get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
+{
+	*dma_addr = sg_dma_address(sg);
+
+	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
+			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+}
+
 /**
  * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
  * @address: The start address of the area we want to validate.
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 908a3277abaa..b64f87b5e05c 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -796,23 +796,6 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
 	return rc;
 }
 
-/**
- * get_sg_info() - get number of pages and the DMA address from SG list.
- * @sg: the SG list.
- * @dma_addr: pointer to DMA address to return.
- *
- * Calculate the number of consecutive pages described by the SG list. Take the
- * offset of the address in the first page, add to it the length and round it up
- * to the number of needed pages.
- */
-static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
-{
-	*dma_addr = sg_dma_address(sg);
-
-	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
-			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
-}
-
 /**
  * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
  *                                    memory
@@ -863,7 +846,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 	 */
 	total_npages = 0;
 	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
-		npages = get_sg_info(sg, &dma_addr);
+		npages = hl_get_sg_info(sg, &dma_addr);
 
 		total_npages += npages;
 
@@ -892,7 +875,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 	j = 0;
 	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
-		npages = get_sg_info(sg, &dma_addr);
+		npages = hl_get_sg_info(sg, &dma_addr);
 
 		/* align down to physical page size and save the offset */
 		if (first) {
-- cgit v1.2.3
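One detail of hl_get_sg_info() worth spelling out is the page-count
rounding: the offset of the DMA address within its first page is added
to the segment length, and the sum is rounded up to whole pages. Below
is a standalone sketch of the same arithmetic, assuming 4 KiB pages;
the function name and values are illustrative, not from the driver.

/* Standalone illustration of hl_get_sg_info()'s page-count arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_SHIFT 12

static uint32_t npages_for(uint64_t dma_addr, uint32_t len)
{
	/* offset within the first page, plus the length, rounded up */
	return (uint32_t)((((dma_addr & (PAGE_SIZE - 1)) + len) +
			(PAGE_SIZE - 1)) >> PAGE_SHIFT);
}

int main(void)
{
	/*
	 * A segment starting 0x100 bytes into a page and spanning 0x2000
	 * bytes touches three pages: 0x100 + 0x2000 = 0x2100, rounded up.
	 */
	printf("%u\n", npages_for(0x1000100, 0x2000));	/* prints 3 */
	/* A page-aligned segment of the same length touches only two. */
	printf("%u\n", npages_for(0x1000000, 0x2000));	/* prints 2 */
	return 0;
}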