From 93b90414c33f59b7960bc8d607da0ce83377e021 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 3 Dec 2019 12:10:13 +0000
Subject: arm64: mm: Fix initialisation of DMA zones on non-NUMA systems

John reports that the recently merged commit 1a8e1cef7603 ("arm64: use
both ZONE_DMA and ZONE_DMA32") breaks the boot on his DB845C board:

| Booting Linux on physical CPU 0x0000000000 [0x517f803c]
| Linux version 5.4.0-mainline-10675-g957a03b9e38f
| Machine model: Thundercomm Dragonboard 845c
| [...]
| Built 1 zonelists, mobility grouping on.  Total pages: -188245
| Kernel command line: earlycon
| firmware_class.path=/vendor/firmware/ androidboot.hardware=db845c
| init=/init androidboot.boot_devices=soc/1d84000.ufshc
| printk.devkmsg=on buildvariant=userdebug root=/dev/sda2
| androidboot.bootdevice=1d84000.ufshc androidboot.serialno=c4e1189c
| androidboot.baseband=sda
| msm_drm.dsi_display0=dsi_lt9611_1080_video_display:
| androidboot.slot_suffix=_a skip_initramfs rootwait ro init=/init
|

This is because, when CONFIG_NUMA=n, zone_sizes_init() fails to handle
memblocks that fall entirely within the ZONE_DMA region and erroneously
ends up trying to add a negatively-sized region into the following
ZONE_DMA32, which is later interpreted as a large unsigned region by
the core MM code.

Rework the non-NUMA implementation of zone_sizes_init() so that the
start address of the memblock being processed is adjusted according to
the end of the previous zone, which is then range-checked before
updating the hole information of subsequent zones.

Cc: Nicolas Saenz Julienne
Cc: Christoph Hellwig
Cc: Bjorn Andersson
Link: https://lore.kernel.org/lkml/CALAqxLVVcsmFrDKLRGRq7GewcW405yTOxG=KR3csVzQ6bXutkA@mail.gmail.com
Fixes: 1a8e1cef7603 ("arm64: use both ZONE_DMA and ZONE_DMA32")
Reported-by: John Stultz
Tested-by: John Stultz
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/init.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index be9481cdf3b9..b65dffdfb201 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -214,15 +214,14 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long max_dma32 = min;
-	unsigned long __maybe_unused max_dma = min;
+	unsigned long __maybe_unused max_dma, max_dma32;
 
 	memset(zone_size, 0, sizeof(zone_size));
 
+	max_dma = max_dma32 = min;
 #ifdef CONFIG_ZONE_DMA
-	max_dma = PFN_DOWN(arm64_dma_phys_limit);
+	max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
 	zone_size[ZONE_DMA] = max_dma - min;
-	max_dma32 = max_dma;
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
@@ -236,25 +235,23 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		unsigned long start = memblock_region_memory_base_pfn(reg);
 		unsigned long end = memblock_region_memory_end_pfn(reg);
 
-		if (start >= max)
-			continue;
 #ifdef CONFIG_ZONE_DMA
-		if (start < max_dma) {
-			unsigned long dma_end = min_not_zero(end, max_dma);
+		if (start >= min && start < max_dma) {
+			unsigned long dma_end = min(end, max_dma);
 			zhole_size[ZONE_DMA] -= dma_end - start;
+			start = dma_end;
 		}
 #endif
 #ifdef CONFIG_ZONE_DMA32
-		if (start < max_dma32) {
+		if (start >= max_dma && start < max_dma32) {
 			unsigned long dma32_end = min(end, max_dma32);
-			unsigned long dma32_start = max(start, max_dma);
-			zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
+			zhole_size[ZONE_DMA32] -= dma32_end - start;
+			start = dma32_end;
 		}
 #endif
-		if (end > max_dma32) {
+		if (start >= max_dma32 && start < max) {
 			unsigned long normal_end = min(end, max);
-			unsigned long normal_start = max(start, max_dma32);
-			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+			zhole_size[ZONE_NORMAL] -= normal_end - start;
 		}
 	}
--
cgit v1.2.3
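
To see the arithmetic failure the commit message describes, here is a
minimal user-space sketch of the old DMA32 hole accounting (illustration
only, not kernel code: the zone boundaries and memblock PFNs below are
invented, and the surrounding memblock loop and ZONE_DMA handling are
elided):

/*
 * A memblock that lies entirely below max_dma still satisfies the old
 * "start < max_dma32" test. dma32_end then falls below dma32_start, the
 * unsigned subtraction wraps, and zhole_size[ZONE_DMA32] ends up larger
 * than the zone itself -- which the core MM later reports as a negative
 * page count, as in the "Total pages: -188245" line of the boot log.
 */
#include <stdio.h>

int main(void)
{
	unsigned long max_dma   = 0x100000;	/* end of ZONE_DMA, in pages   */
	unsigned long max_dma32 = 0x200000;	/* end of ZONE_DMA32, in pages */

	/* A memblock sitting entirely inside ZONE_DMA. */
	unsigned long start = 0x080000;
	unsigned long end   = 0x0c0000;

	/* The DMA32 hole starts out covering the whole zone. */
	unsigned long zhole_dma32 = max_dma32 - max_dma;

	/* Old condition: true even though this block has no DMA32 part. */
	if (start < max_dma32) {
		unsigned long dma32_end   = end < max_dma32 ? end : max_dma32;
		unsigned long dma32_start = start > max_dma ? start : max_dma;

		/* dma32_end (0xc0000) < dma32_start (0x100000): wraps. */
		zhole_dma32 -= dma32_end - dma32_start;
	}

	/* Prints 0x140000: a "hole" bigger than the 0x100000-page zone. */
	printf("zhole_size[ZONE_DMA32] = 0x%lx\n", zhole_dma32);
	return 0;
}

In the reworked loop above, the ZONE_DMA branch advances start to
dma_end, and the new "start >= max_dma" check keeps such a block out of
the DMA32 accounting entirely.
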
From cba779d80a5d4ccb8bdeb799abd02bf7ba9be111 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Thu, 21 Nov 2019 13:51:32 +0000
Subject: arm64: mm: Fix column alignment for UXN in kernel_page_tables

UXN is the only individual PTE bit other than the PTE_ATTRINDX_MASK
ones which doesn't have both a set and a clear value provided, meaning
that the columns in the table won't all be aligned. The
PTE_ATTRINDX_MASK values are all both mutually exclusive and longer, so
they are listed last to make a single final column for those values.
Ensure everything is aligned by providing a clear value for UXN.

Acked-by: Mark Rutland
Signed-off-by: Mark Brown
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/dump.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 93f9f77582ae..0a920b538a89 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -142,6 +142,7 @@ static const struct prot_bits pte_bits[] = {
 		.mask	= PTE_UXN,
 		.val	= PTE_UXN,
 		.set	= "UXN",
+		.clear	= "   ",
 	}, {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
--
cgit v1.2.3
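
To illustrate the alignment problem this commit fixes, here is a small
user-space sketch modelled loosely on the prot_bits printing loop in
arch/arm64/mm/dump.c (simplified: the masks, strings, and widths below
are invented): an entry without a .clear string prints nothing when its
bit is unset, so every later column in that row shifts left.

#include <stdio.h>

struct prot_bits {
	unsigned long mask;
	unsigned long val;
	const char *set;
	const char *clear;
};

static const struct prot_bits bits[] = {
	{ 1UL << 0, 1UL << 0, "USER", "    " },		/* padded clear value */
	{ 1UL << 1, 1UL << 1, "UXN",  NULL   },		/* pre-fix: no clear  */
	{ 1UL << 2, 1UL << 2, "DEVICE/nGnRnE", NULL },	/* final column       */
};

static void dump_prot(unsigned long prot)
{
	for (size_t i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
		const char *s = (prot & bits[i].mask) == bits[i].val ?
				bits[i].set : bits[i].clear;

		if (s)	/* a NULL string prints nothing at all */
			printf(" %s", s);
	}
	printf("\n");
}

int main(void)
{
	dump_prot(0x7);	/* " USER UXN DEVICE/nGnRnE"             */
	dump_prot(0x5);	/* " USER DEVICE/nGnRnE": column shifted */
	return 0;
}

Giving UXN a .clear value of three spaces (the width of "UXN") makes the
unset case occupy the same width as the set case, which is what the
patch does.
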