Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 125
1 file changed, 73 insertions(+), 52 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7a2c89b21115..783913e41f65 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -423,6 +423,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
 		return false;
 
+	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+		return true;
 	/*
 	 * We start only with one section of pages, more pages are added as
 	 * needed until the rest of deferred pages are initialized.
@@ -1205,8 +1207,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
 	for (i = 0; i < numpages; i++) {
+		u8 tag = page_kasan_tag(page + i);
 		page_kasan_tag_reset(page + i);
 		clear_highpage(page + i);
+		page_kasan_tag_set(page + i, tag);
 	}
 	kasan_enable_current();
 }
@@ -2860,20 +2864,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
-#ifdef CONFIG_CMA
-	/*
-	 * Balance movable allocations between regular and CMA areas by
-	 * allocating from CMA when over half of the zone's free memory
-	 * is in the CMA area.
-	 */
-	if (alloc_flags & ALLOC_CMA &&
-	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-		page = __rmqueue_cma_fallback(zone, order);
-		if (page)
-			return page;
+	if (IS_ENABLED(CONFIG_CMA)) {
+		/*
+		 * Balance movable allocations between regular and CMA areas by
+		 * allocating from CMA when over half of the zone's free memory
+		 * is in the CMA area.
+		 */
+		if (alloc_flags & ALLOC_CMA &&
+		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
+		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+			page = __rmqueue_cma_fallback(zone, order);
+			if (page)
+				goto out;
+		}
 	}
-#endif
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
@@ -2884,8 +2888,9 @@ retry:
 							alloc_flags))
 			goto retry;
 	}
-
-	trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+	if (page)
+		trace_mm_page_alloc_zone_locked(page, order, migratetype);
 	return page;
 }
 
@@ -6116,7 +6121,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn,
+		unsigned long start_pfn, unsigned long zone_end_pfn,
 		enum meminit_context context,
 		struct vmem_altmap *altmap, int migratetype)
 {
@@ -6152,7 +6157,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (context == MEMINIT_EARLY) {
 			if (overlap_memmap_init(zone, &pfn))
 				continue;
-			if (defer_init(nid, pfn, end_pfn))
+			if (defer_init(nid, pfn, zone_end_pfn))
 				break;
 		}
 
@@ -6266,7 +6271,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 
 		if (end_pfn > start_pfn) {
 			size = end_pfn - start_pfn;
-			memmap_init_zone(size, nid, zone, start_pfn,
+			memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
 				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 		}
 	}
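[Editor's aside] The __rmqueue() hunk above converts an #ifdef CONFIG_CMA block into an IS_ENABLED(CONFIG_CMA) branch. The idea is that IS_ENABLED() folds to a compile-time constant, so the compiler still eliminates the dead branch when CMA is off, but the code is always parsed and type-checked. A minimal standalone sketch of this pattern; the IS_ENABLED() stub and cma_fallback() here are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Hypothetical stand-ins for the kconfig machinery; in the kernel these
 * come from <linux/kconfig.h> and the generated autoconf.h. */
#define CONFIG_CMA_ENABLED 1		/* pretend CONFIG_CMA=y */
#define IS_ENABLED(option) (option)	/* real macro is more involved */

static int cma_fallback(void)
{
	return 42; /* pretend we pulled a page from the CMA area */
}

int main(void)
{
	int page = 0;

	if (IS_ENABLED(CONFIG_CMA_ENABLED)) {
		/* With the constant at 0 this branch compiles but is
		 * dropped by the optimizer, unlike an #ifdef'ed block,
		 * which is never even seen by the compiler. */
		page = cma_fallback();
	}
	printf("page = %d\n", page);
	return 0;
}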
@@ -7075,23 +7080,26 @@ void __init free_area_init_memoryless_node(int nid)
  * Initialize all valid struct pages in the range [spfn, epfn) and mark them
  * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
+					 int zone, int nid)
 {
-	unsigned long pfn;
+	unsigned long pfn, zone_spfn, zone_epfn;
 	u64 pgcnt = 0;
 
+	zone_spfn = arch_zone_lowest_possible_pfn[zone];
+	zone_epfn = arch_zone_highest_possible_pfn[zone];
+
+	spfn = clamp(spfn, zone_spfn, zone_epfn);
+	epfn = clamp(epfn, zone_spfn, zone_epfn);
+
 	for (pfn = spfn; pfn < epfn; pfn++) {
 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
 				+ pageblock_nr_pages - 1;
 			continue;
 		}
-		/*
-		 * Use a fake node/zone (0) for now. Some of these pages
-		 * (in memblock.reserved but not in memblock.memory) will
-		 * get re-initialized via reserve_bootmem_region() later.
-		 */
-		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+
+		__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
 		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
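[Editor's aside] The hunk above confines each hole to the zone being initialized by clamping [spfn, epfn) to the zone's possible PFN span before the loop. A toy illustration with made-up PFN values; the local clamp() macro mirrors the kernel's clamp() semantics but is a stand-in, not kernel code:

#include <stdio.h>

#define clamp(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
	/* Hypothetical zone spanning PFNs [0x1000, 0x100000) and a
	 * memblock.memory hole at [0x800, 0x2000). */
	unsigned long zone_spfn = 0x1000, zone_epfn = 0x100000;
	unsigned long spfn = 0x800, epfn = 0x2000;

	spfn = clamp(spfn, zone_spfn, zone_epfn);
	epfn = clamp(epfn, zone_spfn, zone_epfn);

	/* Only the part of the hole inside this zone gets initialized:
	 * here [0x1000, 0x2000). A hole entirely outside the zone would
	 * clamp to an empty range and be skipped. */
	if (spfn < epfn)
		printf("init hole pfns [%#lx, %#lx)\n", spfn, epfn);
	return 0;
}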
@@ -7100,51 +7108,64 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 }
 
 /*
- * Only struct pages that are backed by physical memory are zeroed and
- * initialized by going through __init_single_page(). But, there are some
- * struct pages which are reserved in memblock allocator and their fields
- * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly initialize those struct pages.
+ * Only struct pages that correspond to ranges defined by memblock.memory
+ * are zeroed and initialized by going through __init_single_page() during
+ * memmap_init().
+ *
+ * But, there could be struct pages that correspond to holes in
+ * memblock.memory. This can happen for the following reasons:
+ * - physical memory bank size is not necessarily an exact multiple of the
+ *   arbitrary section size
+ * - early reserved memory may not be listed in memblock.memory
+ * - memory layouts defined with the memmap= kernel parameter may not align
+ *   nicely with memmap sections
  *
- * This function also addresses a similar issue where struct pages are left
- * uninitialized because the physical address range is not covered by
- * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=, or when the highest physical
- * address (max_pfn) does not end on a section boundary.
+ * Explicitly initialize those struct pages so that:
+ * - PG_reserved is set
+ * - the zone link is set according to the architecture constraints
+ * - the node is set to the node id of the next populated region, except
+ *   for the trailing hole, where the last node id is used
  */
-static void __init init_unavailable_mem(void)
+static void __init init_zone_unavailable_mem(int zone)
 {
-	phys_addr_t start, end;
-	u64 i, pgcnt;
-	phys_addr_t next = 0;
+	unsigned long start, end;
+	int i, nid;
+	u64 pgcnt;
+	unsigned long next = 0;
 
 	/*
-	 * Loop through unavailable ranges not covered by memblock.memory.
+	 * Loop through holes in memblock.memory and initialize struct
+	 * pages corresponding to these holes
 	 */
 	pgcnt = 0;
-	for_each_mem_range(i, &start, &end) {
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 		if (next < start)
-			pgcnt += init_unavailable_range(PFN_DOWN(next),
-							PFN_UP(start));
+			pgcnt += init_unavailable_range(next, start, zone, nid);
 		next = end;
 	}
 
 	/*
-	 * Early sections always have a fully populated memmap for the whole
-	 * section - see pfn_valid(). If the last section has holes at the
-	 * end and that section is marked "online", the memmap will be
-	 * considered initialized. Make sure that memmap has a well defined
-	 * state.
+	 * The last section may surpass the actual end of memory (e.g. we can
+	 * have a 1GB section with only 512MB of RAM populated).
+	 * Make sure that the memmap has a well defined state in this case.
 	 */
-	pgcnt += init_unavailable_range(PFN_DOWN(next),
-					round_up(max_pfn, PAGES_PER_SECTION));
+	end = round_up(max_pfn, PAGES_PER_SECTION);
+	pgcnt += init_unavailable_range(next, end, zone, nid);
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
 	 * firmware is using some of this memory, or for some other reasons.
 	 */
 	if (pgcnt)
-		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
+		pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
+}
+
+static void __init init_unavailable_mem(void)
+{
+	int zone;
+
+	for (zone = 0; zone < ZONE_MOVABLE; zone++)
+		init_zone_unavailable_mem(zone);
 }
 #else
 static inline void __init init_unavailable_mem(void)
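[Editor's aside] For reference, the kernel_init_free_pages() hunk near the top of the diff follows a save/restore idiom: stash the page's KASAN tag, reset it to the match-all value so the clear cannot trip hardware tag checks, then put the tag back so tagged pointers keep matching their memory. A standalone sketch of that idiom with stub types; this is not the kernel API, just the shape of the pattern:

#include <stdio.h>

/* Stub stand-ins for struct page and the KASAN tag helpers. */
struct page { unsigned char kasan_tag; char data[16]; };

static unsigned char page_kasan_tag(struct page *p) { return p->kasan_tag; }
static void page_kasan_tag_set(struct page *p, unsigned char t) { p->kasan_tag = t; }
static void page_kasan_tag_reset(struct page *p) { p->kasan_tag = 0xff; }

static void clear_page_data(struct page *p)
{
	for (unsigned int i = 0; i < sizeof(p->data); i++)
		p->data[i] = 0;
}

int main(void)
{
	struct page pg = { .kasan_tag = 0x2a };
	unsigned char tag = page_kasan_tag(&pg);	/* save */

	page_kasan_tag_reset(&pg);	/* match-all tag while clearing */
	clear_page_data(&pg);
	page_kasan_tag_set(&pg, tag);	/* restore: tag survives the clear */

	printf("tag after clear: %#x\n", pg.kasan_tag);
	return 0;
}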