author		Lucas Stach <l.stach@pengutronix.de>	2017-02-08 10:27:37 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2017-02-09 21:53:04 +1100
commit		7e487b51b3e6b8de4aa64f7d399ebc91c3d92906 (patch)
tree		a79e66b27fd70286a14c4b4c7430115eb93eff88 /mm
parent		9900747eb5d14f0e45bb95fd4b8b7822b8663765 (diff)
mm: alloc_contig_range: allow to specify GFP mask
Currently alloc_contig_range assumes that compaction should be done with the default GFP_KERNEL flags. This is probably right for all current uses of this interface, but may change as CMA is used in more use cases (including being the default DMA memory allocator on some platforms).

Change the function prototype to allow passing through the GFP mask set by upper layers.

Also respect global restrictions by applying memalloc_noio_flags to the passed-in flags.

Link: http://lkml.kernel.org/r/20170127172328.18574-1-l.stach@pengutronix.de
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Alexander Graf <agraf@suse.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
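For illustration, a caller that must not recurse into filesystems could now request a stricter mask than the previously hardcoded GFP_KERNEL. A minimal sketch against the new prototype; the wrapper name and the GFP_NOFS choice are illustrative, not part of this patch:

/*
 * Sketch only: exercises the new alloc_contig_range() prototype.
 * The wrapper name and the GFP_NOFS choice are hypothetical.
 */
static int example_alloc_contig_nofs(unsigned long start_pfn,
				     unsigned long nr_pages)
{
	/* Compaction now honours GFP_NOFS instead of assuming GFP_KERNEL. */
	return alloc_contig_range(start_pfn, start_pfn + nr_pages,
				  MIGRATE_MOVABLE, GFP_NOFS);
}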
Diffstat (limited to 'mm')
-rw-r--r--	mm/cma.c	3
-rw-r--r--	mm/hugetlb.c	3
-rw-r--r--	mm/page_alloc.c	5
3 files changed, 7 insertions, 4 deletions
diff --git a/mm/cma.c b/mm/cma.c
index c960459eda7e..fbd67d866f67 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -407,7 +407,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
+					 GFP_KERNEL);
 		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
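The cma_alloc() call site keeps GFP_KERNEL for now. As the commit message notes, upper layers may eventually want to set the mask themselves; a hypothetical follow-up plumbing a caller-supplied mask through CMA might look like the sketch below. The function name and the cma_find_free_range() helper are invented for illustration:

/* Hypothetical: a cma_alloc() variant that forwards the caller's mask. */
struct page *cma_alloc_gfp(struct cma *cma, size_t count,
			   unsigned int align, gfp_t gfp_mask)
{
	unsigned long pfn;
	int ret;

	/* Bitmap search elided; cma_find_free_range() is a made-up helper
	 * standing in for the lookup loop in cma_alloc(). */
	pfn = cma_find_free_range(cma, count, align);

	mutex_lock(&cma_mutex);
	/* Pass the caller's mask through instead of hardcoding GFP_KERNEL. */
	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask);
	mutex_unlock(&cma_mutex);

	return ret ? NULL : pfn_to_page(pfn);
}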
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1b8789a2395d..d0d1d083c432 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1052,7 +1052,8 @@ static int __alloc_gigantic_page(unsigned long start_pfn,
 				unsigned long nr_pages)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
-	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+				  GFP_KERNEL);
 }
 
 static bool pfn_range_valid_gigantic(struct zone *z,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a962a8a1858d..2325fa09030f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7401,6 +7401,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
  *			in range must have the same migratetype and it must
  *			be either of the two.
+ * @gfp_mask:	GFP mask to use during compaction
  *
  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
  * aligned, however it's the caller's responsibility to guarantee that
@@ -7414,7 +7415,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
  * need to be freed with free_contig_range().
  */
 int alloc_contig_range(unsigned long start, unsigned long end,
-		       unsigned migratetype)
+		       unsigned migratetype, gfp_t gfp_mask)
 {
 	unsigned long outer_start, outer_end;
 	unsigned int order;
@@ -7426,7 +7427,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		.zone = page_zone(pfn_to_page(start)),
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
-		.gfp_mask = GFP_KERNEL,
+		.gfp_mask = memalloc_noio_flags(gfp_mask),
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
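Note the memalloc_noio_flags() call: if the current task is inside a scoped NOIO section (PF_MEMALLOC_NOIO set via memalloc_noio_save()), it strips __GFP_IO and __GFP_FS from whatever mask the caller passed, so the global restriction is honoured regardless of the argument. A sketch of that interaction, illustrative and not part of this patch:

/*
 * Illustrative only: a task in a scoped NOIO section calls
 * alloc_contig_range() with GFP_KERNEL; the memalloc_noio_flags()
 * call above then masks out __GFP_IO/__GFP_FS for compaction.
 */
static int example_noio_section(unsigned long pfn, unsigned long nr_pages)
{
	unsigned int noio_save = memalloc_noio_save();
	int ret;

	/* Requests GFP_KERNEL, but compaction runs without __GFP_IO/__GFP_FS. */
	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
				 GFP_KERNEL);

	memalloc_noio_restore(noio_save);
	return ret;
}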