author    Jason Gunthorpe <jgg@nvidia.com>    2021-01-19 10:55:19 -0400
committer Jason Gunthorpe <jgg@nvidia.com>    2021-01-19 10:55:19 -0400
commit    02487bcc84cdec676b667096f896ba8ad254aae9
tree      df66e03b2c63f3488326357882f84b16ed532e1b  /mm/page_alloc.c
parent    a6dc16b6996388d016df83fb92eae16242ab7ac5
parent    1368ead04c361bb4595ace9122c79dd75e54a650
Merge branch 'devx_set_get' into rdma.git for-next
Leon Romanovsky says:

====================
Be more strict with DEVX get/set operations for the obj_id.
====================

Based on the mlx5-next branch at
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
due to dependencies.

* branch 'devx_set_get':
  RDMA/mlx5: Use strict get/set operations for obj_id
  RDMA/mlx5: Use the correct obj_id upon DEVX TIR creation
  net/mlx5: Expose ifc bits for query modify header
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdbec4c98173..027f6481ba59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2862,20 +2862,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
-#ifdef CONFIG_CMA
-	/*
-	 * Balance movable allocations between regular and CMA areas by
-	 * allocating from CMA when over half of the zone's free memory
-	 * is in the CMA area.
-	 */
-	if (alloc_flags & ALLOC_CMA &&
-	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-		page = __rmqueue_cma_fallback(zone, order);
-		if (page)
-			return page;
+	if (IS_ENABLED(CONFIG_CMA)) {
+		/*
+		 * Balance movable allocations between regular and CMA areas by
+		 * allocating from CMA when over half of the zone's free memory
+		 * is in the CMA area.
+		 */
+		if (alloc_flags & ALLOC_CMA &&
+		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
+		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+			page = __rmqueue_cma_fallback(zone, order);
+			if (page)
+				goto out;
+		}
 	}
-#endif
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
@@ -2886,8 +2886,9 @@ retry:
 		if (!page && __rmqueue_fallback(zone, order, migratetype,
 								alloc_flags))
 			goto retry;
 	}
-
-	trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+	if (page)
+		trace_mm_page_alloc_zone_locked(page, order, migratetype);
 	return page;
 }
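
The first hunk swaps a preprocessor #ifdef CONFIG_CMA guard for a C-level IS_ENABLED(CONFIG_CMA) check, so the CMA branch is parsed and type-checked in every configuration and simply discarded as dead code when CMA is compiled out. Below is a minimal standalone sketch of that pattern, not the kernel code itself: CONFIG_DEMO_FEATURE and demo_fast_path() are hypothetical stand-ins, and the IS_ENABLED() definition is a simplified cut of the machinery in the kernel's <linux/kconfig.h> that ignores the tristate (=m) module case.

#include <stdio.h>

/* Hypothetical stand-in for a Kconfig-generated CONFIG_* macro, which is
 * defined to 1 when the option is built in (comment it out to see the
 * branch below compile away while still being type-checked). */
#define CONFIG_DEMO_FEATURE 1

/* Simplified IS_ENABLED(): expands to 1 when `option` is defined to 1,
 * and to 0 otherwise. If the option is defined, the placeholder paste
 * yields "0," and the second argument is 1; if not, the paste produces a
 * single junk token and the second argument falls back to 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option)		__is_defined(option)

static int demo_fast_path(void)
{
	return 42;
}

int main(void)
{
	int val = -1;

	/* Unlike an #ifdef, both branches are visible to the compiler even
	 * when the option is off; the optimizer then eliminates the dead
	 * branch, so there is no runtime cost. */
	if (IS_ENABLED(CONFIG_DEMO_FEATURE))
		val = demo_fast_path();

	printf("val = %d\n", val);
	return 0;
}

The second hunk depends on the first: because the CMA shortcut now ends in goto out rather than an early return, both the CMA path and the regular __rmqueue_smallest() path funnel through a single exit, where the added if (page) check makes the tracepoint fire only for successful allocations.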