From e5146b12e2d02af04608301c958d95b2fc47a0f9 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Thu, 28 Jul 2016 15:46:47 -0700
Subject: mm, vmscan: add classzone information to tracepoints

This is convenient when tracking down why the skip count is high because
it'll show what classzone kswapd woke up at and what zones are being
isolated.

Link: http://lkml.kernel.org/r/1467970510-21195-29-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman
Acked-by: Vlastimil Babka
Cc: Hillf Danton
Acked-by: Johannes Weiner
Cc: Joonsoo Kim
Cc: Michal Hocko
Cc: Minchan Kim
Cc: Rik van Riel
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'mm/vmscan.c')

diff --git a/mm/vmscan.c b/mm/vmscan.c
index b3829c7e3a7d..5eaf83bf11d1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1439,7 +1439,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	if (!list_empty(&pages_skipped))
 		list_splice(&pages_skipped, src);
 	*nr_scanned = scan;
-	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
 				    nr_taken, mode, is_file_lru(lru));
 	for (scan = 0; scan < MAX_NR_ZONES; scan++) {
 		nr_pages = nr_zone_taken[scan];
@@ -2889,7 +2889,8 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 	trace_mm_vmscan_direct_reclaim_begin(order,
 				sc.may_writepage,
-				gfp_mask);
+				gfp_mask,
+				sc.reclaim_idx);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
@@ -2920,7 +2921,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
 						      sc.may_writepage,
-						      sc.gfp_mask);
+						      sc.gfp_mask,
+						      sc.reclaim_idx);
 
 	/*
 	 * NOTE: Although we can get the priority field, using it
@@ -2968,7 +2970,8 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 
 	trace_mm_vmscan_memcg_reclaim_begin(0,
 					    sc.may_writepage,
-					    sc.gfp_mask);
+					    sc.gfp_mask,
+					    sc.reclaim_idx);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
@@ -3386,7 +3389,8 @@ kswapd_try_sleep:
 		 * but kcompactd is woken to compact for the original
 		 * request (alloc_order).
 		 */
-		trace_mm_vmscan_kswapd_wake(pgdat->node_id, alloc_order);
+		trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
+						alloc_order);
 		reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
 		if (reclaim_order < alloc_order)
 			goto kswapd_try_sleep;
--
cgit v1.2.3
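
Note: the matching update to the tracepoint definitions lives in
include/trace/events/vmscan.h and is not part of this cgit view, which is
limited to mm/vmscan.c. For orientation only, below is a sketch of how the
mm_vmscan_lru_isolate event would gain the classzone argument; it is
reconstructed from the call site in isolate_lru_pages() above, so the field
names and the TP_printk format string are assumptions rather than the
header's actual text.

/*
 * Sketch (not the committed header): mm_vmscan_lru_isolate extended with
 * a classzone_idx field, matching the sc->reclaim_idx argument that the
 * call site above now passes first.
 */
TRACE_EVENT(mm_vmscan_lru_isolate,

	TP_PROTO(int classzone_idx,	/* new argument: sc->reclaim_idx */
		int order,
		unsigned long nr_requested,
		unsigned long nr_scanned,
		unsigned long nr_taken,
		isolate_mode_t isolate_mode,
		int file),

	TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken,
		isolate_mode, file),

	TP_STRUCT__entry(
		__field(int, classzone_idx)	/* new field */
		__field(int, order)
		__field(unsigned long, nr_requested)
		__field(unsigned long, nr_scanned)
		__field(unsigned long, nr_taken)
		__field(isolate_mode_t, isolate_mode)
		__field(int, file)
	),

	TP_fast_assign(
		__entry->classzone_idx = classzone_idx;	/* new assignment */
		__entry->order = order;
		__entry->nr_requested = nr_requested;
		__entry->nr_scanned = nr_scanned;
		__entry->nr_taken = nr_taken;
		__entry->isolate_mode = isolate_mode;
		__entry->file = file;
	),

	TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
		__entry->isolate_mode,
		__entry->classzone_idx,
		__entry->order,
		__entry->nr_requested,
		__entry->nr_scanned,
		__entry->nr_taken,
		__entry->file)
);

The other events touched by this patch (mm_vmscan_direct_reclaim_begin,
mm_vmscan_memcg_softlimit_reclaim_begin, mm_vmscan_memcg_reclaim_begin and
mm_vmscan_kswapd_wake) would be extended the same way, each gaining the
extra reclaim_idx/classzone_idx argument its call site now passes.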