Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  17
1 file changed, 8 insertions, 9 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c27fb1faffe5..fb3bf696c05e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -266,6 +266,8 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
+#define ANON_AND_FILE 2
+
enum lruvec_flags {
LRUVEC_CONGESTED, /* lruvec has many dirty pages
* backed by a congested BDI
@@ -283,8 +285,8 @@ struct lruvec {
unsigned long file_cost;
/* Non-resident age, driven by LRU movement */
atomic_long_t nonresident_age;
- /* Refaults at the time of last reclaim cycle, anon=0, file=1 */
- unsigned long refaults[2];
+ /* Refaults at the time of last reclaim cycle */
+ unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
#ifdef CONFIG_MEMCG
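
The removed comment documented the array layout as anon=0, file=1; the new ANON_AND_FILE define only names the array size, so that indexing convention is worth keeping in mind. Below is a minimal user-space sketch of the convention, assuming it matches how mm/vmscan.c snapshots refaults per lruvec; the toy_* struct and helper names are hypothetical, not kernel identifiers.

#include <stdio.h>

#define ANON_AND_FILE 2

struct toy_lruvec {
	/* Refaults at the time of last reclaim cycle, indexed anon=0, file=1 */
	unsigned long refaults[ANON_AND_FILE];
};

/* Illustrative stand-in for the reclaim-time snapshot of refault counters */
static void toy_snapshot_refaults(struct toy_lruvec *lruvec,
				  unsigned long anon, unsigned long file)
{
	lruvec->refaults[0] = anon;	/* anon slot */
	lruvec->refaults[1] = file;	/* file slot */
}

int main(void)
{
	struct toy_lruvec lv = { { 0, 0 } };

	toy_snapshot_refaults(&lv, 10, 42);
	printf("anon refaults %lu, file refaults %lu\n",
	       lv.refaults[0], lv.refaults[1]);
	return 0;
}
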
@@ -441,6 +443,8 @@ enum zone_type {
#ifndef __GENERATING_BOUNDS_H
+#define ASYNC_AND_SYNC 2
+
struct zone {
/* Read-mostly fields */
@@ -560,8 +564,8 @@ struct zone {
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* pfn where compaction free scanner should start */
unsigned long compact_cached_free_pfn;
- /* pfn where async and sync compaction migration scanner should start */
- unsigned long compact_cached_migrate_pfn[2];
+ /* pfn where compaction migration scanner should start */
+ unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
unsigned long compact_init_migrate_pfn;
unsigned long compact_init_free_pfn;
#endif
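
As with refaults[], ASYNC_AND_SYNC names only the size of compact_cached_migrate_pfn[]; the usual convention in mm/compaction.c is async=0, sync=1, with a sync scan advancing both cached positions since sync compaction makes at least as much progress as async. A hedged, stand-alone sketch of that indexing follows; the toy_* names are hypothetical, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define ASYNC_AND_SYNC 2

struct toy_zone {
	/* pfn where the async (0) and sync (1) migration scanners resume */
	unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
};

/* Illustrative update of the cached migrate-scanner positions */
static void toy_update_cached_migrate(struct toy_zone *zone,
				      unsigned long pfn, bool sync)
{
	zone->compact_cached_migrate_pfn[0] = pfn;	/* async always advances */
	if (sync)
		zone->compact_cached_migrate_pfn[1] = pfn;
}

int main(void)
{
	struct toy_zone z = { { 0, 0 } };

	toy_update_cached_migrate(&z, 0x1000, false);	/* async-only scan */
	toy_update_cached_migrate(&z, 0x2000, true);	/* sync scan advances both */
	printf("async %#lx, sync %#lx\n",
	       z.compact_cached_migrate_pfn[0],
	       z.compact_cached_migrate_pfn[1]);
	return 0;
}
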
@@ -1416,7 +1420,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#define pfn_to_nid(pfn) (0)
#endif
-#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init() do {} while (0)
@@ -1436,10 +1439,6 @@ struct mminit_pfnnid_cache {
int last_nid;
};
-#ifndef early_pfn_valid
-#define early_pfn_valid(pfn) (1)
-#endif
-
/*
* If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
* need to check pfn validity within that MAX_ORDER_NR_PAGES block.
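
The dropped early_pfn_valid() macro was pfn_valid() in the SPARSEMEM case and unconditionally 1 otherwise, exactly as the removed lines show; after this change a caller that still needs an early validity check would use pfn_valid() directly. A small stand-alone illustration of what the macro expanded to (the toy_* names and the fake pfn range are assumptions for the demo, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define TOY_SPARSEMEM 1	/* assume the sparse-memory case for this demo */

/* Hypothetical stand-in for pfn_valid(); real semantics are arch/config specific */
static bool toy_pfn_valid(unsigned long pfn)
{
	return pfn < 0x100000;	/* pretend the first 4 GiB of 4 KiB pages exist */
}

/*
 * What the removed early_pfn_valid() boiled down to: pfn_valid() under
 * SPARSEMEM, unconditionally true otherwise.
 */
#ifdef TOY_SPARSEMEM
#define toy_early_pfn_valid(pfn)	toy_pfn_valid(pfn)
#else
#define toy_early_pfn_valid(pfn)	(1)
#endif

int main(void)
{
	printf("pfn 0x42 early-valid? %d\n", (int)toy_early_pfn_valid(0x42));
	return 0;
}
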