author		Arve Hjønnevåg <arve@android.com>	2009-02-17 14:51:02 -0800
committer	Colin Cross <ccross@android.com>	2012-04-09 13:53:12 -0700
commit		cde8949795629dc5e9e3781fad26afc8dd8d767b (patch)
tree		f9fda0a83fb6111bd50dbe8fb1d4663319fe8400
parent		ed275aa26f3bcde6da3356755ae5ad8eeab7444f (diff)
mm: Add min_free_order_shift tunable.
By default the kernel tries to keep half as much memory free at each
order as it does for one order below. This can be too aggressive when
running without swap.

Change-Id: I5efc1a0b50f41ff3ac71e92d2efd175dedd54ead
Signed-off-by: Arve Hjønnevåg <arve@android.com>
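For illustration only (not part of the patch): a minimal userspace sketch of the per-order reduction that __zone_watermark_ok() applies, to show why the default halving can be too strict. The starting watermark of 1024 pages and the alternative shift of 4 are arbitrary assumptions; the loop simply repeats the "min >>= shift" step changed below once per order.

/*
 * Standalone sketch (userspace, not kernel code): models the reduction
 * applied per order in __zone_watermark_ok(). The watermark value and
 * the shift of 4 are assumptions chosen to make the effect visible.
 */
#include <stdio.h>

static unsigned long min_at_order(unsigned long min, int order, int shift)
{
	int o;

	/* Same reduction the kernel applies once per order walked. */
	for (o = 0; o < order; o++)
		min >>= shift;
	return min;
}

int main(void)
{
	unsigned long min = 1024;	/* assumed zone watermark, in pages */
	int order;

	for (order = 0; order <= 4; order++)
		printf("order %d: shift 1 needs %4lu pages, shift 4 needs %4lu\n",
		       order, min_at_order(min, order, 1),
		       min_at_order(min, order, 4));
	return 0;
}

With the default shift of 1 the requirement only halves per order (64 pages are still demanded at order 4 in this example), while a shift of 4 drops it to zero within two orders.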
-rw-r--r--	kernel/sysctl.c		8
-rw-r--r--	mm/page_alloc.c		3
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4ab11879aeb..49f47258272 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -102,6 +102,7 @@ extern char core_pattern[];
 extern unsigned int core_pipe_limit;
 extern int pid_max;
 extern int min_free_kbytes;
+extern int min_free_order_shift;
 extern int pid_max_min, pid_max_max;
 extern int sysctl_drop_caches;
 extern int percpu_pagelist_fraction;
@@ -1199,6 +1200,13 @@ static struct ctl_table vm_table[] = {
 		.extra1 = &zero,
 	},
 	{
+		.procname = "min_free_order_shift",
+		.data = &min_free_order_shift,
+		.maxlen = sizeof(min_free_order_shift),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec
+	},
+	{
 		.procname = "percpu_pagelist_fraction",
 		.data = &percpu_pagelist_fraction,
 		.maxlen = sizeof(percpu_pagelist_fraction),
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a712fb9e04c..dbd52e2449a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -192,6 +192,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
 };
 
 int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1565,7 +1566,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		free_pages -= z->free_area[o].nr_free << o;
 
 		/* Require fewer higher order pages to be free */
-		min >>= 1;
+		min >>= min_free_order_shift;
 
 		if (free_pages <= min)
 			return false;
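Usage sketch (an inference from the vm_table registration above, not text from the patch): with this change applied the tunable should appear as /proc/sys/vm/min_free_order_shift and accept a plain integer through the standard proc_dointvec handler. The value 4 below is an arbitrary example for a swapless device.

/* Hypothetical tuning example; the value 4 is an arbitrary assumption. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/min_free_order_shift", "w");

	if (!f) {
		perror("min_free_order_shift");
		return 1;
	}
	/* Equivalent in spirit to: sysctl -w vm.min_free_order_shift=4 */
	fprintf(f, "4\n");
	return fclose(f) ? 1 : 0;
}

The current value can be read back from the same file, since proc_dointvec handles both reads and writes.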