author    Vegard Nossum <vegard.nossum@oracle.com>    2017-02-08 10:44:11 +1100
committer Stephen Rothwell <sfr@canb.auug.org.au>     2017-02-17 16:03:17 +1100
commit    5ba8974f69c28a4cd3586d66e5c376760da0f94b
tree      5283f96299856f232e2e9f45b100f289742b93ba /kernel
parent    803d7fc24ac8eb2bd495eb271e0989b4c1803d3d
mm: add new mmgrab() helper
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_count);/mmgrab\(\1\);/'
  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_count);/mmgrab\(\&\1\);/'

This is needed for a later patch that hooks into the helper, but might be a
worthwhile cleanup on its own.

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/20161218123229.22952-1-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
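The helper itself is introduced outside kernel/ and therefore does not show up
in the diffstat below. As a minimal sketch, assuming mmgrab() simply wraps the
atomic_inc() on mm_count that the sed conversion replaces, it would look like
this:

  /*
   * Sketch only: the real definition carries the kerneldoc comment
   * mentioned above and lives in a header outside kernel/, so it is
   * not part of this (kernel-only) diff.
   */
  static inline void mmgrab(struct mm_struct *mm)
  {
          atomic_inc(&mm->mm_count);
  }

References taken this way continue to be released with mmdrop(), exactly as
the converted atomic_inc(&...->mm_count) call sites already were.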
Diffstat (limited to 'kernel')
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/sched/core.c4
3 files changed, 4 insertions, 4 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 90b09ca35c84..8a768a3672a5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -539,7 +539,7 @@ static void exit_mm(void)
__set_current_state(TASK_RUNNING);
down_read(&mm->mmap_sem);
}
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
BUG_ON(mm != current->active_mm);
/* more a memory barrier than a real lock */
task_lock(current);
diff --git a/kernel/futex.c b/kernel/futex.c
index cdf365036141..b687cb22301c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -338,7 +338,7 @@ static inline bool should_fail_futex(bool fshared)
static inline void futex_get_mm(union futex_key *key)
{
- atomic_inc(&key->private.mm->mm_count);
+ mmgrab(key->private.mm);
/*
* Ensure futex_get_mm() implies a full barrier such that
* get_futex_key() implies a full barrier. This is relied upon
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 34e2291a9a6c..c7ded3311ec4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2844,7 +2844,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
if (!mm) {
next->active_mm = oldmm;
- atomic_inc(&oldmm->mm_count);
+ mmgrab(oldmm);
enter_lazy_tlb(oldmm, next);
} else
switch_mm_irqs_off(oldmm, mm, next);
@@ -6095,7 +6095,7 @@ void __init sched_init(void)
/*
* The boot idle thread does lazy MMU switching as well:
*/
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
enter_lazy_tlb(&init_mm, current);
/*