author	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 19:42:40 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 19:42:40 -0800
commit	7c225c69f86c934e3be9be63ecde754e286838d7 (patch)
tree	ff2df419b0c4886b37407235f7d21215e4cf45e4 /mm/mlock.c
parent	6363b3f3ac5be096d08c8c504128befa0c033529 (diff)
parent	1b7176aea0a924ac59c6a283129d3e8eb00aa915 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few misc bits

 - ocfs2 updates

 - almost all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (131 commits)
  memory hotplug: fix comments when adding section
  mm: make alloc_node_mem_map a void call if we don't have CONFIG_FLAT_NODE_MEM_MAP
  mm: simplify nodemask printing
  mm,oom_reaper: remove pointless kthread_run() error check
  mm/page_ext.c: check if page_ext is not prepared
  writeback: remove unused function parameter
  mm: do not rely on preempt_count in print_vma_addr
  mm, sparse: do not swamp log with huge vmemmap allocation failures
  mm/hmm: remove redundant variable align_end
  mm/list_lru.c: mark expected switch fall-through
  mm/shmem.c: mark expected switch fall-through
  mm/page_alloc.c: broken deferred calculation
  mm: don't warn about allocations which stall for too long
  fs: fuse: account fuse_inode slab memory as reclaimable
  mm, page_alloc: fix potential false positive in __zone_watermark_ok
  mm: mlock: remove lru_add_drain_all()
  mm, sysctl: make NUMA stats configurable
  shmem: convert shmem_init_inodecache() to void
  Unify migrate_pages and move_pages access checks
  mm, pagevec: rename pagevec drained field
  ...
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--  mm/mlock.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 46af369c13e5..30472d438794 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -289,7 +289,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	struct pagevec pvec_putback;
 	int pgrescued = 0;
 
-	pagevec_init(&pvec_putback, 0);
+	pagevec_init(&pvec_putback);
 
 	/* Phase 1: page isolation */
 	spin_lock_irq(zone_lru_lock(zone));
@@ -448,7 +448,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		struct pagevec pvec;
 		struct zone *zone;
 
-		pagevec_init(&pvec, 0);
+		pagevec_init(&pvec);
 		/*
 		 * Although FOLL_DUMP is intended for get_dump_page(),
 		 * it just so happens that its special treatment of the
@@ -670,8 +670,6 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
 	if (!can_do_mlock())
 		return -EPERM;
 
-	lru_add_drain_all();	/* flush pagevec */
-
 	len = PAGE_ALIGN(len + (offset_in_page(start)));
 	start &= PAGE_MASK;
@@ -798,9 +796,6 @@ SYSCALL_DEFINE1(mlockall, int, flags)
 	if (!can_do_mlock())
 		return -EPERM;
 
-	if (flags & MCL_CURRENT)
-		lru_add_drain_all();	/* flush pagevec */
-
 	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
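
For context, a minimal userspace sketch (illustrative only, not part of this patch) that exercises the mlock() and mlockall(MCL_CURRENT) entry points whose kernel-side paths, do_mlock() and the mlockall syscall, are modified in the hunks above. The buffer size and error handling are arbitrary choices, and the calls assume the process has enough RLIMIT_MEMLOCK headroom or CAP_IPC_LOCK.

/*
 * Illustrative sketch, not part of this commit: calls the mlock() and
 * mlockall(MCL_CURRENT) syscalls whose kernel entry points (do_mlock()
 * and SYSCALL_DEFINE1(mlockall, ...)) are touched by the diff above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;		/* arbitrary example length */
	void *buf = malloc(len);

	if (!buf)
		return 1;
	memset(buf, 0, len);		/* touch the pages first */

	/* Lock a specific range; this path reaches do_mlock() in mm/mlock.c. */
	if (mlock(buf, len) != 0)
		perror("mlock");

	/* Lock everything currently mapped; the MCL_CURRENT path above. */
	if (mlockall(MCL_CURRENT) != 0)
		perror("mlockall");

	munlockall();			/* unlocks both ranges */
	free(buf);
	return 0;
}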