author    Ingo Molnar <mingo@elte.hu>    2010-04-30 09:56:41 +0200
committer Ingo Molnar <mingo@elte.hu>    2010-04-30 09:56:44 +0200
commit    3ca50496c2677a2b3fdd3ede86660fd1433beac6 (patch)
tree      97a76d8479a8d8a96e04ed0694b8dbf89457bfcc /mm
parent    462b04e28a7ec1339c892117c3f20a40e55d0e83 (diff)
parent    66f41d4c5c8a5deed66fdcc84509376c9a0bf9d8 (diff)
Merge commit 'v2.6.34-rc6' into perf/core
Merge reason: update to the latest -rc.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c  34
-rw-r--r--  mm/hugetlb.c       5
-rw-r--r--  mm/ksm.c          12
-rw-r--r--  mm/memcontrol.c    4
-rw-r--r--  mm/mmap.c          3
-rw-r--r--  mm/rmap.c          9
6 files changed, 53 insertions(+), 14 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f13e067e146..707d0dc6da0 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -11,6 +11,8 @@
#include <linux/writeback.h>
#include <linux/device.h>
+static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
@@ -25,6 +27,11 @@ struct backing_dev_info default_backing_dev_info = {
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
+struct backing_dev_info noop_backing_dev_info = {
+ .name = "noop",
+};
+EXPORT_SYMBOL_GPL(noop_backing_dev_info);
+
static struct class *bdi_class;
/*
@@ -715,6 +722,33 @@ void bdi_destroy(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL(bdi_destroy);
+/*
+ * For use from filesystems to quickly init and register a bdi associated
+ * with dirty writeback
+ */
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+ unsigned int cap)
+{
+ char tmp[32];
+ int err;
+
+ bdi->name = name;
+ bdi->capabilities = cap;
+ err = bdi_init(bdi);
+ if (err)
+ return err;
+
+ sprintf(tmp, "%.28s%s", name, "-%d");
+ err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
+ if (err) {
+ bdi_destroy(bdi);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bdi_setup_and_register);
+
static wait_queue_head_t congestion_wqh[2] = {
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
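The comment above bdi_setup_and_register() says what it is for: a one-call replacement for the bdi_init()/bdi_register() pair, with the "-%d" suffix and bdi_seq counter keeping registered names unique. A hypothetical caller might look like the sketch below; the examplefs names and the choice of BDI_CAP_MAP_COPY are illustrative assumptions, not part of this patch.

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct example_fs_info {
	struct backing_dev_info bdi;
};

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	struct example_fs_info *fsi;
	int err;

	fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
	if (!fsi)
		return -ENOMEM;

	/* One call replaces the open-coded bdi_init() + bdi_register()
	 * pair; the helper appends a unique "-%d" sequence number. */
	err = bdi_setup_and_register(&fsi->bdi, "examplefs", BDI_CAP_MAP_COPY);
	if (err) {
		kfree(fsi);
		return err;
	}

	sb->s_fs_info = fsi;
	sb->s_bdi = &fsi->bdi;
	return 0;
}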
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6034dc9e979..ffbdfc86aed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -546,6 +546,7 @@ static void free_huge_page(struct page *page)
mapping = (struct address_space *) page_private(page);
set_page_private(page, 0);
+ page->mapping = NULL;
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
@@ -2447,8 +2448,10 @@ retry:
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
- } else
+ } else {
lock_page(page);
+ page->mapping = HUGETLB_POISON;
+ }
}
/*
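HUGETLB_POISON comes from include/linux/poison.h; storing it in page->mapping turns any later dereference of a stale mapping pointer into a fault at a distinctive, immediately recognizable address, instead of NULL-looking silence. A minimal sketch of the poison-pointer idiom, with hypothetical struct and function names:

#include <linux/fs.h>		/* struct address_space */
#include <linux/poison.h>	/* HUGETLB_POISON */

struct example_obj {
	struct address_space *mapping;
};

/* Retire an object's mapping pointer: rather than clearing it to
 * NULL, store a distinctive bad address so a use-after-release
 * oopses at a value that identifies the subsystem in the dump. */
static void example_retire(struct example_obj *obj)
{
	obj->mapping = HUGETLB_POISON;	/* stale readers fault loudly */
}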
diff --git a/mm/ksm.c b/mm/ksm.c
index 8cdfc2a1e8b..956880f2ff4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
do {
cond_resched();
page = follow_page(vma, addr, FOLL_GET);
- if (!page)
+ if (IS_ERR_OR_NULL(page))
break;
if (PageKsm(page))
ret = handle_mm_fault(vma->vm_mm, vma, addr,
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
goto out;
page = follow_page(vma, addr, FOLL_GET);
- if (!page)
+ if (IS_ERR_OR_NULL(page))
goto out;
if (PageAnon(page)) {
flush_anon_page(vma, page, addr);
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
cond_resched();
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
tree_page = get_mergeable_page(tree_rmap_item);
- if (!tree_page)
+ if (IS_ERR_OR_NULL(tree_page))
return NULL;
/*
@@ -1294,7 +1294,7 @@ next_mm:
if (ksm_test_exit(mm))
break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
- if (*page && PageAnon(*page)) {
+ if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
rmap_item = get_next_rmap_item(slot,
@@ -1308,7 +1308,7 @@ next_mm:
up_read(&mm->mmap_sem);
return rmap_item;
}
- if (*page)
+ if (!IS_ERR_OR_NULL(*page))
put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
@@ -1367,7 +1367,7 @@ next_mm:
static void ksm_do_scan(unsigned int scan_npages)
{
struct rmap_item *rmap_item;
- struct page *page;
+ struct page *uninitialized_var(page);
while (scan_npages--) {
cond_resched();
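The first five ksm.c hunks are the same repair: follow_page() has two failure modes, returning NULL when no page is present or an ERR_PTR()-encoded error such as ERR_PTR(-EFAULT), and a bare NULL test lets the error pointer escape into page operations. The final hunk only wraps the scan loop's page pointer in uninitialized_var() to quiet a false-positive compiler warning. A sketch of the corrected check, with a hypothetical wrapper name:

#include <linux/err.h>
#include <linux/mm.h>

/* IS_ERR_OR_NULL() folds both failure encodings of follow_page()
 * into a single test. */
static struct page *example_follow_page(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct page *page = follow_page(vma, addr, FOLL_GET);

	if (IS_ERR_OR_NULL(page))
		return NULL;	/* no page, or an ERR_PTR()-encoded error */
	return page;
}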
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ede99c8b9..6c755de385f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2429,11 +2429,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
}
unlock_page_cgroup(pc);
+ *ptr = mem;
if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
css_put(&mem->css);
}
- *ptr = mem;
return ret;
}
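The reordering matters because __mem_cgroup_try_charge() writes back through its mem_cgroup ** argument and can leave it changed (even NULL) on failure; with the old ordering, the value stored to *ptr afterwards and the local 'mem' handed to css_put() could disagree. Passing the caller's slot directly keeps the two roles separate. A condensed restatement of the fixed flow, as a hypothetical standalone function with the surrounding context omitted:

static int example_prepare_migration(struct mem_cgroup **ptr,
				     struct mem_cgroup *mem)
{
	int ret = 0;

	*ptr = mem;			/* publish before charging */
	if (mem) {
		/* the helper updates *ptr, not our local copy */
		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
		css_put(&mem->css);	/* 'mem' is still valid here */
	}
	return ret;
}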
diff --git a/mm/mmap.c b/mm/mmap.c
index f90ea92f755..456ec6f2788 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1977,7 +1977,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
return 0;
/* Clean everything up if vma_adjust failed. */
- new->vm_ops->close(new);
+ if (new->vm_ops && new->vm_ops->close)
+ new->vm_ops->close(new);
if (new->vm_file) {
if (vma->vm_flags & VM_EXECUTABLE)
removed_exe_file_vma(mm);
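This error path runs for any VMA being split, including anonymous mappings whose vm_ops is NULL and file mappings whose vm_ops provides no ->close method, so both pointers must be tested before the callback fires. As a standalone guard (helper name hypothetical):

#include <linux/mm.h>

static void example_vma_close(struct vm_area_struct *vma)
{
	/* vm_ops is optional, and ->close is optional within it */
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
}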
diff --git a/mm/rmap.c b/mm/rmap.c
index 526704e8215..07fc9475879 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
goto out_enomem_free_avc;
allocated = anon_vma;
}
+ spin_lock(&anon_vma->lock);

- spin_lock(&anon_vma->lock);
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma)
list_add(&avc->same_vma, &vma->anon_vma_chain);
list_add(&avc->same_anon_vma, &anon_vma->head);
allocated = NULL;
+ avc = NULL;
}
spin_unlock(&mm->page_table_lock);
-
spin_unlock(&anon_vma->lock);
- if (unlikely(allocated)) {
+
+ if (unlikely(allocated))
anon_vma_free(allocated);
+ if (unlikely(avc))
anon_vma_chain_free(avc);
- }
}
return 0;
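Net effect of the two rmap.c hunks: the anon_vma lock placement is tidied, and, more importantly, 'avc' is now cleared when it is linked into the lists and freed on its own afterwards. Previously anon_vma_chain_free(avc) ran only inside the if (unlikely(allocated)) block, so an avc that lost the race while reusing an existing anon_vma leaked. The optimistic-allocation pattern in miniature, with all names hypothetical:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_head {
	spinlock_t lock;	/* initialized elsewhere with spin_lock_init() */
	void *attached;
};

static int example_attach(struct example_head *h)
{
	void *obj = kmalloc(64, GFP_KERNEL);	/* allocate before locking */

	if (!obj)
		return -ENOMEM;

	spin_lock(&h->lock);
	if (!h->attached) {
		h->attached = obj;	/* won the race: allocation consumed */
		obj = NULL;		/* so the free below is skipped */
	}
	spin_unlock(&h->lock);

	kfree(obj);	/* NULL-safe: frees only when the race was lost */
	return 0;
}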