Diffstat (limited to 'arch/powerpc/mm')

 arch/powerpc/mm/gup.c                | 12 ------------
 arch/powerpc/mm/hash_utils_64.c      |  6 +++---
 arch/powerpc/mm/hugetlbpage.c        | 21 ++++++++++++++++-----
 arch/powerpc/mm/mem.c                |  3 ++-
 arch/powerpc/mm/mmu_context_hash64.c | 12 ++++++------
 arch/powerpc/mm/numa.c               |  3 ++-
 6 files changed, 29 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index fec13200868..d7efdbf640c 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -16,16 +16,6 @@
#ifdef __HAVE_ARCH_PTE_SPECIAL
-static inline void get_huge_page_tail(struct page *page)
-{
- /*
- * __split_huge_page_refcount() cannot run
- * from under us.
- */
- VM_BUG_ON(atomic_read(&page->_count) < 0);
- atomic_inc(&page->_count);
-}
-
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
@@ -57,8 +47,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
put_page(page);
return 0;
}
- if (PageTail(page))
- get_huge_page_tail(page);
pages[*nr] = page;
(*nr)++;
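
Note: with __HAVE_ARCH_PTE_SPECIAL, a present, non-special PTE can only map a
normal page, never a compound tail, so the PTE-level fast-gup path needs no
tail fixup; tail accounting moves to the hugepte walker (see hugetlbpage.c
below). A hedged sketch of the surviving loop, reconstructed from the ~v3.x
powerpc gup_pte_range() rather than quoted verbatim:

static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	/* present + user + !special (and writable, if requested) */
	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		pte_t pte = *ptep;
		struct page *page;

		if ((pte_val(pte) & mask) != result)
			return 0;
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		/* recheck the PTE after taking the reference */
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;	/* never a tail page here */
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}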
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 26b2872b3d0..07f9e9f0d87 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -534,11 +534,11 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-void create_section_mapping(unsigned long start, unsigned long end)
+int create_section_mapping(unsigned long start, unsigned long end)
{
- BUG_ON(htab_bolt_mapping(start, end, __pa(start),
+ return htab_bolt_mapping(start, end, __pa(start),
pgprot_val(PAGE_KERNEL), mmu_linear_psize,
- mmu_kernel_ssize));
+ mmu_kernel_ssize);
}
int remove_section_mapping(unsigned long start, unsigned long end)
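
Note: changing the return type only helps if the prototype changes in step. A
minimal sketch of the matching declarations, assuming they live in
arch/powerpc/include/asm/sparsemem.h as in kernels of this era:

#ifdef CONFIG_MEMORY_HOTPLUG
extern int create_section_mapping(unsigned long start, unsigned long end);
extern int remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */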
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0b9a5c1901b..da5eb388570 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -390,7 +390,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
{
unsigned long mask;
unsigned long pte_end;
- struct page *head, *page;
+ struct page *head, *page, *tail;
pte_t pte;
int refs;
@@ -413,6 +413,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
head = pte_page(pte);
page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+ tail = page;
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
@@ -428,10 +429,20 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
/* Could be optimized better */
- while (*nr) {
- put_page(page);
- (*nr)--;
- }
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ /*
+ * Any tail pages need their mapcount reference taken before we
+ * return.
+ */
+ while (refs--) {
+ if (PageTail(tail))
+ get_huge_page_tail(tail);
+ tail++;
}
return 1;
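
Note: the error path now drops exactly the refs references that were taken on
the compound head, instead of looping put_page() on the last subpage until
*nr reaches zero. On success, each returned tail page also gets a tail
reference so __split_huge_page_refcount() can redistribute counts if the huge
page is split later. A hedged sketch of the helper relied on here, roughly as
it reads in include/linux/mm.h around this series (the exact asserts vary
between releases):

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run from under us: the
	 * caller already holds a reference on the compound head.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	/* tail pins are tracked in _mapcount, not _count */
	atomic_inc(&page->_mapcount);
}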
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c781bbcf733..95985f286e3 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -123,7 +123,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
pgdata = NODE_DATA(nid);
start = (unsigned long)__va(start);
- create_section_mapping(start, start + size);
+ if (create_section_mapping(start, start + size))
+ return -EINVAL;
/* this should work for most non-highmem platforms */
zone = pgdata->node_zones;
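
Note: the failure value of create_section_mapping() is whatever
htab_bolt_mapping() returned, which in this era is a raw negative value
rather than a proper errno (hedged), so arch_add_memory() flattens any
failure to -EINVAL before it escapes to the generic hotplug core. The same
pattern, sketched with an explicit temporary:

	int rc;

	start = (unsigned long)__va(start);	/* hotplug core hands us a physical address */
	rc = create_section_mapping(start, start + size);
	if (rc)
		return -EINVAL;	/* rc may not be an errno; do not leak it upward */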
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 3bafc3deca6..4ff587e3825 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -136,8 +136,8 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
if (!mm || !acop)
return -EINVAL;
- /* We need to make sure mm_users doesn't change */
- down_read(&mm->mmap_sem);
+ /* The page_table_lock ensures mm_users won't change under us */
+ spin_lock(&mm->page_table_lock);
spin_lock(mm->context.cop_lockp);
if (mm->context.cop_pid == COP_PID_NONE) {
@@ -164,7 +164,7 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
out:
spin_unlock(mm->context.cop_lockp);
- up_read(&mm->mmap_sem);
+ spin_unlock(&mm->page_table_lock);
return ret;
}
@@ -185,8 +185,8 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
if (WARN_ON_ONCE(!mm))
return;
- /* We need to make sure mm_users doesn't change */
- down_read(&mm->mmap_sem);
+ /* The page_table_lock ensures mm_users won't change under us */
+ spin_lock(&mm->page_table_lock);
spin_lock(mm->context.cop_lockp);
mm->context.acop &= ~acop;
@@ -213,7 +213,7 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
}
spin_unlock(mm->context.cop_lockp);
- up_read(&mm->mmap_sem);
+ spin_unlock(&mm->page_table_lock);
}
EXPORT_SYMBOL_GPL(drop_cop);
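
Note: both paths trade the sleeping mmap_sem read lock for the mm-wide
page_table_lock spinlock; either is enough to keep mm_users stable while the
coprocessor state is updated, but the spinlock is cheaper and cannot tangle
with callers that already hold mmap_sem (hedged: the exact deadlock being
avoided is not visible in this diff). The resulting discipline, sketched:

	spin_lock(&mm->page_table_lock);	/* outer: pins mm_users */
	spin_lock(mm->context.cop_lockp);	/* inner: guards context.acop / cop_pid */

	/* ... update icswx coprocessor state for this mm; must not sleep ... */

	spin_unlock(mm->context.cop_lockp);
	spin_unlock(&mm->page_table_lock);	/* strict reverse order */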
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2164006fe17..2c1ae7a5fb5 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1214,11 +1214,12 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
break;
}
- of_node_put(memory);
if (nid >= 0)
break;
}
+ of_node_put(memory);
+
return nid;
}
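
Note: for_each_node_by_type() drops the previous node's reference as it
advances, so calling of_node_put() inside the body over-releases; only a node
still held after an early break needs the final put, and of_node_put(NULL) is
a no-op when the loop runs to completion. A sketch of the corrected pattern,
with node_matches() as a hypothetical predicate standing in for the section
scan:

	struct device_node *memory = NULL;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		nid = node_matches(memory);	/* hypothetical helper */
		if (nid >= 0)
			break;			/* 'memory' reference still held */
	}
	of_node_put(memory);	/* NULL (loop exhausted) is handled as a no-op */

	return nid;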