From cba48b98f2348c814316c4b4f411a07a0e4a2bf9 Mon Sep 17 00:00:00 2001
From: Rik van Riel
Date: Mon, 9 Aug 2010 17:18:38 -0700
Subject: mm: change direct call of spin_lock(anon_vma->lock) to inline function

Substitute a direct call of spin_lock(anon_vma->lock) with an inline
function doing exactly the same.

This makes it easier to do the substitution to the root anon_vma lock
in a following patch.

We will deal with the handful of special locks (nested, dec_and_lock,
etc) separately.

Signed-off-by: Rik van Riel
Acked-by: Mel Gorman
Acked-by: KAMEZAWA Hiroyuki
Tested-by: Larry Woodman
Acked-by: Larry Woodman
Reviewed-by: Minchan Kim
Acked-by: Linus Torvalds
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/ksm.c     | 18 +++++++++---------
 mm/migrate.c |  2 +-
 mm/mmap.c    |  2 +-
 mm/rmap.c    | 20 ++++++++++----------
 4 files changed, 21 insertions(+), 21 deletions(-)
(limited to 'mm')

diff --git a/mm/ksm.c b/mm/ksm.c
index 6c3e99b4ae7..eb9f6806ed5 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -327,7 +327,7 @@ static void drop_anon_vma(struct rmap_item *rmap_item)
 
 	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
 		int empty = list_empty(&anon_vma->head);
-		spin_unlock(&anon_vma->lock);
+		anon_vma_unlock(anon_vma);
 		if (empty)
 			anon_vma_free(anon_vma);
 	}
@@ -1566,7 +1566,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		spin_lock(&anon_vma->lock);
+		anon_vma_lock(anon_vma);
 		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
 			vma = vmac->vma;
 			if (rmap_item->address < vma->vm_start ||
@@ -1589,7 +1589,7 @@ again:
 			if (!search_new_forks || !mapcount)
 				break;
 		}
-		spin_unlock(&anon_vma->lock);
+		anon_vma_unlock(anon_vma);
 		if (!mapcount)
 			goto out;
 	}
@@ -1619,7 +1619,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		spin_lock(&anon_vma->lock);
+		anon_vma_lock(anon_vma);
 		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
 			vma = vmac->vma;
 			if (rmap_item->address < vma->vm_start ||
@@ -1637,11 +1637,11 @@ again:
 			ret = try_to_unmap_one(page, vma,
 					rmap_item->address, flags);
 			if (ret != SWAP_AGAIN || !page_mapped(page)) {
-				spin_unlock(&anon_vma->lock);
+				anon_vma_unlock(anon_vma);
 				goto out;
 			}
 		}
-		spin_unlock(&anon_vma->lock);
+		anon_vma_unlock(anon_vma);
 	}
 	if (!search_new_forks++)
 		goto again;
@@ -1671,7 +1671,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		spin_lock(&anon_vma->lock);
+		anon_vma_lock(anon_vma);
 		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
 			vma = vmac->vma;
 			if (rmap_item->address < vma->vm_start ||
@@ -1688,11 +1688,11 @@ again:
 
 			ret = rmap_one(page, vma, rmap_item->address, arg);
 			if (ret != SWAP_AGAIN) {
-				spin_unlock(&anon_vma->lock);
+				anon_vma_unlock(anon_vma);
 				goto out;
 			}
 		}
-		spin_unlock(&anon_vma->lock);
+		anon_vma_unlock(anon_vma);
 	}
 	if (!search_new_forks++)
 		goto again;
diff --git a/mm/migrate.c b/mm/migrate.c
index 4205b1d6049..1855f869917 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -684,7 +684,7 @@ rcu_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
 		int empty = list_empty(&anon_vma->head);
-		spin_unlock(&anon_vma->lock);
+		anon_vma_unlock(anon_vma);
 		if (empty)
 			anon_vma_free(anon_vma);
 	}
diff --git a/mm/mmap.c b/mm/mmap.c
index e26f1ea7c90..f5db18decc2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2593,7 +2593,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->head.next))
 			BUG();
-		spin_unlock(&anon_vma->lock);
+		anon_vma_unlock(anon_vma);
 	}
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 38a336e2eea..b65f00d1707 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -134,7 +134,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			allocated = anon_vma;
 		}
 
-		spin_lock(&anon_vma->lock);
+		anon_vma_lock(anon_vma);
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
@@ -147,7 +147,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			avc = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
-		spin_unlock(&anon_vma->lock);
+		anon_vma_unlock(anon_vma);
 
 		if (unlikely(allocated))
 			anon_vma_free(allocated);
@@ -170,9 +170,9 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 	avc->anon_vma = anon_vma;
 	list_add(&avc->same_vma, &vma->anon_vma_chain);
 
-	spin_lock(&anon_vma->lock);
+	anon_vma_lock(anon_vma);
 	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
-	spin_unlock(&anon_vma->lock);
+	anon_vma_unlock(anon_vma);
 }
 
 /*
@@ -246,12 +246,12 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	if (!anon_vma)
 		return;
 
-	spin_lock(&anon_vma->lock);
+	anon_vma_lock(anon_vma);
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
 	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
-	spin_unlock(&anon_vma->lock);
+	anon_vma_unlock(anon_vma);
 
 	if (empty)
 		anon_vma_free(anon_vma);
@@ -302,7 +302,7 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	spin_lock(&anon_vma->lock);
+	anon_vma_lock(anon_vma);
 	return anon_vma;
 out:
 	rcu_read_unlock();
@@ -311,7 +311,7 @@ out:
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
-	spin_unlock(&anon_vma->lock);
+	anon_vma_unlock(anon_vma);
 	rcu_read_unlock();
 }
 
@@ -1389,7 +1389,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma = page_anon_vma(page);
 	if (!anon_vma)
 		return ret;
-	spin_lock(&anon_vma->lock);
+	anon_vma_lock(anon_vma);
 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
@@ -1399,7 +1399,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 		if (ret != SWAP_AGAIN)
 			break;
 	}
-	spin_unlock(&anon_vma->lock);
+	anon_vma_unlock(anon_vma);
 	return ret;
 }
--
cgit v1.2.3
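
A note on the helpers themselves: because this cgit view is limited to 'mm',
the definitions of anon_vma_lock() and anon_vma_unlock() do not appear in the
diff above. Going by the changelog ("an inline function doing exactly the
same"), the wrappers presumably look like the sketch below; the
include/linux/rmap.h location is an assumption based on where struct anon_vma
is declared, not something this diff shows:

	/*
	 * Sketch of the inline wrappers the converted call sites use;
	 * presumed to live in include/linux/rmap.h next to struct anon_vma.
	 */
	static inline void anon_vma_lock(struct anon_vma *anon_vma)
	{
		/* same spinlock the callers previously took directly */
		spin_lock(&anon_vma->lock);
	}

	static inline void anon_vma_unlock(struct anon_vma *anon_vma)
	{
		spin_unlock(&anon_vma->lock);
	}

With every plain lock/unlock funneled through these two functions, the
follow-up patch can redirect all of these call sites to the root anon_vma's
lock by changing the helpers alone, leaving the special cases
(spin_lock_nested(), atomic_dec_and_lock()) to be converted separately, as
the changelog notes.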