| author | Christoph Hellwig <hch@lst.de> | 2019-08-28 16:19:54 +0200 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-09-07 04:28:04 -0300 |
| commit | 7b86ac3371b70c3fd8fd95501719beb1faab719f (patch) | |
| tree | b7f61e4615d249563f09567a22ee399634c898dd /arch/openrisc | |
| parent | a520110e4a15ceb385304d9cab22bb51438f6080 (diff) | |
pagewalk: separate function pointers from iterator data
The mm_walk structure currently mixes data and code. Split out the
operations vectors into a new mm_walk_ops structure, and while we are
changing the API also declare the mm_walk structure inside the
walk_page_range and walk_page_vma functions.
Based on patch from Linus Torvalds.
Link: https://lore.kernel.org/r/20190828141955.22210-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
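For reference, the new calling convention described in the commit message can be sketched as follows. This is an illustrative example, not code from the patch: mark_page(), mark_walk_ops and mark_range() are hypothetical names, but the callback shape and the walk_page_range() signature follow the post-patch API as used in the arch/openrisc diff below.

```c
#include <linux/mm.h>
#include <linux/pagewalk.h>	/* struct mm_walk_ops, walk_page_range() */

/*
 * Hypothetical per-PTE callback; same shape as page_set_nocache() and
 * page_clear_nocache() in the diff below.  Iterator state arrives via
 * struct mm_walk, and walk->private carries the caller's opaque pointer
 * (the last argument to walk_page_range()).
 */
static int mark_page(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	/* operate on *pte here */
	return 0;
}

/* The callbacks are now constant data, shared by every walk. */
static const struct mm_walk_ops mark_walk_ops = {
	.pte_entry = mark_page,
};

static int mark_range(unsigned long start, unsigned long end)
{
	/*
	 * Before this patch a caller built a struct mm_walk on the stack,
	 * mixing callbacks (.pte_entry) with data (.mm), and called
	 * walk_page_range(start, end, &walk).  Now the mm, the const ops
	 * table and a private pointer are passed explicitly, and the
	 * iterator state stays internal to the pagewalk code.
	 */
	return walk_page_range(&init_mm, start, end, &mark_walk_ops, NULL);
}
```

Keeping the ops table const means it can live in read-only data and be reused by every caller, which is the data/code separation the commit message refers to.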
Diffstat (limited to 'arch/openrisc')
-rw-r--r-- | arch/openrisc/kernel/dma.c | 22 |
1 file changed, 12 insertions(+), 10 deletions(-)
```diff
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index c7812e6effa2..4d5b8bd1d795 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -44,6 +44,10 @@ page_set_nocache(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
+static const struct mm_walk_ops set_nocache_walk_ops = {
+	.pte_entry = page_set_nocache,
+};
+
 static int
 page_clear_nocache(pte_t *pte, unsigned long addr,
 		   unsigned long next, struct mm_walk *walk)
@@ -59,6 +63,10 @@ page_clear_nocache(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
+static const struct mm_walk_ops clear_nocache_walk_ops = {
+	.pte_entry = page_clear_nocache,
+};
+
 /*
  * Alloc "coherent" memory, which for OpenRISC means simply uncached.
  *
@@ -81,10 +89,6 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 {
 	unsigned long va;
 	void *page;
-	struct mm_walk walk = {
-		.pte_entry = page_set_nocache,
-		.mm = &init_mm
-	};
 
 	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
 	if (!page)
@@ -99,7 +103,8 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * We need to iterate through the pages, clearing the dcache for
 	 * them and setting the cache-inhibit bit.
 	 */
-	if (walk_page_range(va, va + size, &walk)) {
+	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+			NULL)) {
 		free_pages_exact(page, size);
 		return NULL;
 	}
@@ -112,13 +117,10 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	      dma_addr_t dma_handle, unsigned long attrs)
 {
 	unsigned long va = (unsigned long)vaddr;
-	struct mm_walk walk = {
-		.pte_entry = page_clear_nocache,
-		.mm = &init_mm
-	};
 
 	/* walk_page_range shouldn't be able to fail here */
-	WARN_ON(walk_page_range(va, va + size, &walk));
+	WARN_ON(walk_page_range(&init_mm, va, va + size,
+			&clear_nocache_walk_ops, NULL));
 
 	free_pages_exact(vaddr, size);
 }
```