author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2020-01-21 19:36:59 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2020-01-21 19:36:59 +0100
commit     dd7d99dc688d0fc448976f52f8517fbdeccdccda
tree       4ad59de5d584cf03e772ee153260c24ba0d717bd /include/asm-generic/cacheflush.h
parent     42bbdd99221bac206dde2b5e87098177fcd2a48e
parent     def9d2780727cec3313ed3522d0123158d87224d
Merge 5.5-rc7 into usb-next
We need the USB fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/asm-generic/cacheflush.h')
-rw-r--r--  include/asm-generic/cacheflush.h | 33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index a950a22c4890..cac7404b2bdd 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
+#ifndef flush_cache_all
 static inline void flush_cache_all(void)
 {
 }
+#endif
 
+#ifndef flush_cache_mm
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_dup_mm
 static inline void flush_cache_dup_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_range
 static inline void flush_cache_range(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_page
 static inline void flush_cache_page(struct vm_area_struct *vma,
				     unsigned long vmaddr,
				     unsigned long pfn)
 {
 }
+#endif
 
+#ifndef flush_dcache_page
 static inline void flush_dcache_page(struct page *page)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_lock
 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_unlock
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_icache_range
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_icache_page
 static inline void flush_icache_page(struct vm_area_struct *vma,
				      struct page *page)
 {
 }
+#endif
 
+#ifndef flush_icache_user_range
 static inline void flush_icache_user_range(struct vm_area_struct *vma,
					    struct page *page,
					    unsigned long addr, int len)
 {
 }
+#endif
 
+#ifndef flush_cache_vmap
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
+#endif
+
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
+#endif
 
 #endif /* __ASM_CACHEFLUSH_H */
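Note on the pattern: wrapping each generic no-op stub in #ifndef/#endif lets an architecture override individual hooks and fall back to asm-generic for the rest. A minimal sketch of how an arch header might use this, assuming a hypothetical arch "myarch" (the header path, guard name, and function body are illustrative, not from this commit):

/* arch/myarch/include/asm/cacheflush.h -- hypothetical example */
#ifndef __MYARCH_CACHEFLUSH_H
#define __MYARCH_CACHEFLUSH_H

struct page;

/* Arch-specific implementation of just one hook. */
static inline void flush_dcache_page(struct page *page)
{
	/* ... write back and invalidate the D-cache lines for this page ... */
}
/* Defining the macro makes asm-generic skip its empty stub. */
#define flush_dcache_page flush_dcache_page

/* Every hook not overridden above falls back to the generic no-ops. */
#include <asm-generic/cacheflush.h>

#endif /* __MYARCH_CACHEFLUSH_H */

The self-referential "#define flush_dcache_page flush_dcache_page" idiom is what the new #ifndef guards test for: the macro expands to the function name itself, so calls still resolve to the inline function while the preprocessor can see that an override exists.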