author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-30 14:38:01 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-30 14:38:01 -0700
commit | d42f323a7df0b298c07313db00b44b78555ca8e6 (patch) |
tree | e9ac2b9f20fed683ff78b294c3792acb157787e5 /mm/filemap.c |
parent | 65ec0a7d24913b146cd1500d759b8c340319d55e (diff) |
parent | 4d75136be8bf3ae01b0bc3e725b2cdc921e103bd (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
"A few misc subsystems and some of MM.
175 patches.
Subsystems affected by this patch series: ia64, kbuild, scripts, sh,
ocfs2, kfifo, vfs, kernel/watchdog, and mm (slab-generic, slub,
kmemleak, debug, pagecache, msync, gup, memremap, memcg, pagemap,
mremap, dma, sparsemem, vmalloc, documentation, kasan, initialization,
pagealloc, and memory-failure)"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (175 commits)
mm/memory-failure: unnecessary amount of unmapping
mm/mmzone.h: fix existing kernel-doc comments and link them to core-api
mm: page_alloc: ignore init_on_free=1 for debug_pagealloc=1
net: page_pool: use alloc_pages_bulk in refill code path
net: page_pool: refactor dma_map into own function page_pool_dma_map
SUNRPC: refresh rq_pages using a bulk page allocator
SUNRPC: set rq_page_end differently
mm/page_alloc: inline __rmqueue_pcplist
mm/page_alloc: optimize code layout for __alloc_pages_bulk
mm/page_alloc: add an array-based interface to the bulk page allocator
mm/page_alloc: add a bulk page allocator
mm/page_alloc: rename alloced to allocated
mm/page_alloc: duplicate include linux/vmalloc.h
mm, page_alloc: avoid page_to_pfn() in move_freepages()
mm/Kconfig: remove default DISCONTIGMEM_MANUAL
mm: page_alloc: dump migrate-failed pages
mm/mempolicy: fix mpol_misplaced kernel-doc
mm/mempolicy: rewrite alloc_pages_vma documentation
mm/mempolicy: rewrite alloc_pages documentation
mm/mempolicy: rename alloc_pages_current to alloc_pages
...
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 61
1 file changed, 47 insertions(+), 14 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 151090fdcf29..5be57ba01d33 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -636,6 +636,49 @@ static bool mapping_needs_writeback(struct address_space *mapping)
 }
 
 /**
+ * filemap_range_needs_writeback - check if range potentially needs writeback
+ * @mapping:	address space within which to check
+ * @start_byte:	offset in bytes where the range starts
+ * @end_byte:	offset in bytes where the range ends (inclusive)
+ *
+ * Find at least one page in the range supplied, usually used to check if
+ * direct writing in this range will trigger a writeback. Used by O_DIRECT
+ * read/write with IOCB_NOWAIT, to see if the caller needs to do
+ * filemap_write_and_wait_range() before proceeding.
+ *
+ * Return: %true if the caller should do filemap_write_and_wait_range() before
+ * doing O_DIRECT to a page in this range, %false otherwise.
+ */
+bool filemap_range_needs_writeback(struct address_space *mapping,
+				   loff_t start_byte, loff_t end_byte)
+{
+	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
+	pgoff_t max = end_byte >> PAGE_SHIFT;
+	struct page *page;
+
+	if (!mapping_needs_writeback(mapping))
+		return false;
+	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+		return false;
+	if (end_byte < start_byte)
+		return false;
+
+	rcu_read_lock();
+	xas_for_each(&xas, page, max) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (xa_is_value(page))
+			continue;
+		if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
+			break;
+	}
+	rcu_read_unlock();
+	return page != NULL;
+}
+EXPORT_SYMBOL_GPL(filemap_range_needs_writeback);
+
+/**
  * filemap_write_and_wait_range - write out & wait on a file range
  * @mapping:	the address_space for the pages
  * @lstart:	offset in bytes where the range starts
@@ -1724,7 +1767,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
  * @mapping: the address_space to search
  * @index: The page cache index.
  *
- * Looks up the page cache slot at @mapping & @offset.  If there is a
+ * Looks up the page cache slot at @mapping & @index.  If there is a
  * page cache page, the head page is returned with an increased refcount.
  *
  * If the slot holds a shadow entry of a previously evicted page, or a
@@ -2305,8 +2348,6 @@ static int filemap_read_page(struct file *file, struct address_space *mapping,
 		return error;
 	if (PageUptodate(page))
 		return 0;
-	if (!page->mapping)	/* page truncated */
-		return AOP_TRUNCATED_PAGE;
 	shrink_readahead_size_eio(&file->f_ra);
 	return -EIO;
 }
@@ -2638,8 +2679,8 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 
 	size = i_size_read(inode);
 	if (iocb->ki_flags & IOCB_NOWAIT) {
-		if (filemap_range_has_page(mapping, iocb->ki_pos,
-					iocb->ki_pos + count - 1))
+		if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
+					iocb->ki_pos + count - 1))
 			return -EAGAIN;
 	} else {
 		retval = filemap_write_and_wait_range(mapping,
@@ -2937,7 +2978,6 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	struct file *file = vmf->vma->vm_file;
 	struct file *fpin = NULL;
 	struct address_space *mapping = file->f_mapping;
-	struct file_ra_state *ra = &file->f_ra;
 	struct inode *inode = mapping->host;
 	pgoff_t offset = vmf->pgoff;
 	pgoff_t max_off;
@@ -3024,14 +3064,8 @@ page_not_uptodate:
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
-	ClearPageError(page);
 	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-	error = mapping->a_ops->readpage(file, page);
-	if (!error) {
-		wait_on_page_locked(page);
-		if (!PageUptodate(page))
-			error = -EIO;
-	}
+	error = filemap_read_page(file, mapping, page);
 	if (fpin)
 		goto out_retry;
 	put_page(page);
@@ -3039,7 +3073,6 @@ page_not_uptodate:
 
 	if (!error || error == AOP_TRUNCATED_PAGE)
 		goto retry_find;
-	shrink_readahead_size_eio(ra);
 	return VM_FAULT_SIGBUS;
 
 out_retry:
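
For orientation, here is a minimal sketch (not from this merge) of how a ->read_iter implementation might use the new helper on its IOCB_NOWAIT path, mirroring the generic_file_read_iter() hunk above. demo_dio_read() and its surroundings are hypothetical; filemap_range_needs_writeback(), filemap_write_and_wait_range() and ->direct_IO() are the real interfaces as of this merge:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

/*
 * Hypothetical ->read_iter fragment. With IOCB_NOWAIT set, O_DIRECT must
 * not block: instead of returning -EAGAIN whenever *any* page is cached
 * (the old filemap_range_has_page() check), bail out only when a cached
 * page is dirty, locked, or under writeback, i.e. when a blocking
 * filemap_write_and_wait_range() would actually be required.
 */
static ssize_t demo_dio_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	size_t count = iov_iter_count(iter);
	int ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
						  iocb->ki_pos + count - 1))
			return -EAGAIN;	/* caller may retry without NOWAIT */
	} else {
		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
						   iocb->ki_pos + count - 1);
		if (ret)
			return ret;
	}

	/* Hand off to the filesystem's direct I/O implementation. */
	return mapping->a_ops->direct_IO(iocb, iter);
}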
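
The filemap_fault() hunks above fold the open-coded readpage-and-wait sequence into filemap_read_page(), which is why ClearPageError(), the ->readpage() call, and the wait/PageUptodate dance disappear from the fault path. Below is a hedged reconstruction of that helper after this merge, pieced together from the shown context lines and the logic removed from filemap_fault(); lines outside the shown hunks are assumptions, not verbatim kernel source:

/*
 * Sketch of filemap_read_page() based on the hunks above; treat as a
 * reconstruction, not the exact kernel source.
 */
static int filemap_read_page(struct file *file, struct address_space *mapping,
			     struct page *page)
{
	int error;

	/*
	 * PG_error may be left over from a previous, possibly transient,
	 * failure; ->readpage() sets it again if this attempt fails too.
	 */
	ClearPageError(page);

	/* Start the read; ->readpage() unlocks the page when it completes. */
	error = mapping->a_ops->readpage(file, page);
	if (error)
		return error;

	wait_on_page_locked(page);	/* assumption: plain wait, as in the old fault path */
	if (PageUptodate(page))
		return 0;

	/* Read I/O error: shrink the readahead window, as the tail of the
	 * @@ -2305 hunk above shows. */
	shrink_readahead_size_eio(&file->f_ra);
	return -EIO;
}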