-rw-r--r--    mm/readahead.c    33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 4a58befbde4..fd588ffc508 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -380,6 +380,29 @@ ondemand_readahead(struct address_space *mapping,
         }
 
         /*
+         * Hit a marked page without valid readahead state.
+         * E.g. interleaved reads.
+         * Query the pagecache for async_size, which normally equals to
+         * readahead size. Ramp it up and use it as the new readahead size.
+         */
+        if (hit_readahead_marker) {
+                pgoff_t start;
+
+                read_lock_irq(&mapping->tree_lock);
+                start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
+                read_unlock_irq(&mapping->tree_lock);
+
+                if (!start || start - offset > max)
+                        return 0;
+
+                ra->start = start;
+                ra->size = start - offset;      /* old async_size */
+                ra->size = get_next_ra_size(ra, max);
+                ra->async_size = ra->size;
+                goto readit;
+        }
+
+        /*
          * It may be one of
          *      - first read on start of file
          *      - sequential cache miss
@@ -390,16 +413,6 @@ ondemand_readahead(struct address_space *mapping,
         ra->size = get_init_ra_size(req_size, max);
         ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
-        /*
-         * Hit on a marked page without valid readahead state.
-         * E.g. interleaved reads.
-         * Not knowing its readahead pos/size, bet on the minimal possible one.
-         */
-        if (hit_readahead_marker) {
-                ra->start++;
-                ra->size = get_next_ra_size(ra, max);
-        }
-
 readit:
         return ra_submit(ra, mapping, filp);
 }
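
For context, the standalone sketch below is only an illustrative approximation of the recovery path this patch adds: when a readahead marker is hit without valid state, scan ahead of the current offset for the first uncached page to recover the old async_size, then ramp that up into the new readahead size. The array-based next_hole(), the next_ra_size() name, and the 4x/2x growth factors with the max/16 threshold are assumptions for illustration, not the kernel's radix_tree_next_hole() or get_next_ra_size() implementations.

/*
 * Illustrative userspace sketch, not kernel code.
 */
#include <stdio.h>

#define NPAGES 64

static int cached[NPAGES];      /* 1 = page present in our toy "pagecache" */

/* Return the first index in [start, start + scan) that is not cached. */
static unsigned long next_hole(unsigned long start, unsigned long scan)
{
        unsigned long i;

        for (i = start; i < start + scan && i < NPAGES; i++)
                if (!cached[i])
                        return i;
        return start + scan;    /* no hole found within the scan window */
}

/* Assumed ramp-up rule: grow fast while small, then double, capped at max. */
static unsigned long next_ra_size(unsigned long cur, unsigned long max)
{
        unsigned long newsize = (cur < max / 16) ? 4 * cur : 2 * cur;

        return newsize < max ? newsize : max;
}

int main(void)
{
        unsigned long offset = 8, max = 32, i;
        unsigned long start, old_async, new_size;

        /* Pretend pages 0..15 were read ahead by another interleaved stream. */
        for (i = 0; i < 16; i++)
                cached[i] = 1;

        start = next_hole(offset, max + 1);
        if (start == offset || start - offset > max) {
                puts("no usable readahead state recovered");
                return 0;
        }

        old_async = start - offset;     /* pages still cached ahead of us */
        new_size = next_ra_size(old_async, max);
        printf("hole at %lu, old async_size %lu, new readahead size %lu\n",
               start, old_async, new_size);
        return 0;
}

The point of the change, as the removed comment shows, is that the old code could only "bet on the minimal possible" window when it hit a marker with no state; querying the pagecache for the distance to the next hole recovers a realistic old async_size to ramp from instead.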