path: root/fs/ext4/ext4.h
author	Curt Wohlgemuth <curtw@google.com>	2010-05-16 15:00:00 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2010-05-16 15:00:00 -0400
commit	8a57d9d61a6e361c7bb159dda797672c1df1a691 (patch)
tree	39a01022ed2294f0acc94b45554c9a292db671dc /fs/ext4/ext4.h
parent	6d19c42b7cf81c39632b6d4dbc514e8449bcd346 (diff)
ext4: check for a good block group before loading buddy pages
This adds a new field to ext4_group_info that caches the order of the largest free block extent in a block group, so that we don't load the buddy pages until *after* we've done a sanity check on the block group.

With large allocation requests (e.g., an 8 MiB fallocate()) and relatively full partitions, it's easy to have no block group with a free extent large enough to satisfy the request length. This currently causes the cr == 0 loop in ext4_mb_regular_allocator() to load the buddy bitmap pages for EVERY block group, which can be a lot of pages. The patch below allows us to call ext4_mb_good_group() BEFORE we load the buddy pages (although we have to check again after we lock the block group).

Addresses-Google-Bug: #2578108
Addresses-Google-Bug: #2704453

Signed-off-by: Curt Wohlgemuth <curtw@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
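The following user-space sketch illustrates the idea in the commit message: each block group's cached largest-free-extent order is compared against the order of the request before the (expensive) buddy pages are touched. It is a minimal model, not the kernel code; the names group_info, good_group(), and load_buddy() are hypothetical stand-ins for ext4_group_info, ext4_mb_good_group(), and the buddy-page loading path.

/*
 * Sketch only: models the cr == 0 scan described above; the identifiers
 * below are stand-ins, not actual kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

struct group_info {
	int bb_free;                   /* total free blocks in the group */
	int bb_largest_free_order;     /* order of largest free extent, -1 if none */
};

/* Cheap pre-check: could this group possibly hold an extent of 2^order blocks? */
static bool good_group(const struct group_info *grp, int order)
{
	if (grp->bb_free == 0)
		return false;
	return grp->bb_largest_free_order >= order;
}

/* Stand-in for the expensive step: reading the group's buddy bitmap pages. */
static void load_buddy(int group)
{
	printf("loading buddy pages for group %d\n", group);
}

int main(void)
{
	struct group_info groups[] = {
		{ .bb_free = 128,  .bb_largest_free_order = 5  },
		{ .bb_free = 0,    .bb_largest_free_order = -1 },
		{ .bb_free = 4096, .bb_largest_free_order = 11 },
	};
	int request_order = 11;        /* 8 MiB request with 4 KiB blocks = 2048 blocks = 2^11 */

	for (int i = 0; i < 3; i++) {
		/* Reject unsuitable groups before loading any buddy pages ... */
		if (!good_group(&groups[i], request_order))
			continue;
		load_buddy(i);
		/* ... and check again once the group is locked (lock not modelled here). */
		if (good_group(&groups[i], request_order))
			printf("allocating from group %d\n", i);
	}
	return 0;
}

With the pre-check in place, only the last group in the example pays the cost of load_buddy(); without it, every group would.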
Diffstat (limited to 'fs/ext4/ext4.h')
-rw-r--r--	fs/ext4/ext4.h	1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index bf938cf7c5f..d266003cac3 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1678,6 +1678,7 @@ struct ext4_group_info {
ext4_grpblk_t bb_first_free; /* first free block */
ext4_grpblk_t bb_free; /* total free blocks */
ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
+ ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
struct list_head bb_prealloc_list;
#ifdef DOUBLE_CHECK
void *bb_bitmap;