| author | Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> | 2009-11-14 15:54:27 +0900 |
|---|---|---|
| committer | Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> | 2009-11-20 10:05:50 +0900 |
| commit | db38d5ad323362bfca118b52fe5906f97a69fb45 (patch) | |
| tree | 11940fedf0eb0ff80527094bd435ec89641f8adc /fs/nilfs2 | |
| parent | 141bbdba9c2c1592d56b019277774099a5412aea (diff) | |
nilfs2: add cache framework for persistent object allocator
This adds the setup and cleanup routines for the persistent object
allocator cache.
According to ftrace analyses, accessing buffers of the DAT file
repeatedly incurs non-negligible overhead. To mitigate that overhead,
this introduces a cache framework for the persistent object allocator
(palloc), which the DAT file and ifile use.
struct nilfs_palloc_cache represents the per-metadata-file cache
object for files that use palloc.
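
For context rather than as part of this patch: the value of remembering
the most recently used descriptor, bitmap, and entry buffer heads is that
a later lookup whose block offset matches the cached one can reuse the
buffer head instead of repeating the block lookup. A minimal, hypothetical
sketch of such a cache-hit check (the helper name and the locking
assumption are mine, not from the patch; later patches wire the real
lookup paths to the cache):

```c
#include <linux/buffer_head.h>	/* struct buffer_head, get_bh() */
#include "alloc.h"		/* struct nilfs_bh_assoc (added by this patch) */

/*
 * Hypothetical helper, for illustration only.  Assumes the caller holds
 * cache->lock.  On a hit, take an extra reference so the caller owns one;
 * on a miss, return NULL and let the caller read the block as before.
 */
static struct buffer_head *
palloc_cache_try_block(struct nilfs_bh_assoc *prev, unsigned long blkoff)
{
	if (prev->bh && blkoff == prev->blkoff) {
		get_bh(prev->bh);
		return prev->bh;
	}
	return NULL;
}
```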
The cache is initialized through nilfs_palloc_setup_cache() and
destroyed by nilfs_palloc_destroy_cache(); callers of the former
function will be added to the individual allocators of the DAT file
and ifile in subsequent patches.
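
As a hedged illustration of what such a caller could look like (the
wrapper struct and function below are hypothetical; the real call sites
arrive only with the follow-up patches): a palloc user would embed a
struct nilfs_palloc_cache next to its metadata-file state and register
it once the inode exists.

```c
#include "alloc.h"
#include "mdt.h"

/* Hypothetical per-file state for a palloc user such as the ifile. */
struct example_palloc_file_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
};

/* Illustrative only: attach the embedded cache to the metadata file. */
static void example_attach_palloc_cache(struct inode *inode,
					struct example_palloc_file_info *info)
{
	nilfs_palloc_setup_cache(inode, &info->palloc_cache);
}
```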
nilfs_palloc_destroy_cache() will be called from nilfs_mdt_destroy()
if a cache is attached to the metadata file. A companion function,
nilfs_palloc_clear_cache(), is provided so that the buffer head
references can be released independently of the cleanup task. This
adjunctive function will be used before invalidating pages of a
metadata file that has the cache.
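
For example, a call site along these lines seems intended (a minimal
sketch; the invalidation helper shown is a generic kernel call, and
whether it is the exact one used here is an assumption): clear the cache
first so no stale buffer head references outlive the pages they belong to.

```c
#include <linux/fs.h>		/* truncate_inode_pages() */
#include "alloc.h"
#include "mdt.h"		/* NILFS_MDT(), mi_palloc_cache */

/* Illustrative only: drop cached buffer heads, then invalidate the pages. */
static void example_invalidate_palloc_file(struct inode *inode)
{
	if (NILFS_MDT(inode)->mi_palloc_cache)
		nilfs_palloc_clear_cache(inode);
	truncate_inode_pages(inode->i_mapping, 0);
}
```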
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Diffstat (limited to 'fs/nilfs2')
| -rw-r--r-- | fs/nilfs2/alloc.c | 27 |
|---|---|---|
| -rw-r--r-- | fs/nilfs2/alloc.h | 21 |
| -rw-r--r-- | fs/nilfs2/mdt.c | 2 |
| -rw-r--r-- | fs/nilfs2/mdt.h | 2 |
4 files changed, 52 insertions, 0 deletions
```diff
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 7e2b3ce57aa9..c56300d8d177 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -491,3 +491,30 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
 	}
 	return 0;
 }
+
+void nilfs_palloc_setup_cache(struct inode *inode,
+			      struct nilfs_palloc_cache *cache)
+{
+	NILFS_MDT(inode)->mi_palloc_cache = cache;
+	spin_lock_init(&cache->lock);
+}
+
+void nilfs_palloc_clear_cache(struct inode *inode)
+{
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	spin_lock(&cache->lock);
+	brelse(cache->prev_desc.bh);
+	brelse(cache->prev_bitmap.bh);
+	brelse(cache->prev_entry.bh);
+	cache->prev_desc.bh = NULL;
+	cache->prev_bitmap.bh = NULL;
+	cache->prev_entry.bh = NULL;
+	spin_unlock(&cache->lock);
+}
+
+void nilfs_palloc_destroy_cache(struct inode *inode)
+{
+	nilfs_palloc_clear_cache(inode);
+	NILFS_MDT(inode)->mi_palloc_cache = NULL;
+}
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 4ace5475c2c7..f4543ac4f560 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -69,4 +69,25 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t);
 #define nilfs_clear_bit_atomic		ext2_clear_bit_atomic
 #define nilfs_find_next_zero_bit	ext2_find_next_zero_bit
 
+/*
+ * persistent object allocator cache
+ */
+
+struct nilfs_bh_assoc {
+	unsigned long blkoff;
+	struct buffer_head *bh;
+};
+
+struct nilfs_palloc_cache {
+	spinlock_t lock;
+	struct nilfs_bh_assoc prev_desc;
+	struct nilfs_bh_assoc prev_bitmap;
+	struct nilfs_bh_assoc prev_entry;
+};
+
+void nilfs_palloc_setup_cache(struct inode *inode,
+			      struct nilfs_palloc_cache *cache);
+void nilfs_palloc_clear_cache(struct inode *inode);
+void nilfs_palloc_destroy_cache(struct inode *inode);
+
 #endif	/* _NILFS_ALLOC_H */
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 948b1f8bc505..06713ffcc7f2 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -571,6 +571,8 @@ void nilfs_mdt_destroy(struct inode *inode)
 {
 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
+	if (mdi->mi_palloc_cache)
+		nilfs_palloc_destroy_cache(inode);
 	nilfs_mdt_clear(inode);
 
 	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index c396b6c03931..6c4bbb0470fc 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -36,6 +36,7 @@
  * @mi_entry_size: size of an entry
  * @mi_first_entry_offset: offset to the first entry
  * @mi_entries_per_block: number of entries in a block
+ * @mi_palloc_cache: persistent object allocator cache
  * @mi_blocks_per_group: number of blocks in a group
  * @mi_blocks_per_desc_block: number of blocks per descriptor block
  */
@@ -46,6 +47,7 @@ struct nilfs_mdt_info {
 	unsigned mi_entry_size;
 	unsigned mi_first_entry_offset;
 	unsigned long mi_entries_per_block;
+	struct nilfs_palloc_cache *mi_palloc_cache;
 	unsigned long mi_blocks_per_group;
 	unsigned long mi_blocks_per_desc_block;
 };
```