author    Chao Yu <chao2.yu@samsung.com>    2014-09-04 18:13:01 +0800
committer Jaegeuk Kim <jaegeuk@kernel.org>  2014-09-09 13:15:05 -0700
commit    184a5cd2ce281f1207d72adb9ae18e416ca371db (patch)
tree      b91e2cfcf93adc065377908fa601659ca24fcefc /fs/f2fs/segment.c
parent    d3a14afd5ed1970519a2d6ed59f4062ec3ba821f (diff)
f2fs: refactor flush_sit_entries codes for reducing SIT writes
In commit aec71382c681 ("f2fs: refactor flush_nat_entries codes for reducing NAT writes"), we described the issue as below:

"Although building the NAT journal in cursum reduces the read/write work for the NAT block, the previous design leaves us with lower performance when checkpoints are written frequently, in these cases:
1. If the journal in cursum is already full, it is wasteful that we flush all nat entries to pages for persistence but do not cache any entries.
2. If the journal in cursum is not full, we fill nat entries into the journal until it is full, then flush the remaining dirty entries to disk without merging the journaled entries; these journaled entries may then be flushed to disk at the next checkpoint, having lost the chance to be flushed last time."

Actually, we have the same problem when using the SIT journal area.

In this patch, we first update the SIT journal with as many dirty entries as possible. Second, if there is no space left in the SIT journal, we remove all entries from the journal and walk through the whole dirty entry bitmap of the SIT, accounting each dirty SIT entry to the SIT entry set of the SIT block it belongs to. All entry sets are linked into the list sit_entry_set in sm_info, sorted in ascending order by the count of entries in each set. Later we flush as many of the sets with the fewest entries into the journal as will fit, and then flush the dense sets, with merged entries, to disk.

In this way we use the SIT journal area more effectively and reduce SIT updates, resulting in a performance gain and a longer lifetime for the flash device.

In my testing environment, this patch noticeably reduces SIT block updates.

virtual machine + hard disk:
fsstress -p 20 -n 400 -l 5

           sit page num    cp count    sit pages/cp
based      2006.50         1349.75     1.486
patched    1566.25         1463.25     1.070

The latency of the merge operation is small when handling a large number of dirty SIT entries in flush_sit_entries:

latency(ns)    dirty sit count
36038          2151
49168          2123
37174          2232

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
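The core idea can be seen in the small standalone sketch below (plain C, not the kernel code in the diff; struct entry_set, SIT_JOURNAL_SLOTS and the segment numbers are simplified stand-ins): dirty SIT entries are grouped per SIT block, the sets are kept sorted in ascending order by entry count, and at flush time the sparse sets are packed into the journal while space remains, leaving only the dense sets to be written as full SIT blocks.

/*
 * Standalone illustrative sketch of the policy this patch adds: group dirty
 * SIT entries into per-SIT-block sets, keep the sets sorted ascending by
 * entry count, journal the smallest sets while journal space remains, and
 * write the remaining dense sets to SIT blocks.  The names and the journal
 * capacity here are simplified stand-ins, not f2fs definitions.
 */
#include <stdio.h>
#include <stdlib.h>

#define SIT_JOURNAL_SLOTS 6	/* stand-in for the cursum journal capacity */

struct entry_set {
	unsigned int start_segno;	/* first segment of the SIT block */
	unsigned int entry_cnt;		/* dirty entries in that block */
	struct entry_set *next;		/* kept sorted, ascending entry_cnt */
};

/* insert a set while keeping the list sorted by ascending entry_cnt */
static void add_sorted(struct entry_set **head, unsigned int start_segno,
		       unsigned int entry_cnt)
{
	struct entry_set *ses = malloc(sizeof(*ses));
	struct entry_set **pos = head;

	ses->start_segno = start_segno;
	ses->entry_cnt = entry_cnt;
	while (*pos && (*pos)->entry_cnt < entry_cnt)
		pos = &(*pos)->next;
	ses->next = *pos;
	*pos = ses;
}

/* two-step flush: journal the sparse sets first, then spill the dense ones */
static void flush_sets(struct entry_set *head)
{
	unsigned int journal_free = SIT_JOURNAL_SLOTS;
	int to_journal = 1;
	struct entry_set *ses;

	for (ses = head; ses; ses = ses->next) {
		if (to_journal && ses->entry_cnt > journal_free)
			to_journal = 0;		/* journal is full: fall back */

		if (to_journal) {
			journal_free -= ses->entry_cnt;
			printf("set %u: %u entries -> SIT journal\n",
					ses->start_segno, ses->entry_cnt);
		} else {
			printf("set %u: %u entries -> SIT block write\n",
					ses->start_segno, ses->entry_cnt);
		}
	}
}

int main(void)
{
	struct entry_set *head = NULL;

	/* e.g. three dirty SIT blocks holding 5, 1 and 3 dirty entries */
	add_sorted(&head, 0, 5);
	add_sorted(&head, 55, 1);
	add_sorted(&head, 110, 3);

	/* the sets with 1 and 3 entries fit the journal; the set with 5 spills */
	flush_sets(head);

	while (head) {
		struct entry_set *next = head->next;
		free(head);
		head = next;
	}
	return 0;
}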
Diffstat (limited to 'fs/f2fs/segment.c')
-rw-r--r--  fs/f2fs/segment.c  227
1 file changed, 166 insertions(+), 61 deletions(-)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a6b90a520894..d1ff2250cc35 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -25,6 +25,7 @@
#define __reverse_ffz(x) __reverse_ffs(~(x))
static struct kmem_cache *discard_entry_slab;
+static struct kmem_cache *sit_entry_set_slab;
/*
* __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
@@ -492,11 +493,16 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi)
}
}
-static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
+static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct sit_info *sit_i = SIT_I(sbi);
- if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
+
+ if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
sit_i->dirty_sentries++;
+ return false;
+ }
+
+ return true;
}
static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
@@ -1443,27 +1449,86 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
return dst_page;
}
-static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
+static struct sit_entry_set *grab_sit_entry_set(void)
+{
+ struct sit_entry_set *ses =
+ f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
+
+ ses->entry_cnt = 0;
+ INIT_LIST_HEAD(&ses->set_list);
+ return ses;
+}
+
+static void release_sit_entry_set(struct sit_entry_set *ses)
+{
+ list_del(&ses->set_list);
+ kmem_cache_free(sit_entry_set_slab, ses);
+}
+
+static void adjust_sit_entry_set(struct sit_entry_set *ses,
+ struct list_head *head)
+{
+ struct sit_entry_set *next = ses;
+
+ if (list_is_last(&ses->set_list, head))
+ return;
+
+ list_for_each_entry_continue(next, head, set_list)
+ if (ses->entry_cnt <= next->entry_cnt)
+ break;
+
+ list_move_tail(&ses->set_list, &next->set_list);
+}
+
+static void add_sit_entry(unsigned int segno, struct list_head *head)
+{
+ struct sit_entry_set *ses;
+ unsigned int start_segno = START_SEGNO(segno);
+
+ list_for_each_entry(ses, head, set_list) {
+ if (ses->start_segno == start_segno) {
+ ses->entry_cnt++;
+ adjust_sit_entry_set(ses, head);
+ return;
+ }
+ }
+
+ ses = grab_sit_entry_set();
+
+ ses->start_segno = start_segno;
+ ses->entry_cnt++;
+ list_add(&ses->set_list, head);
+}
+
+static void add_sits_in_set(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_sm_info *sm_info = SM_I(sbi);
+ struct list_head *set_list = &sm_info->sit_entry_set;
+ unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
+ unsigned long nsegs = TOTAL_SEGS(sbi);
+ unsigned int segno;
+
+ for_each_set_bit(segno, bitmap, nsegs)
+ add_sit_entry(segno, set_list);
+}
+
+static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
int i;
- /*
- * If the journal area in the current summary is full of sit entries,
- * all the sit entries will be flushed. Otherwise the sit entries
- * are not able to replace with newly hot sit entries.
- */
- if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
- for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
- unsigned int segno;
- segno = le32_to_cpu(segno_in_journal(sum, i));
- __mark_sit_entry_dirty(sbi, segno);
- }
- update_sits_in_cursum(sum, -sits_in_cursum(sum));
- return true;
+ for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
+ unsigned int segno;
+ bool dirtied;
+
+ segno = le32_to_cpu(segno_in_journal(sum, i));
+ dirtied = __mark_sit_entry_dirty(sbi, segno);
+
+ if (!dirtied)
+ add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
}
- return false;
+ update_sits_in_cursum(sum, -sits_in_cursum(sum));
}
/*
@@ -1476,68 +1541,95 @@ void flush_sit_entries(struct f2fs_sb_info *sbi)
unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct sit_entry_set *ses, *tmp;
+ struct list_head *head = &SM_I(sbi)->sit_entry_set;
unsigned long nsegs = TOTAL_SEGS(sbi);
- struct page *page = NULL;
- struct f2fs_sit_block *raw_sit = NULL;
- unsigned int start = 0, end = 0;
- unsigned int segno;
- bool flushed;
+ bool to_journal = true;
mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
/*
- * "flushed" indicates whether sit entries in journal are flushed
- * to the SIT area or not.
+ * add and account sit entries from the dirty bitmap into sit
+ * entry sets temporarily
*/
- flushed = flush_sits_in_journal(sbi);
-
- for_each_set_bit(segno, bitmap, nsegs) {
- struct seg_entry *se = get_seg_entry(sbi, segno);
- int sit_offset, offset;
+ add_sits_in_set(sbi);
- sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
-
- /* add discard candidates */
- if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
- add_discard_addrs(sbi, segno, se);
+ /*
+ * if there is not enough space in the journal to store dirty sit
+ * entries, remove all entries from the journal and add and account
+ * them in sit entry sets.
+ */
+ if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
+ remove_sits_in_journal(sbi);
- if (flushed)
- goto to_sit_page;
+ if (!sit_i->dirty_sentries)
+ goto out;
- offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
- if (offset >= 0) {
- segno_in_journal(sum, offset) = cpu_to_le32(segno);
- seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
- goto flush_done;
+ /*
+ * there are two steps to flush sit entries:
+ * #1, flush sit entries to journal in current cold data summary block.
+ * #2, flush sit entries to sit page.
+ */
+ list_for_each_entry_safe(ses, tmp, head, set_list) {
+ struct page *page;
+ struct f2fs_sit_block *raw_sit = NULL;
+ unsigned int start_segno = ses->start_segno;
+ unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
+ nsegs);
+ unsigned int segno = start_segno;
+
+ if (to_journal &&
+ !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
+ to_journal = false;
+
+ if (!to_journal) {
+ page = get_next_sit_page(sbi, start_segno);
+ raw_sit = page_address(page);
}
-to_sit_page:
- if (!page || (start > segno) || (segno > end)) {
- if (page) {
- f2fs_put_page(page, 1);
- page = NULL;
- }
- start = START_SEGNO(segno);
- end = start + SIT_ENTRY_PER_BLOCK - 1;
+ /* flush dirty sit entries in region of current sit set */
+ for_each_set_bit_from(segno, bitmap, end) {
+ int offset, sit_offset;
+ struct seg_entry *se = get_seg_entry(sbi, segno);
+
+ /* add discard candidates */
+ if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
+ add_discard_addrs(sbi, segno, se);
+
+ if (to_journal) {
+ offset = lookup_journal_in_cursum(sum,
+ SIT_JOURNAL, segno, 1);
+ f2fs_bug_on(sbi, offset < 0);
+ segno_in_journal(sum, offset) =
+ cpu_to_le32(segno);
+ seg_info_to_raw_sit(se,
+ &sit_in_journal(sum, offset));
+ } else {
+ sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
+ seg_info_to_raw_sit(se,
+ &raw_sit->entries[sit_offset]);
+ }
- /* read sit block that will be updated */
- page = get_next_sit_page(sbi, start);
- raw_sit = page_address(page);
+ __clear_bit(segno, bitmap);
+ sit_i->dirty_sentries--;
+ ses->entry_cnt--;
}
- /* udpate entry in SIT block */
- seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
-flush_done:
- __clear_bit(segno, bitmap);
- sit_i->dirty_sentries--;
+ if (!to_journal)
+ f2fs_put_page(page, 1);
+
+ f2fs_bug_on(sbi, ses->entry_cnt);
+ release_sit_entry_set(ses);
}
+
+ f2fs_bug_on(sbi, !list_empty(head));
+ f2fs_bug_on(sbi, sit_i->dirty_sentries);
+
+out:
mutex_unlock(&sit_i->sentry_lock);
mutex_unlock(&curseg->curseg_mutex);
- /* writeout last modified SIT block */
- f2fs_put_page(page, 1);
-
set_prefree_as_free_segments(sbi);
}
@@ -1854,6 +1946,8 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->nr_discards = 0;
sm_info->max_discards = 0;
+ INIT_LIST_HEAD(&sm_info->sit_entry_set);
+
if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
@@ -1983,11 +2077,22 @@ int __init create_segment_manager_caches(void)
discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
sizeof(struct discard_entry));
if (!discard_entry_slab)
- return -ENOMEM;
+ goto fail;
+
+ sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
+ sizeof(struct sit_entry_set));
+ if (!sit_entry_set_slab)
+ goto destroy_discard_entry;
return 0;
+
+destroy_discard_entry:
+ kmem_cache_destroy(discard_entry_slab);
+fail:
+ return -ENOMEM;
}
void destroy_segment_manager_caches(void)
{
+ kmem_cache_destroy(sit_entry_set_slab);
kmem_cache_destroy(discard_entry_slab);
}