Diffstat (limited to 'fs/f2fs/gc.c')
-rw-r--r--  fs/f2fs/gc.c  158
1 file changed, 63 insertions(+), 95 deletions(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index b0ec721e984..94b8a0c4845 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -44,10 +44,10 @@ static int gc_thread_func(void *data)
if (kthread_should_stop())
break;
- f2fs_balance_fs(sbi);
-
- if (!test_opt(sbi, BG_GC))
+ if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
+ wait_ms = GC_THREAD_MAX_SLEEP_TIME;
continue;
+ }
/*
* [GC triggering condition]
@@ -78,7 +78,8 @@ static int gc_thread_func(void *data)
sbi->bg_gc++;
- if (f2fs_gc(sbi, 1) == GC_NONE)
+ /* if the return value is not zero, no victim was selected */
+ if (f2fs_gc(sbi))
wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -90,7 +91,10 @@ static int gc_thread_func(void *data)
int start_gc_thread(struct f2fs_sb_info *sbi)
{
struct f2fs_gc_kthread *gc_th;
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+ if (!test_opt(sbi, BG_GC))
+ return 0;
gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th)
return -ENOMEM;
@@ -98,9 +102,10 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
sbi->gc_thread = gc_th;
init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
- GC_THREAD_NAME);
+ "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(gc_th->f2fs_gc_task)) {
kfree(gc_th);
+ sbi->gc_thread = NULL;
return -ENOMEM;
}
return 0;
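
Besides naming the kthread after the backing device's major:minor numbers, the hunk above hardens the failure path: sbi->gc_thread is reset to NULL when kthread_run() fails, so the rest of the code can treat a failed start the same as "no GC thread". A minimal user-space sketch of that pattern, with hypothetical stand-ins (start_worker_stub, start_gc_thread_model, stop_gc_thread_model) for the kernel helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct gc_ctx { int dummy; };
struct sb_info { struct gc_ctx *gc_thread; };

/* Stands in for kthread_run(); pretend the worker failed to start. */
static int start_worker_stub(void)
{
	return -1;
}

static int start_gc_thread_model(struct sb_info *sbi)
{
	struct gc_ctx *gc_th = malloc(sizeof(*gc_th));

	if (!gc_th)
		return -ENOMEM;

	sbi->gc_thread = gc_th;
	if (start_worker_stub() < 0) {
		free(gc_th);
		sbi->gc_thread = NULL;	/* the added line: leave no dangling pointer */
		return -ENOMEM;
	}
	return 0;
}

static void stop_gc_thread_model(struct sb_info *sbi)
{
	if (!sbi->gc_thread)		/* safe: start cleared it on failure */
		return;
	free(sbi->gc_thread);
	sbi->gc_thread = NULL;
}

int main(void)
{
	struct sb_info sbi = { 0 };

	if (start_gc_thread_model(&sbi) != 0)
		printf("start failed, gc_thread = %p\n", (void *)sbi.gc_thread);
	stop_gc_thread_model(&sbi);	/* harmless no-op, no double free */
	return 0;
}

The point of the NULL assignment is simply that every later consumer of sbi->gc_thread has a single, unambiguous "not running" state to check.
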
@@ -141,6 +146,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
struct victim_sel_policy *p)
{
+ /* SSR allocates in a segment unit */
+ if (p->alloc_mode == SSR)
+ return 1 << sbi->log_blocks_per_seg;
if (p->gc_mode == GC_GREEDY)
return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
else if (p->gc_mode == GC_CB)
@@ -356,7 +364,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
sentry = get_seg_entry(sbi, segno);
ret = f2fs_test_bit(offset, sentry->cur_valid_map);
mutex_unlock(&sit_i->sentry_lock);
- return ret ? GC_OK : GC_NEXT;
+ return ret;
}
/*
@@ -364,7 +372,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
* On validity, copy that node with cold status, otherwise (invalid node)
* ignore that.
*/
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
bool initial = true;
@@ -376,21 +384,12 @@ next_step:
for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
- int err;
- /*
- * It makes sure that free segments are able to write
- * all the dirty node pages before CP after this CP.
- * So let's check the space of dirty node pages.
- */
- if (should_do_checkpoint(sbi)) {
- mutex_lock(&sbi->cp_mutex);
- block_operations(sbi);
- return GC_BLOCKED;
- }
+ /* stop BG_GC if there are not enough free sections. */
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+ return;
- err = check_valid_map(sbi, segno, off);
- if (err == GC_NEXT)
+ if (check_valid_map(sbi, segno, off) == 0)
continue;
if (initial) {
@@ -420,11 +419,14 @@ next_step:
};
sync_node_pages(sbi, 0, &wbc);
}
- return GC_DONE;
}
/*
- * Calculate start block index that this node page contains
+ * Calculate the start block index for the given node offset.
+ * Be careful: the caller must pass only node offsets that indicate direct
+ * node blocks. Passing an offset that points to any other node block type,
+ * such as an indirect or double indirect node block, is a caller's bug.
*/
block_t start_bidx_of_node(unsigned int node_ofs)
{
@@ -459,13 +461,13 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
- return GC_NEXT;
+ return 0;
get_node_info(sbi, nid, dni);
if (sum->version != dni->version) {
f2fs_put_page(node_page, 1);
- return GC_NEXT;
+ return 0;
}
*nofs = ofs_of_node(node_page);
@@ -473,8 +475,8 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
f2fs_put_page(node_page, 1);
if (source_blkaddr != blkaddr)
- return GC_NEXT;
- return GC_OK;
+ return 0;
+ return 1;
}
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
@@ -515,13 +517,13 @@ out:
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct list_head *ilist, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
struct f2fs_summary *entry;
block_t start_addr;
- int err, off;
+ int off;
int phase = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -535,20 +537,11 @@ next_step:
unsigned int ofs_in_node, nofs;
block_t start_bidx;
- /*
- * It makes sure that free segments are able to write
- * all the dirty node pages before CP after this CP.
- * So let's check the space of dirty node pages.
- */
- if (should_do_checkpoint(sbi)) {
- mutex_lock(&sbi->cp_mutex);
- block_operations(sbi);
- err = GC_BLOCKED;
- goto stop;
- }
+ /* stop BG_GC if there are not enough free sections. */
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+ return;
- err = check_valid_map(sbi, segno, off);
- if (err == GC_NEXT)
+ if (check_valid_map(sbi, segno, off) == 0)
continue;
if (phase == 0) {
@@ -557,8 +550,7 @@ next_step:
}
/* Get an inode by ino with checking validity */
- err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
- if (err == GC_NEXT)
+ if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
continue;
if (phase == 1) {
@@ -570,7 +562,7 @@ next_step:
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 2) {
- inode = f2fs_iget_nowait(sb, dni.ino);
+ inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode))
continue;
@@ -598,11 +590,9 @@ next_iput:
}
if (++phase < 4)
goto next_step;
- err = GC_DONE;
-stop:
+
if (gc_type == FG_GC)
f2fs_submit_bio(sbi, DATA, true);
- return err;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -616,17 +606,16 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
return ret;
}
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
struct list_head *ilist, int gc_type)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
- int ret = GC_DONE;
/* read segment summary of victim */
sum_page = get_sum_page(sbi, segno);
if (IS_ERR(sum_page))
- return GC_ERROR;
+ return;
/*
* CP needs to lock sum_page. In this time, we don't need
@@ -638,76 +627,55 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
switch (GET_SUM_TYPE((&sum->footer))) {
case SUM_TYPE_NODE:
- ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
+ gc_node_segment(sbi, sum->entries, segno, gc_type);
break;
case SUM_TYPE_DATA:
- ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
+ gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
break;
}
stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
stat_inc_call_count(sbi->stat_info);
f2fs_put_page(sum_page, 0);
- return ret;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, int nGC)
+int f2fs_gc(struct f2fs_sb_info *sbi)
{
- unsigned int segno;
- int old_free_secs, cur_free_secs;
- int gc_status, nfree;
struct list_head ilist;
+ unsigned int segno, i;
int gc_type = BG_GC;
+ int nfree = 0;
+ int ret = -1;
INIT_LIST_HEAD(&ilist);
gc_more:
- nfree = 0;
- gc_status = GC_NONE;
+ if (!(sbi->sb->s_flags & MS_ACTIVE))
+ goto stop;
- if (has_not_enough_free_secs(sbi))
- old_free_secs = reserved_sections(sbi);
- else
- old_free_secs = free_sections(sbi);
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
+ gc_type = FG_GC;
- while (sbi->sb->s_flags & MS_ACTIVE) {
- int i;
- if (has_not_enough_free_secs(sbi))
- gc_type = FG_GC;
+ if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
+ goto stop;
+ ret = 0;
- cur_free_secs = free_sections(sbi) + nfree;
+ for (i = 0; i < sbi->segs_per_sec; i++)
+ do_garbage_collect(sbi, segno + i, &ilist, gc_type);
- /* We got free space successfully. */
- if (nGC < cur_free_secs - old_free_secs)
- break;
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+ nfree++;
- if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
- break;
+ if (has_not_enough_free_secs(sbi, nfree))
+ goto gc_more;
- for (i = 0; i < sbi->segs_per_sec; i++) {
- /*
- * do_garbage_collect will give us three gc_status:
- * GC_ERROR, GC_DONE, and GC_BLOCKED.
- * If GC is finished uncleanly, we have to return
- * the victim to dirty segment list.
- */
- gc_status = do_garbage_collect(sbi, segno + i,
- &ilist, gc_type);
- if (gc_status != GC_DONE)
- goto stop;
- nfree++;
- }
- }
+ if (gc_type == FG_GC)
+ write_checkpoint(sbi, false);
stop:
- if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
- write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
- if (nfree)
- goto gc_more;
- }
mutex_unlock(&sbi->gc_mutex);
put_gc_inode(&ilist);
- BUG_ON(!list_empty(&ilist));
- return gc_status;
+ return ret;
}
void build_gc_manager(struct f2fs_sb_info *sbi)
@@ -715,7 +683,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
DIRTY_I(sbi)->v_ops = &default_v_ops;
}
-int create_gc_caches(void)
+int __init create_gc_caches(void)
{
winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
sizeof(struct inode_entry), NULL);
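
Taken together, the rewritten f2fs_gc() above reduces to a select-victim / collect / repeat loop that escalates from background to foreground GC when free sections run low, and writes a checkpoint only on the foreground path. A standalone model of that control flow, with stub predicates standing in for has_not_enough_free_secs() and __get_victim(), and purely illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

enum gc_type { BG_GC, FG_GC };

static int free_secs = 2;	/* pretend free-section counter            */
static int victims_left = 3;	/* pretend number of dirty victim sections */

static bool has_not_enough_free_secs(int extra)
{
	return free_secs + extra < 4;	/* "4" stands in for the reserved threshold */
}

static bool get_victim(unsigned int *segno)
{
	if (!victims_left)
		return false;
	*segno = (unsigned int)victims_left--;
	return true;
}

/* Mirrors the new f2fs_gc(): returns -1 when no victim was selected, 0 otherwise. */
static int model_f2fs_gc(void)
{
	enum gc_type gc_type = BG_GC;
	unsigned int segno;
	int nfree = 0;
	int ret = -1;

gc_more:
	if (gc_type == BG_GC && has_not_enough_free_secs(nfree))
		gc_type = FG_GC;	/* escalate when space is tight */

	if (!get_victim(&segno))
		goto stop;
	ret = 0;

	printf("collect segment %u (%s)\n", segno,
	       gc_type == FG_GC ? "FG_GC" : "BG_GC");
	if (gc_type == FG_GC) {
		nfree++;		/* assume foreground GC emptied the section */
		free_secs++;
	}

	if (has_not_enough_free_secs(nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		printf("write_checkpoint()\n");
stop:
	return ret;
}

int main(void)
{
	return model_f2fs_gc() ? 1 : 0;
}

As in the patch, -1 is returned only when no victim was ever selected; otherwise the loop keeps collecting until has_not_enough_free_secs() is satisfied.
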