all: new mapfree dirtifying

parent ff9beb91e4
commit d8eac2ecba
@@ -154,6 +154,8 @@ struct mapfree_info *mapfree_get(const char *name, int flags)
 	for (;;) {
 		struct address_space *mapping;
 		struct inode *inode;
+		loff_t length;
+		int i;
 		int ra = 1;
 		int prot = 0600;
 		mm_segment_t oldfs;
@@ -205,7 +207,12 @@ struct mapfree_info *mapfree_get(const char *name, int flags)

 		mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~(__GFP_IO | __GFP_FS));

-		mf->mf_max = i_size_read(inode);
+		length = i_size_read(inode);
+		mf->mf_max = length;
+		for (i = 0; i < DIRTY_MAX; i++) {
+			rwlock_init(&mf->mf_length[i].dl_lock);
+			mf->mf_length[i].dl_length = length;
+		}

 		if (S_ISBLK(inode->i_mode)) {
 			MARS_INF("changing blkdev readahead from %lu to %d\n", inode->i_bdev->bd_disk->queue->backing_dev_info.ra_pages, ra);
@@ -295,6 +302,54 @@ int mapfree_thread(void *data)
 	return 0;
 }

+////////////////// dirty IOs in append mode //////////////////
+
+static
+struct dirty_length *_get_dl(struct mapfree_info *mf, enum dirty_stage stage)
+{
+#ifdef MARS_DEBUGGING
+	if (unlikely(stage < 0)) {
+		MARS_ERR("bad stage=%d\n", stage);
+		stage = 0;
+	}
+	if (unlikely(stage >= DIRTY_MAX)) {
+		MARS_ERR("bad stage=%d\n", stage);
+		stage = DIRTY_MAX - 1;
+	}
+#endif
+	return &mf->mf_length[stage];
+}
+
+void mf_dirty_append(struct mapfree_info *mf, enum dirty_stage stage, loff_t newlen)
+{
+	struct dirty_length *dl = _get_dl(mf, stage);
+	unsigned long flags;
+
+	traced_writelock(&dl->dl_lock, flags);
+	if (dl->dl_length < newlen)
+		dl->dl_length = newlen;
+	traced_writeunlock(&dl->dl_lock, flags);
+}
+
+loff_t mf_dirty_length(struct mapfree_info *mf, enum dirty_stage stage)
+{
+	struct dirty_length *dl = _get_dl(mf, stage);
+
+#ifdef CONFIG_64BIT
+	/* Avoid locking by assuming that 64bit reads are atomic in itself */
+	smp_read_barrier_depends();
+	return ACCESS_ONCE(dl->dl_length);
+#else /* cannot rely on atomic read of two 32bit values */
+	loff_t res;
+	unsigned long flags;
+
+	traced_readlock(&dl->dl_lock, flags);
+	res = dl->dl_length;
+	traced_readunlock(&dl->dl_lock, flags);
+	return res;
+#endif
+}
+
 ////////////////// dirty IOs on the fly //////////////////

 void mf_insert_dirty(struct mapfree_info *mf, struct dirty_info *di)
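Editor's note: mf_dirty_append() keeps, per stage, the highest end offset any write has reached; because it only ever grows dl_length, out-of-order or concurrent completions cannot shrink the recorded frontier, and mf_dirty_length() samples it without a lock on 64-bit builds. A minimal sketch of a producer driving the three stages, assuming only the API above; record_write_progress() and its arguments are hypothetical, not part of this commit:

/* Hypothetical caller: advance the per-stage dirty frontier as a write
 * progresses.  mf_dirty_append() keeps the maximum, so a slower request
 * completing later cannot move the recorded length backwards.
 */
static void record_write_progress(struct mapfree_info *mf,
				  loff_t pos, int len, int done)
{
	loff_t end = pos + len;

	mf_dirty_append(mf, DIRTY_SUBMITTED, end);		/* handed to the backend */
	if (done) {
		mf_dirty_append(mf, DIRTY_COMPLETED, end);	/* backend reported completion */
		mf_dirty_append(mf, DIRTY_FINISHED, end);	/* callback chain has run */
	}
}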
@@ -47,6 +47,19 @@
 extern int mapfree_period_sec;
 extern int mapfree_grace_keep_mb;

+enum dirty_stage {
+	DIRTY_SUBMITTED,
+	DIRTY_COMPLETED,
+	DIRTY_FINISHED,
+	/* Keep this the last element */
+	DIRTY_MAX
+};
+
+struct dirty_length {
+	rwlock_t dl_lock;
+	loff_t dl_length;
+};
+
 struct mapfree_info {
 	struct list_head mf_head;
 	struct list_head mf_dirty_anchor;
@@ -60,6 +73,7 @@ struct mapfree_info {
 	loff_t mf_last;
 	loff_t mf_max;
 	long long mf_jiffies;
+	struct dirty_length mf_length[DIRTY_MAX];
 };

 struct dirty_info {
@@ -76,6 +90,11 @@ void mapfree_set(struct mapfree_info *mf, loff_t min, loff_t max);

 void mapfree_pages(struct mapfree_info *mf, int grace_keep);

+////////////////// dirty IOs in append mode //////////////////
+
+void mf_dirty_append(struct mapfree_info *mf, enum dirty_stage stage, loff_t newlen);
+loff_t mf_dirty_length(struct mapfree_info *mf, enum dirty_stage stage);
+
 ////////////////// dirty IOs on the fly //////////////////

 void mf_insert_dirty(struct mapfree_info *mf, struct dirty_info *di);
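Editor's note: since DIRTY_MAX is kept as the last enum element, mf_length[] can be sized and iterated generically. A minimal sketch of a debugging helper that dumps all stage frontiers, using only the declarations above; dump_dirty_lengths() and its output format are illustrative, not part of this commit:

/* Hypothetical debug helper: print the dirty frontier of every stage. */
static void dump_dirty_lengths(struct mapfree_info *mf)
{
	static const char * const names[DIRTY_MAX] = {
		[DIRTY_SUBMITTED] = "submitted",
		[DIRTY_COMPLETED] = "completed",
		[DIRTY_FINISHED]  = "finished",
	};
	enum dirty_stage stage;

	for (stage = 0; stage < DIRTY_MAX; stage++)
		MARS_INF("%s dirty up to %lld\n",
			 names[stage], mf_dirty_length(mf, stage));
}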
@@ -300,6 +300,7 @@ void _complete(struct aio_output *output, struct aio_mref_aspect *mref_a, int er

 done:
 	if (mref->ref_rw) {
+		mf_dirty_append(output->mf, DIRTY_FINISHED, mref->ref_pos + mref->ref_len);
 		atomic_dec(&output->write_count);
 	} else {
 		atomic_dec(&output->read_count);
@@ -693,6 +694,8 @@ static int aio_event_thread(void *data)
 		MARS_IO("AIO done %p pos = %lld len = %d rw = %d\n", mref, mref->ref_pos, mref->ref_len, mref->ref_rw);

 		mapfree_set(output->mf, mref->ref_pos, mref->ref_pos + mref->ref_len);
+		if (mref->ref_rw)
+			mf_dirty_append(output->mf, DIRTY_COMPLETED, mref->ref_pos + mref->ref_len);

 		if (output->brick->o_fdsync
 		    && err >= 0
@@ -917,6 +920,7 @@ static int aio_submit_thread(void *data)

 		mref_a->di.dirty_stage = 0;
 		if (mref->ref_rw) {
+			mf_dirty_append(output->mf, DIRTY_SUBMITTED, mref->ref_pos + mref->ref_len);
 			mf_insert_dirty(output->mf, &mref_a->di);
 		}

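Editor's note: in the aio brick the three stages are advanced by three different threads: the submit thread records DIRTY_SUBMITTED before submission, the event thread records DIRTY_COMPLETED when the aio event arrives, and _complete() records DIRTY_FINISHED once the callback chain has run. Because each frontier only grows and a request passes the stages in order, sampling the later stages first yields a consistent snapshot. A minimal sanity-check sketch under that assumption (a strict check would also need read barriers between the samples); check_dirty_ordering() is hypothetical:

/* Hypothetical debug check: the finished frontier must never be ahead of
 * the completed one, nor the completed ahead of the submitted one.
 * Sampling the later stages first keeps the snapshot consistent even
 * while IO is in flight, since each stage can only grow.
 */
static void check_dirty_ordering(struct mapfree_info *mf)
{
	loff_t finished  = mf_dirty_length(mf, DIRTY_FINISHED);
	loff_t completed = mf_dirty_length(mf, DIRTY_COMPLETED);
	loff_t submitted = mf_dirty_length(mf, DIRTY_SUBMITTED);

	if (unlikely(finished > completed || completed > submitted))
		MARS_ERR("dirty lengths out of order: %lld / %lld / %lld\n",
			 submitted, completed, finished);
}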
@@ -402,9 +402,13 @@ void _sio_ref_io(struct sio_threadinfo *tinfo, struct mref_object *mref)
 	if (mref->ref_rw == READ) {
 		status = read_aops(output, mref);
 	} else {
+		mf_dirty_append(output->mf, DIRTY_SUBMITTED, mref->ref_pos + mref->ref_len);
 		status = write_aops(output, mref);
-		if (barrier || output->brick->o_fdsync)
-			sync_file(output);
+		if (status >= 0) {
+			if (barrier || output->brick->o_fdsync)
+				sync_file(output);
+			mf_dirty_append(output->mf, DIRTY_COMPLETED, mref->ref_pos + mref->ref_len);
+		}
 	}

 	mapfree_set(output->mf, mref->ref_pos, mref->ref_pos + mref->ref_len);
@@ -413,6 +417,8 @@ done:
 	_complete(output, mref, status);

 	atomic_dec(&tinfo->fly_count);
+	if (mref->ref_rw && status >= 0)
+		mf_dirty_append(output->mf, DIRTY_FINISHED, mref->ref_pos + mref->ref_len);
 }

 /* This is called from outside
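Editor's note: in the sio path above, DIRTY_COMPLETED is only advanced after the optional sync_file() and only when the write succeeded, and DIRTY_FINISHED only after _complete() with a non-negative status, so the FINISHED frontier is the most conservative of the three watermarks. A minimal sketch of how a consumer might measure how much appended data is still in flight; dirty_in_flight() is a hypothetical helper, not part of this commit:

/* Hypothetical helper: number of bytes whose writes have been submitted
 * but have not yet passed the final stage of the dirty pipeline.
 * Meaningful for append-mode (logfile-like) writers, where the per-stage
 * lengths track the append frontier reached by each stage.
 */
static loff_t dirty_in_flight(struct mapfree_info *mf)
{
	loff_t finished  = mf_dirty_length(mf, DIRTY_FINISHED);
	loff_t submitted = mf_dirty_length(mf, DIRTY_SUBMITTED);

	return submitted > finished ? submitted - finished : 0;
}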