mars/kernel/mars_bio.c

1132 lines
28 KiB
C
Raw Normal View History

2014-11-21 10:51:34 +00:00
/*
* MARS Long Distance Replication Software
*
* This file is part of MARS project: http://schoebel.github.io/mars/
*
* Copyright (C) 2010-2014 Thomas Schoebel-Theuer
* Copyright (C) 2011-2014 1&1 Internet AG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
2011-03-18 13:15:40 +00:00
2012-02-12 11:19:57 +00:00
2011-03-18 13:15:40 +00:00
// Bio brick (interface to blkdev IO via kernel bios)
//#define BRICK_DEBUGGING
//#define MARS_DEBUGGING
//#define IO_DEBUGGING
//#define FAKE_IO
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bio.h>
#include "mars.h"
2012-10-12 12:17:34 +00:00
#include "lib_timing.h"
2013-01-23 11:30:28 +00:00
#include "lib_mapfree.h"
2021-02-05 07:01:01 +00:00
#include "lib_limiter.h"
2012-10-12 12:17:34 +00:00
#include "mars_bio.h"
2017-04-07 06:18:29 +00:00
/* Tunable: value pushed into the underlying queue's nr_requests
 * when a brick is switched on (see bio_switch()).
 */
int bio_nr_requests = 1024;

/* Submission latency statistics, indexed by rw & 1: [0] = read, [1] = write. */
static struct timing_stats timings[2] = {};

/* Threshold for detecting overly long bio submission latencies. */
struct threshold bio_submit_threshold = {
	.thr_ban = &mars_global_ban,
	.thr_parent = &global_io_threshold,
	.thr_limit = BIO_SUBMIT_MAX_LATENCY,
	.thr_factor = 100,
	.thr_plus = 0,
};
EXPORT_SYMBOL_GPL(bio_submit_threshold);

/* End-to-end IO latency thresholds: [0] = read, [1] = write. */
struct threshold bio_io_threshold[2] = {
	[0] = {
		.thr_ban = &mars_global_ban,
		.thr_parent = &global_io_threshold,
		.thr_limit = BIO_IO_R_MAX_LATENCY,
		.thr_factor = 10,
		.thr_plus = 10000,
	},
	[1] = {
		.thr_ban = &mars_global_ban,
		.thr_parent = &global_io_threshold,
		.thr_limit = BIO_IO_W_MAX_LATENCY,
		.thr_factor = 10,
		.thr_plus = 10000,
	},
};
EXPORT_SYMBOL_GPL(bio_io_threshold);

#ifdef CONFIG_MARS_DEBUG
/* Debug-only rate limiters used by bio_ref_get() for artificial IO
 * throttling (e.g. simulation of a degraded RAID).
 */
struct mars_limiter bio_throttle_read = {
};
struct mars_limiter bio_throttle_write = {
};
#endif
///////////////////////// own type definitions ////////////////////////
2011-03-18 13:15:40 +00:00
///////////////////////// own helper functions ////////////////////////
/* This is called from the kernel bio layer upon IO completion.
 * Depending on the kernel version, the completion status arrives
 * either in bio->bi_status, bio->bi_error, or as an extra parameter.
 */
// remove_this
#if defined(MARS_HAS_BI_STATUS) || defined(MARS_HAS_BI_ERROR)
// end_remove_this
static
void bio_callback(struct bio *bio)
// remove_this
#else
static
void bio_callback(struct bio *bio, int code)
#endif
// end_remove_this
{
	struct bio_mref_aspect *mref_a = bio->bi_private;
	struct bio_brick *brick;
	unsigned int rsp_nr;
	unsigned long flags;

	CHECK_PTR(mref_a, err);
	CHECK_PTR(mref_a->output, err);
	brick = mref_a->output->brick;
	CHECK_PTR(brick, err);

	/* record the completion status in a kernel-version independent way */
// remove_this
#ifdef MARS_HAS_BI_STATUS
// end_remove_this
	mref_a->status_code = blk_status_to_errno(bio->bi_status);
// remove_this
#else
#ifdef MARS_HAS_BI_ERROR
	mref_a->status_code = bio->bi_error;
#else
	mref_a->status_code = code;
#endif
#endif
// end_remove_this

	/* Move the request from the submitted list onto one of the
	 * completion queues (round-robin over BIO_RESPONSE_THREADS)
	 * and wake up the corresponding response thread.
	 */
	spin_lock_irqsave(&brick->lock, flags);
	list_del(&mref_a->io_head);
	rsp_nr = (brick->rsp_nr + 1) % BIO_RESPONSE_THREADS;
	brick->rsp_nr = rsp_nr;
	list_add_tail(&mref_a->io_head, &brick->rsp[rsp_nr].completed_list);
	atomic_inc(&brick->completed_count);
	spin_unlock_irqrestore(&brick->lock, flags);

	wake_up_interruptible(&brick->rsp[rsp_nr].response_event);
	return;

err:
	MARS_FAT("cannot handle bio callback\n");
}
2011-03-31 16:16:00 +00:00
/* Map from kernel address/length to struct page (if not already known),
* check alignment constraints, create bio from it.
* Return the length (may be smaller than requested).
2011-03-18 13:15:40 +00:00
*/
static
2011-04-21 16:03:04 +00:00
int make_bio(struct bio_brick *brick, void *data, int len, loff_t pos, struct bio_mref_aspect *private, struct bio **_bio)
2011-03-18 13:15:40 +00:00
{
unsigned long long sector;
int sector_offset;
int data_offset;
int page_offset;
int page_len;
int bvec_count;
2011-03-31 16:16:00 +00:00
int rest_len = len;
int result_len = 0;
2011-03-18 13:15:40 +00:00
int status;
int i;
struct bio *bio = NULL;
struct block_device *bdev;
status = -EINVAL;
CHECK_PTR(brick, out);
bdev = brick->bdev;
CHECK_PTR(bdev, out);
2011-03-31 16:16:00 +00:00
if (unlikely(rest_len <= 0)) {
MARS_ERR("bad bio len %d\n", rest_len);
2011-03-18 13:15:40 +00:00
goto out;
}
sector = pos >> 9; // TODO: make dynamic
sector_offset = pos & ((1 << 9) - 1); // TODO: make dynamic
data_offset = ((unsigned long)data) & ((1 << 9) - 1); // TODO: make dynamic
if (unlikely(sector_offset > 0)) {
MARS_ERR("odd sector offset %d\n", sector_offset);
goto out;
}
if (unlikely(sector_offset != 0)) {
MARS_ERR("bad alignment: sector_offset %d != 0\n",
sector_offset);
2011-03-30 12:02:50 +00:00
goto out;
}
2011-03-31 16:16:00 +00:00
if (unlikely(rest_len & ((1 << 9) - 1))) {
MARS_ERR("odd length %d\n", rest_len);
2011-03-18 13:15:40 +00:00
goto out;
}
2011-04-21 16:03:04 +00:00
page_offset = ((unsigned long)data) & (PAGE_SIZE-1);
2011-03-31 16:16:00 +00:00
page_len = rest_len + page_offset;
2011-03-18 13:15:40 +00:00
bvec_count = (page_len - 1) / PAGE_SIZE + 1;
2011-03-30 12:02:50 +00:00
if (bvec_count > brick->bvec_max) {
2011-03-18 13:15:40 +00:00
bvec_count = brick->bvec_max;
} else if (unlikely(bvec_count <= 0)) {
MARS_WRN("bvec_count=%d\n", bvec_count);
bvec_count = 1;
2011-03-30 12:02:50 +00:00
}
2011-03-18 13:15:40 +00:00
2011-03-31 16:16:00 +00:00
MARS_IO("sector_offset = %d data = %p pos = %lld rest_len = %d page_offset = %d page_len = %d bvec_count = %d\n", sector_offset, data, pos, rest_len, page_offset, page_len, bvec_count);
2011-03-18 13:15:40 +00:00
bio = bio_alloc(GFP_MARS, bvec_count);
status = -ENOMEM;
2011-03-31 16:16:00 +00:00
if (unlikely(!bio)) {
2011-03-18 13:15:40 +00:00
goto out;
}
2011-03-31 16:16:00 +00:00
for (i = 0; i < bvec_count && rest_len > 0; i++) {
2011-04-21 16:03:04 +00:00
struct page *page;
2011-03-31 16:16:00 +00:00
int this_rest = PAGE_SIZE - page_offset;
int this_len = rest_len;
2011-03-18 13:15:40 +00:00
2011-03-31 16:16:00 +00:00
if (this_len > this_rest) {
this_len = this_rest;
}
#ifdef MARS_DEBUGGING
2011-03-18 13:15:40 +00:00
if (unlikely(!virt_addr_valid(data))) {
MARS_ERR("invalid virtual kernel address %p\n", data);
status = -EINVAL;
goto out;
}
2011-03-31 16:16:00 +00:00
#endif
2011-03-18 13:15:40 +00:00
2011-08-12 11:09:48 +00:00
page = brick_iomap(data, &page_offset, &this_len);
2011-04-21 16:03:04 +00:00
if (unlikely(!page)) {
MARS_ERR("cannot iomap() kernel address %p\n", data);
status = -EINVAL;
goto out;
}
2011-03-31 16:16:00 +00:00
MARS_IO(" i = %d page = %p bv_len = %d bv_offset = %d\n", i, page, this_len, page_offset);
2011-03-18 13:15:40 +00:00
bio->bi_io_vec[i].bv_page = page;
2011-03-31 16:16:00 +00:00
bio->bi_io_vec[i].bv_len = this_len;
2011-03-18 13:15:40 +00:00
bio->bi_io_vec[i].bv_offset = page_offset;
2011-03-31 16:16:00 +00:00
data += this_len;
rest_len -= this_len;
result_len += this_len;
2011-03-18 13:15:40 +00:00
page_offset = 0;
2011-03-31 16:16:00 +00:00
//MARS_IO("page_offset=%d this_len=%d (new len=%d, new status=%d)\n", page_offset, this_len, rest_len, status);
2011-03-18 13:15:40 +00:00
}
2011-03-31 16:16:00 +00:00
if (unlikely(rest_len != 0)) {
MARS_ERR("computation of bvec_count %d was wrong, diff=%d\n", bvec_count, rest_len);
status = -EINVAL;
2011-03-18 13:15:40 +00:00
goto out;
}
bio->bi_vcnt = i;
// remove_this
2019-08-06 06:53:28 +00:00
#ifdef MARS_HAS_BVEC_ITER
// end_remove_this
bio->bi_iter.bi_idx = 0;
bio->bi_iter.bi_size = result_len;
bio->bi_iter.bi_sector = sector;
// remove_this
#else
2011-03-18 13:15:40 +00:00
bio->bi_idx = 0;
2011-03-31 16:16:00 +00:00
bio->bi_size = result_len;
2011-03-18 13:15:40 +00:00
bio->bi_sector = sector;
#endif
2019-02-20 06:29:47 +00:00
#ifdef MARS_HAS_SET_DEV
// end_remove_this
2019-02-20 06:29:47 +00:00
bio_set_dev(bio, bdev);
// remove_this
#else
2011-03-18 13:15:40 +00:00
bio->bi_bdev = bdev;
2019-02-20 06:29:47 +00:00
#endif
// end_remove_this
2011-03-18 13:15:40 +00:00
bio->bi_private = private;
bio->bi_end_io = bio_callback;
2019-08-06 06:53:28 +00:00
#ifndef MARS_HAS_NEW_BIO_OP
2011-03-18 13:15:40 +00:00
bio->bi_rw = 0; // must be filled in later
#endif
2011-03-31 16:16:00 +00:00
status = result_len;
2011-03-18 13:15:40 +00:00
out:
if (unlikely(status < 0)) {
MARS_ERR("error %d\n", status);
if (bio) {
bio_put(bio);
bio = NULL;
}
}
*_bio = bio;
return status;
}
////////////////// own brick / input / output operations //////////////////
2012-02-25 21:22:52 +00:00
/* Map a MARS priority to a non-negative array index 0..2.
 * NOTE(review): assumes MARS_PRIO_HIGH == -1 and MARS_PRIO_LOW == 1
 * (see the clamping in bio_ref_get()) — confirm in mars.h.
 */
#define PRIO_INDEX(mref) ((mref)->ref_prio + 1)
2011-03-18 13:15:40 +00:00
static int bio_get_info(struct bio_output *output, struct mars_info *info)
{
struct bio_brick *brick = output->brick;
struct inode *inode;
struct request_queue *q;
2011-03-18 13:15:40 +00:00
int status = 0;
2013-01-23 11:30:28 +00:00
if (unlikely(!brick->mf ||
!brick->mf->mf_filp ||
!brick->mf->mf_filp->f_mapping ||
!(inode = brick->mf->mf_filp->f_mapping->host))) {
status = -ENOENT;
goto done;
}
info->tf_align = 512;
info->tf_min_size = 512;
q = bdev_get_queue(inode->i_bdev);
if (q) {
info->tf_align = queue_physical_block_size(q);
info->tf_min_size = queue_logical_block_size(q);
}
2013-09-25 13:16:31 +00:00
brick->total_size = i_size_read(inode);
2011-03-18 13:15:40 +00:00
info->current_size = brick->total_size;
MARS_DBG("determined device size = %lld\n", info->current_size);
done:
2011-03-18 13:15:40 +00:00
return status;
}
/* Obtain a reference on an mref and prepare it for IO:
 * allocate a buffer for buffered IO when the caller supplied none,
 * build the bio, and clamp the priority into the valid range.
 * Returns 0 on success, negative error code otherwise.
 */
static int bio_ref_get(struct bio_output *output, struct mref_object *mref)
{
	struct bio_mref_aspect *mref_a;
	int status = -EINVAL;

	CHECK_PTR(output, done);
	CHECK_PTR(output->brick, done);

	/* already set up: just take another reference */
	if (mref->ref_initialized) {
		_mref_get(mref);
		return mref->ref_len;
	}

	mref_a = bio_mref_get_aspect(output->brick, mref);
	CHECK_PTR(mref_a, done);
	mref_a->output = output;
	mref_a->bio = NULL;

	if (!mref->ref_data) { // buffered IO.
		if (unlikely(mref->ref_len <= 0)) {
			goto done;
		}
		status = -ENOMEM;
		/* remember alloc_len so _bio_ref_put() can free it again */
		mref->ref_data = brick_block_alloc(mref->ref_pos, (mref_a->alloc_len = mref->ref_len));
		if (unlikely(!mref->ref_data)) {
			goto done;
		}
		mref_a->do_dealloc = true;
	}

#ifdef CONFIG_MARS_DEBUG
	/* Only for testing, e.g. simulation of degraded RAID etc
	 */
	if (mref->ref_flags & MREF_WRITE)
		mars_limit_sleep(&bio_throttle_write, (mref->ref_len + 512) / 1024);
	else
		mars_limit_sleep(&bio_throttle_read, (mref->ref_len + 512) / 1024);
#endif

	/* make_bio() may shorten the length (hardware limits) */
	status = make_bio(output->brick, mref->ref_data, mref->ref_len, mref->ref_pos, mref_a, &mref_a->bio);
	if (unlikely(status < 0 || !mref_a->bio)) {
		MARS_ERR("could not create bio, status = %d\n", status);
		goto done;
	}

	/* clamp priority into the supported range */
	if (unlikely(mref->ref_prio < MARS_PRIO_HIGH))
		mref->ref_prio = MARS_PRIO_HIGH;
	else if (unlikely(mref->ref_prio > MARS_PRIO_LOW))
		mref->ref_prio = MARS_PRIO_LOW;

	MARS_IO("len = %d status = %d prio = %d fly = %d\n", mref->ref_len, status, mref->ref_prio, atomic_read(&output->brick->fly_count[PRIO_INDEX(mref)]));

	mref->ref_len = status;
	_mref_get_first(mref);
	status = 0;

done:
	return status;
}
/* Final destructor for an mref: release the bio and any buffer that
 * was allocated on behalf of buffered IO in bio_ref_get().
 * Called only when the last reference is gone (see BIO_REF_PUT).
 */
static
void _bio_ref_put(struct bio_output *output, struct mref_object *mref)
{
	struct bio_mref_aspect *mref_a;

	MARS_IO("deallocating\n");

	mref->ref_total_size = output->brick->total_size;

	mref_a = bio_mref_get_aspect(output->brick, mref);
	CHECK_PTR(mref_a, err);

	if (likely(mref_a->bio)) {
#ifdef MARS_DEBUGGING
		int bi_cnt = atomic_read(&mref_a->bio->bi_cnt);
		if (bi_cnt > 1) {
			MARS_DBG("bi_cnt = %d\n", bi_cnt);
		}
#endif
		bio_put(mref_a->bio);
		mref_a->bio = NULL;
	}
	if (mref_a->do_dealloc) {
		MARS_IO("free page\n");
		brick_block_free(mref->ref_data, mref_a->alloc_len);
	}
	bio_free_mref(mref);
	return;

err:
	MARS_FAT("cannot work\n");
}

/* Drop one reference; destroy the mref when the count reaches zero. */
#define BIO_REF_PUT(output,mref)					\
	({								\
		if (_mref_put(mref)) {					\
			_bio_ref_put(output, mref);			\
		}							\
	})

static
void bio_ref_put(struct bio_output *output, struct mref_object *mref)
{
	BIO_REF_PUT(output, mref);
}
2011-05-26 14:32:32 +00:00
static
2012-02-25 21:22:52 +00:00
void _bio_ref_io(struct bio_output *output, struct mref_object *mref, bool cork)
2011-03-18 13:15:40 +00:00
{
2011-04-29 09:36:10 +00:00
struct bio_brick *brick = output->brick;
struct bio_mref_aspect *mref_a = bio_mref_get_aspect(output->brick, mref);
2011-03-18 13:15:40 +00:00
struct bio *bio;
unsigned long long latency;
unsigned long flags;
2011-03-18 13:15:40 +00:00
int rw;
int status = -EINVAL;
CHECK_PTR(mref_a, err);
bio = mref_a->bio;
CHECK_PTR(bio, err);
_mref_get(mref);
2012-02-25 21:22:52 +00:00
atomic_inc(&brick->fly_count[PRIO_INDEX(mref)]);
2011-03-18 13:15:40 +00:00
bio_get(bio);
rw = (mref->ref_flags & MREF_WRITE) ? WRITE : READ;
if (cork) {
// adapt to different kernel versions (TBD: improve)
#ifdef REQ_IDLE
rw |= REQ_IDLE;
#else /* sorry this went clumsy over time, adaptation to _any_ kernel is a hell */
} else {
#if defined(BIO_RW_RQ_MASK) || defined(BIO_FLUSH)
2011-05-06 10:25:52 +00:00
rw |= (1 << BIO_RW_NOIDLE);
#elif defined(REQ_NOIDLE)
rw |= REQ_NOIDLE;
#else
#warning Cannot control the NOIDLE flag
#endif
2012-09-18 06:13:33 +00:00
#endif
}
2019-03-25 08:00:45 +00:00
if (!(mref->ref_flags & MREF_SKIP_SYNC)) {
2011-05-06 10:25:52 +00:00
if (brick->do_sync) {
#if defined(BIO_RW_RQ_MASK) || defined(BIO_FLUSH)
2011-05-06 10:25:52 +00:00
rw |= (1 << BIO_RW_SYNCIO);
#elif defined(REQ_SYNC)
rw |= REQ_SYNC;
#else
#warning Cannot control the SYNC flag
2012-09-18 06:13:33 +00:00
#endif
}
#if defined(BIO_RW_RQ_MASK) || defined(BIO_FLUSH)
2012-02-25 21:22:52 +00:00
if (brick->do_unplug && !cork) {
2011-05-06 10:25:52 +00:00
rw |= (1 << BIO_RW_UNPLUG);
}
#else
// there is no substitute, but the above NOIDLE should do the job (CHECK!)
2012-09-18 06:13:33 +00:00
#endif
2011-05-06 10:25:52 +00:00
}
2012-02-25 21:22:52 +00:00
MARS_IO("starting IO rw = %d prio 0 %d fly = %d\n", rw, mref->ref_prio, atomic_read(&brick->fly_count[PRIO_INDEX(mref)]));
2011-03-27 15:18:38 +00:00
mars_trace(mref, "bio_submit");
2011-03-18 13:15:40 +00:00
mref_a->start_stamp = cpu_clock(raw_smp_processor_id());
spin_lock_irqsave(&brick->lock, flags);
list_add_tail(&mref_a->io_head, &brick->submitted_list[rw & 1]);
spin_unlock_irqrestore(&brick->lock, flags);
2011-03-18 13:15:40 +00:00
#ifdef FAKE_IO
bio->bi_end_io(bio, 0);
#else
2019-08-06 06:53:28 +00:00
#ifdef MARS_HAS_NEW_BIO_OP
2019-02-19 14:05:35 +00:00
#ifdef REQ_SYNC
/* Unsure: REQ_SYNC had been defined since around 2010,
* but has not been used accidentally.
* Thus it is now a _potential_ bug fix.
* Theoretically, I could wipe out the old code.
* However, I know of certain Frankenstein kernels
* which don't conform to anything. And I am not their
* maintainer. Let the old code in place for now.
*/
if (rw & 1) {
bio_set_op_attrs(bio, REQ_OP_WRITE,
mref->ref_skip_sync ? 0 : REQ_SYNC);
} else {
bio_set_op_attrs(bio, REQ_OP_READ,
mref->ref_skip_sync ? 0 : REQ_META);
}
#else
if (rw & 1) {
bio_set_op_attrs(bio, REQ_OP_WRITE,
2019-03-25 08:00:45 +00:00
mref->ref_flags & MREF_SKIP_SYNC ? 0 : WRITE_SYNC);
} else {
bio_set_op_attrs(bio, REQ_OP_READ,
2019-03-25 08:00:45 +00:00
mref->ref_flags & MREF_SKIP_SYNC ? 0 : READ_SYNC);
}
2019-02-19 14:05:35 +00:00
#endif
latency = TIME_STATS(
&timings[rw & 1],
submit_bio(bio)
);
2011-03-18 13:15:40 +00:00
#else
2011-05-06 10:25:52 +00:00
bio->bi_rw = rw;
latency = TIME_STATS(
&timings[rw & 1],
submit_bio(rw, bio)
);
#endif
2011-03-18 13:15:40 +00:00
#endif
threshold_check(&bio_submit_threshold, latency);
2011-03-18 13:15:40 +00:00
status = 0;
#ifdef BIO_EOPNOTSUPP /* missing since b25de9d6da49b1a8760a89672283128aa8c78345 */
2011-03-18 13:15:40 +00:00
if (unlikely(bio_flagged(bio, BIO_EOPNOTSUPP)))
status = -EOPNOTSUPP;
#endif
2011-03-18 13:15:40 +00:00
MARS_IO("submitted\n");
if (likely(status >= 0))
goto done;
bio_put(bio);
2012-02-25 21:22:52 +00:00
atomic_dec(&brick->fly_count[PRIO_INDEX(mref)]);
2011-05-26 14:32:32 +00:00
2011-03-18 13:15:40 +00:00
err:
MARS_ERR("IO error %d\n", status);
CHECKED_CALLBACK(mref, status, done);
atomic_dec(&mars_global_io_flying);
2011-04-29 09:36:10 +00:00
done: ;
2011-03-18 13:15:40 +00:00
}
2011-05-26 14:32:32 +00:00
/* Entry point for IO on an mref.
 * Low-priority requests and normal-priority writes are queued for the
 * submit thread; everything else ("realtime IO") is submitted
 * immediately in the caller's context.
 */
static
void bio_ref_io(struct bio_output *output, struct mref_object *mref)
{
	CHECK_PTR(mref, fatal);

	/* hold an extra reference for the duration of the IO */
	_mref_get(mref);
	atomic_inc(&mars_global_io_flying);

	if (mref->ref_prio == MARS_PRIO_LOW ||
	    (mref->ref_prio == MARS_PRIO_NORMAL &&
	     (mref->ref_flags & MREF_WRITE))) {
		struct bio_mref_aspect *mref_a = bio_mref_get_aspect(output->brick, mref);
		struct bio_brick *brick = output->brick;
		unsigned long flags;

		/* defer to the submit thread */
		spin_lock_irqsave(&brick->lock, flags);
		list_add_tail(&mref_a->io_head, &brick->queue_list[PRIO_INDEX(mref)]);
		atomic_inc(&brick->queue_count[PRIO_INDEX(mref)]);
		spin_unlock_irqrestore(&brick->lock, flags);
		brick->submitted = true;
		wake_up_interruptible(&brick->submit_event);
		return;
	}

	// realtime IO: start immediately
	_bio_ref_io(output, mref, false);
	BIO_REF_PUT(output, mref);
	return;

fatal:
	MARS_FAT("cannot handle mref %p on output %p\n", mref, output);
}
/* Per-brick response thread (there are BIO_RESPONSE_THREADS of these):
 * drains its completed_list, runs latency accounting and the user
 * callbacks, and drops the references taken at submission time.
 * Terminates only when asked to stop AND no IO is in flight.
 */
static
int bio_response_thread(void *data)
{
	struct bio_response *rsp = data;
	struct bio_brick *brick = rsp->brick;
#ifdef IO_DEBUGGING
	int round = 0;
#endif

	MARS_INF("bio response thread has started on '%s'.\n", brick->brick_path);
	for (;;) {
		LIST_HEAD(tmp_list);
		unsigned long flags;
		int thr_limit;
		int sleeptime;
		int count;
		int i;

		/* Derive the polling interval from the smaller of the two
		 * IO latency thresholds, so slow requests can be detected
		 * early (see the check at the end of the loop).
		 */
		thr_limit = bio_io_threshold[0].thr_limit;
		if (bio_io_threshold[1].thr_limit < thr_limit)
			thr_limit = bio_io_threshold[1].thr_limit;
		sleeptime = HZ / 10;
		if (thr_limit > 0) {
			sleeptime = thr_limit / (1000000 * 2 / HZ);
			if (unlikely(sleeptime < 2))
				sleeptime = 2;
		}
#ifdef IO_DEBUGGING
		round++;
		MARS_IO("%d sleeping %d...\n", round, sleeptime);
#endif
		wait_event_interruptible_timeout(
			rsp->response_event,
			atomic_read(&brick->completed_count) > 0 ||
			(brick_thread_should_stop() &&
			 atomic_read(&brick->fly_count[0]) +
			 atomic_read(&brick->fly_count[1]) +
			 atomic_read(&brick->fly_count[2]) <= 0),
			sleeptime);

		MARS_IO("%d woken up, completed_count = %d fly_count[0] = %d fly_count[1] = %d fly_count[2] = %d\n",
			round,
			atomic_read(&brick->completed_count),
			atomic_read(&brick->fly_count[0]),
			atomic_read(&brick->fly_count[1]),
			atomic_read(&brick->fly_count[2]));

#ifdef CONFIG_MARS_DEBUG
		/* debug aid: simulate a hanging IO subsystem */
		if (mars_hang_mode & 2) {
			brick_msleep(100);
			continue;
		}
#endif

		/* grab the whole completed list in one go */
		spin_lock_irqsave(&brick->lock, flags);
		list_replace_init(&rsp->completed_list, &tmp_list);
		spin_unlock_irqrestore(&brick->lock, flags);

		count = 0;
		for (;;) {
			struct list_head *tmp;
			struct bio_mref_aspect *mref_a;
			struct mref_object *mref;
			unsigned long long latency;
			int rw;
			int code;

			if (list_empty(&tmp_list)) {
				/* terminate only when nothing is in flight any more */
				if (brick_thread_should_stop() &&
				    atomic_read(&brick->fly_count[0]) +
				    atomic_read(&brick->fly_count[1]) +
				    atomic_read(&brick->fly_count[2]) <= 0)
					goto done;
				break;
			}

			tmp = tmp_list.next;
			list_del_init(tmp);
			atomic_dec(&brick->completed_count);
			mref_a = container_of(tmp, struct bio_mref_aspect, io_head);
			mref = mref_a->object;

			/* end-to-end latency accounting, per direction */
			rw = mref->ref_flags & MREF_WRITE ? 1 : 0;
			latency = cpu_clock(raw_smp_processor_id()) - mref_a->start_stamp;
			threshold_check(&bio_io_threshold[rw], latency);

			code = mref_a->status_code;
#ifdef IO_DEBUGGING
			round++;
			MARS_IO("%d completed , status = %d\n", round, code);
#endif

			mars_trace(mref, "bio_endio");

			if (code < 0) {
				MARS_ERR("IO error %d\n", code);
			} else {
				mref_checksum(mref);
				mref->ref_flags |= MREF_UPTODATE;
			}

			SIMPLE_CALLBACK(mref, code);

			MARS_IO("%d callback done.\n", round);

			atomic_dec(&brick->fly_count[PRIO_INDEX(mref)]);
#ifdef MARS_BIO_DEBUG
			atomic_inc(&brick->total_completed_count[PRIO_INDEX(mref)]);
#endif
			count++;

			MARS_IO("%d completed_count = %d fly_count = %d\n", round, atomic_read(&brick->completed_count), atomic_read(&brick->fly_count[PRIO_INDEX(mref)]));

			/* drop the references taken in _bio_ref_io() / bio_ref_io() */
			if (likely(mref_a->bio)) {
				bio_put(mref_a->bio);
			}
			BIO_REF_PUT(mref_a->output, mref);
			atomic_dec(&mars_global_io_flying);
		}

		/* Try to detect slow requests as early as possible,
		 * even before they have completed.
		 */
		for (i = 0; i < 2; i++) {
			unsigned long long eldest = 0;

			spin_lock_irqsave(&brick->lock, flags);
			if (!list_empty(&brick->submitted_list[i])) {
				struct bio_mref_aspect *mref_a;
				mref_a = container_of(brick->submitted_list[i].next, struct bio_mref_aspect, io_head);
				eldest = mref_a->start_stamp;
			}
			spin_unlock_irqrestore(&brick->lock, flags);

			if (eldest) {
				threshold_check(&bio_io_threshold[i], cpu_clock(raw_smp_processor_id()) - eldest);
			}
		}

		if (count) {
			/* give the submit thread a chance to run again */
			brick->submitted = true;
			wake_up_interruptible(&brick->submit_event);
		}
	}

done:
	MARS_INF("bio response thread has stopped.\n");
	return 0;
}
2011-05-26 14:32:32 +00:00
static
bool _bg_should_run(struct bio_brick *brick)
{
return (atomic_read(&brick->queue_count[2]) > 0 &&
atomic_read(&brick->fly_count[0]) + atomic_read(&brick->fly_count[1]) <= brick->bg_threshold &&
(brick->bg_maxfly <= 0 || atomic_read(&brick->fly_count[2]) < brick->bg_maxfly));
}
/* Per-brick submit thread: drains the priority-ordered queue_list[]s
 * and submits the queued mrefs via _bio_ref_io().
 * The lowest (background) priority is only drained when
 * _bg_should_run() permits it.
 */
static
int bio_submit_thread(void *data)
{
	struct bio_brick *brick = data;
#ifdef IO_DEBUGGING
	int round = 0;
#endif

	MARS_INF("bio submit thread has started on '%s'.\n", brick->brick_path);
	while (!brick_thread_should_stop()) {
		int prio;

#ifdef IO_DEBUGGING
		round++;
		MARS_IO("%d sleeping...\n", round);
#endif
		wait_event_interruptible_timeout(
			brick->submit_event,
			brick->submitted || brick_thread_should_stop(),
			HZ / 2);

		brick->submitted = false;

		MARS_IO("%d woken up, completed_count = %d fly_count[0] = %d fly_count[1] = %d fly_count[2] = %d\n",
			round,
			atomic_read(&brick->completed_count),
			atomic_read(&brick->fly_count[0]),
			atomic_read(&brick->fly_count[1]),
			atomic_read(&brick->fly_count[2]));

		for (prio = 0; prio < MARS_PRIO_NR; prio++) {
			LIST_HEAD(tmp_list);
			unsigned long flags;

			/* background priority is throttled */
			if (prio == MARS_PRIO_NR-1 && !_bg_should_run(brick)) {
				break;
			}

			MARS_IO("%d pushing prio %d to foreground, completed_count = %d\n", round, prio, atomic_read(&brick->completed_count));

			/* take the whole queue for this priority in one go */
			spin_lock_irqsave(&brick->lock, flags);
			list_replace_init(&brick->queue_list[prio], &tmp_list);
			spin_unlock_irqrestore(&brick->lock, flags);

			while (!list_empty(&tmp_list)) {
				struct list_head *tmp = tmp_list.next;
				struct bio_mref_aspect *mref_a;
				struct mref_object *mref;
				bool cork;

				list_del_init(tmp);
				mref_a = container_of(tmp, struct bio_mref_aspect, io_head);
				mref = mref_a->object;
				if (unlikely(!mref)) {
					MARS_ERR("invalid mref\n");
					continue;
				}

				atomic_dec(&brick->queue_count[PRIO_INDEX(mref)]);
				/* keep the queue corked while more requests are pending */
				cork = atomic_read(&brick->queue_count[PRIO_INDEX(mref)]) > 0;

				_bio_ref_io(mref_a->output, mref, cork);

				/* drop the reference taken in bio_ref_io() */
				BIO_REF_PUT(mref_a->output, mref);
			}
		}
	}

	MARS_INF("bio submit thread has stopped.\n");
	return 0;
}
/* Power switch: on power-on, open the underlying block device, tune
 * its queue, and start the submit + response threads; on power-off
 * (or on any error), stop the threads and release all resources.
 */
static int bio_switch(struct bio_brick *brick)
{
	int status = 0;
	int i;

	if (brick->power.button) {
		if (brick->power.led_on)
			goto done;	// already powered on

		mars_power_led_off((void*)brick, false);
		if (!brick->bdev) {
			static int index = 0;	// makes thread names unique
			const char *path = brick->brick_path;
			int flags = O_RDWR | O_EXCL | O_LARGEFILE;
			struct address_space *mapping;
			struct inode *inode;
			struct request_queue *q;
#ifdef MARS_HAS_BDI_GET
			struct backing_dev_info *bdi;
#endif

			brick->error = 0;
			brick->mf = mapfree_get(path, flags, &brick->error);
			if (unlikely(!brick->mf)) {
				status = brick->error;
				if (!status)
					status = -ENOENT;
				MARS_ERR("cannot open file '%s', error=%d\n",
					 path, brick->error);
				goto done;
			}
			mapfree_pages(brick->mf, -1);

			if (unlikely(!(mapping = brick->mf->mf_filp->f_mapping) ||
				     !(inode = mapping->host))) {
				MARS_ERR("internal problem with '%s'\n", path);
				status = -EINVAL;
				goto done;
			}
			if (unlikely(!S_ISBLK(inode->i_mode) || !inode->i_bdev)) {
				MARS_ERR("sorry, '%s' is not a block device\n", path);
				status = -ENODEV;
				goto done;
			}

			/* forbid IO/FS recursion from memory pressure on this mapping */
			mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~(__GFP_IO | __GFP_FS));

			q = bdev_get_queue(inode->i_bdev);
			if (unlikely(!q)) {
				MARS_ERR("internal queue '%s' does not exist\n", path);
				status = -EINVAL;
				goto done;
			}

			/* adjust readahead and queue depth */
#ifdef MARS_HAS_BDI_GET
			bdi = I_BDEV(inode)->bd_bdi;
			MARS_INF("'%s' ra_pages OLD=%lu NEW=%d\n", path,
				 bdi->ra_pages, brick->ra_pages);
			bdi->ra_pages = brick->ra_pages;
#else
			MARS_INF("'%s' ra_pages OLD=%lu NEW=%d\n", path, q->backing_dev_info.ra_pages, brick->ra_pages);
			q->backing_dev_info.ra_pages = brick->ra_pages;
#endif

			q->nr_requests = bio_nr_requests;

			/* clamp the per-bio vector count to hardware limits */
			brick->bvec_max = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
			if (brick->bvec_max > BIO_MAX_PAGES)
				brick->bvec_max = BIO_MAX_PAGES;
			else if (brick->bvec_max <= 1)
				brick->bvec_max = 1;

			brick->total_size = i_size_read(inode);
			MARS_INF("'%s' size=%lld bvec_max=%d\n",
				 path, brick->total_size, brick->bvec_max);

			for (i = 0; i < BIO_RESPONSE_THREADS; i++) {
				brick->rsp[i].response_thread =
					brick_thread_create(bio_response_thread,
							    &brick->rsp[i],
							    "mars_bio_r%d",
							    index);
			}
			brick->submit_thread = brick_thread_create(bio_submit_thread, brick, "mars_bio_s%d", index);
			status = -ENOMEM;
			/* NOTE(review): only the submit thread creation is
			 * checked here; a failed response thread creation is
			 * not detected until power-off cleanup — confirm
			 * whether this is intended.
			 */
			if (likely(brick->submit_thread)) {
				brick->bdev = inode->i_bdev;
				brick->mode_ptr = &brick->mf->mf_mode;
				index++;
				status = 0;
			}
		}
	}

	mars_power_led_on((void*)brick, brick->power.button && brick->bdev != NULL);

done:
	/* error or power-off: tear everything down again */
	if (status < 0 || !brick->power.button) {
		if (brick->submit_thread) {
			brick_thread_stop(brick->submit_thread);
			brick->submit_thread = NULL;
		}
		for (i = 0; i < BIO_RESPONSE_THREADS; i++) {
			if (brick->rsp[i].response_thread) {
				brick_thread_stop(brick->rsp[i].response_thread);
				brick->rsp[i].response_thread = NULL;
			}
		}
		if (brick->mf) {
			mapfree_put(brick->mf);
			brick->mf = NULL;
		}
		brick->mode_ptr = NULL;
		brick->bdev = NULL;
		if (!brick->power.button) {
			mars_power_led_off((void*)brick, true);
			brick->total_size = 0;
		}
	}
	return status;
}
2011-04-29 09:36:10 +00:00
//////////////// informational / statistics ///////////////
static noinline
char *bio_statistics(struct bio_brick *brick, int verbose)
{
2012-10-12 12:17:34 +00:00
char *res = brick_string_alloc(4096);
int pos = 0;
2011-04-29 09:36:10 +00:00
if (!res)
return NULL;
2012-10-12 12:17:34 +00:00
pos += report_timing(&timings[0], res + pos, 4096 - pos);
pos += report_timing(&timings[1], res + pos, 4096 - pos);
2011-04-29 09:36:10 +00:00
2012-10-12 12:17:34 +00:00
snprintf(res + pos, 4096 - pos,
#ifdef MARS_BIO_DEBUG
2012-02-25 21:22:52 +00:00
"total "
"completed[0] = %d "
"completed[1] = %d "
"completed[2] = %d | "
#endif
"queued[0] = %d "
"queued[1] = %d "
"queued[2] = %d "
2012-02-25 21:22:52 +00:00
"flying[0] = %d "
"flying[1] = %d "
"flying[2] = %d "
"completing = %d\n",
#ifdef MARS_BIO_DEBUG
2012-02-25 21:22:52 +00:00
atomic_read(&brick->total_completed_count[0]),
atomic_read(&brick->total_completed_count[1]),
atomic_read(&brick->total_completed_count[2]),
#endif
2012-02-25 21:22:52 +00:00
atomic_read(&brick->fly_count[0]),
atomic_read(&brick->queue_count[0]),
atomic_read(&brick->queue_count[1]),
atomic_read(&brick->queue_count[2]),
2012-02-25 21:22:52 +00:00
atomic_read(&brick->fly_count[1]),
atomic_read(&brick->fly_count[2]),
atomic_read(&brick->completed_count));
2011-04-29 09:36:10 +00:00
return res;
}
/* Reset the cumulative per-priority completion counters (debug builds only). */
static noinline
void bio_reset_statistics(struct bio_brick *brick)
{
#ifdef MARS_BIO_DEBUG
	int i;

	for (i = 0; i < 3; i++)
		atomic_set(&brick->total_completed_count[i], 0);
#endif
}
2011-03-18 13:15:40 +00:00
//////////////// object / aspect constructors / destructors ///////////////
static int bio_mref_aspect_init_fn(struct generic_aspect *_ini)
2011-03-18 13:15:40 +00:00
{
struct bio_mref_aspect *ini = (void*)_ini;
INIT_LIST_HEAD(&ini->io_head);
return 0;
}
static void bio_mref_aspect_exit_fn(struct generic_aspect *_ini)
2011-03-18 13:15:40 +00:00
{
struct bio_mref_aspect *ini = (void*)_ini;
(void)ini;
}
MARS_MAKE_STATICS(bio);
////////////////////// brick constructors / destructors ////////////////////
static int bio_brick_construct(struct bio_brick *brick)
{
2017-02-25 08:43:55 +00:00
int i;
2011-03-18 13:15:40 +00:00
spin_lock_init(&brick->lock);
INIT_LIST_HEAD(&brick->queue_list[0]);
INIT_LIST_HEAD(&brick->queue_list[1]);
INIT_LIST_HEAD(&brick->queue_list[2]);
INIT_LIST_HEAD(&brick->submitted_list[0]);
INIT_LIST_HEAD(&brick->submitted_list[1]);
init_waitqueue_head(&brick->submit_event);
2017-02-25 08:43:55 +00:00
for (i = 0; i < BIO_RESPONSE_THREADS; i++) {
INIT_LIST_HEAD(&brick->rsp[i].completed_list);
init_waitqueue_head(&brick->rsp[i].response_event);
brick->rsp[i].brick = brick;
}
2011-03-18 13:15:40 +00:00
return 0;
}
/* Brick destructor: verify that no requests were left behind. */
static int bio_brick_destruct(struct bio_brick *brick)
{
	int i;

	for (i = 0; i < 3; i++)
		CHECK_HEAD_EMPTY(&brick->queue_list[i]);
	for (i = 0; i < 2; i++)
		CHECK_HEAD_EMPTY(&brick->submitted_list[i]);
	for (i = 0; i < BIO_RESPONSE_THREADS; i++)
		CHECK_HEAD_EMPTY(&brick->rsp[i].completed_list);
	return 0;
}

/* No per-output state to set up. */
static int bio_output_construct(struct bio_output *output)
{
	return 0;
}

/* No per-output state to tear down. */
static int bio_output_destruct(struct bio_output *output)
{
	return 0;
}
///////////////////////// static structs ////////////////////////
/* brick-level operations */
static struct bio_brick_ops bio_brick_ops = {
	.brick_switch = bio_switch,
	.brick_statistics = bio_statistics,
	.reset_statistics = bio_reset_statistics,
};

/* output-level (IO) operations */
static struct bio_output_ops bio_output_ops = {
	.mars_get_info = bio_get_info,
	.mref_get = bio_ref_get,
	.mref_put = bio_ref_put,
	.mref_io = bio_ref_io,
};

const struct bio_input_type bio_input_type = {
	.type_name = "bio_input",
	.input_size = sizeof(struct bio_input),
};

static const struct bio_input_type *bio_input_types[] = {
	&bio_input_type,
};

const struct bio_output_type bio_output_type = {
	.type_name = "bio_output",
	.output_size = sizeof(struct bio_output),
	.master_ops = &bio_output_ops,
	.output_construct = &bio_output_construct,
	.output_destruct = &bio_output_destruct,
};

static const struct bio_output_type *bio_output_types[] = {
	&bio_output_type,
};

/* The bio brick has no inputs and exactly one output. */
const struct bio_brick_type bio_brick_type = {
	.type_name = "bio_brick",
	.brick_size = sizeof(struct bio_brick),
	.max_inputs = 0,
	.max_outputs = 1,
	.master_ops = &bio_brick_ops,
	.aspect_types = bio_aspect_types,
	.default_input_types = bio_input_types,
	.default_output_types = bio_output_types,
	.brick_construct = &bio_brick_construct,
	.brick_destruct = &bio_brick_destruct,
};
EXPORT_SYMBOL_GPL(bio_brick_type);
////////////////// module init stuff /////////////////////////
2011-08-25 10:16:32 +00:00
/* Module init: register the bio brick type with the brick framework. */
int __init init_mars_bio(void)
{
	MARS_INF("init_bio()\n");
	_bio_brick_type = (void*)&bio_brick_type;
	return bio_register_brick_type();
}

/* Module exit: unregister the bio brick type again. */
void exit_mars_bio(void)
{
	MARS_INF("exit_bio()\n");
	bio_unregister_brick_type();
}