/* mars/mars_usebuf.c
 * (recovered from a repository history view; original metadata:
 *  428 lines, 11 KiB, C, snapshots dated 2010-07-30 .. 2010-08-08)
 */
// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG
/* Usebuf brick.
2010-08-04 22:38:48 +00:00
* translates from unbuffered IO to buffered IO (mars_{get,put}_buf)
2010-07-30 05:46:22 +00:00
*/
//#define BRICK_DEBUGGING
//#define MARS_DEBUGGING
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bio.h>
#include "mars.h"
///////////////////////// own type definitions ////////////////////////
#include "mars_usebuf.h"
///////////////////////// own helper functions ////////////////////////
/* currently we have copy semantics :(
*/
2010-08-05 15:54:48 +00:00
static void _usebuf_copy(struct usebuf_mars_ref_aspect *mref_a, int rw)
2010-07-30 05:46:22 +00:00
{
2010-08-05 15:54:48 +00:00
void *ref_data = mref_a->object->ref_data;
void *bio_base = kmap_atomic(mref_a->bvec->bv_page, KM_USER0);
void *bio_data = bio_base + mref_a->bvec_offset;
int len = mref_a->bvec_len;
2010-07-30 05:46:22 +00:00
if (rw == READ) {
2010-08-05 15:54:48 +00:00
memcpy(bio_data, ref_data, len);
2010-07-30 05:46:22 +00:00
} else {
2010-08-05 15:54:48 +00:00
memcpy(ref_data, bio_data, len);
2010-07-30 05:46:22 +00:00
}
2010-08-03 16:03:32 +00:00
2010-07-30 05:46:22 +00:00
kunmap_atomic(bio_base, KM_USER0);
}
2010-08-06 07:38:18 +00:00
static void usebuf_ref_put(struct usebuf_output *output, struct mars_ref_object *origmref);
2010-08-05 15:54:48 +00:00
/* Drop one sub-IO reference on the external (original) mref.  When the
 * last sub-reference goes away, fire the caller's completion callback
 * and release the initial refcount taken in usebuf_ref_io().
 */
static void _usebuf_origmref_endio(struct usebuf_output *output, struct mars_ref_object *origmref)
{
	struct usebuf_mars_ref_aspect *origmref_a;
	struct generic_callback *cb;

	origmref_a = usebuf_mars_ref_get_aspect(output, origmref);
	if (unlikely(!origmref_a)) {
		MARS_FAT("cannot get origmref_a from origmref %p\n", origmref);
		goto out;
	}
	MARS_DBG("origmref=%p subref_count=%d error=%d\n", origmref, atomic_read(&origmref_a->subref_count), origmref->cb_error);

	CHECK_ATOMIC(&origmref_a->subref_count, 1);
	if (!atomic_dec_and_test(&origmref_a->subref_count)) {
		// other sub-IOs still in flight; nothing to signal yet
		goto out;
	}
	// last sub-reference gone: complete towards the caller
	cb = origmref->ref_cb;
	MARS_DBG("DONE error=%d\n", cb->cb_error);
	cb->cb_fn(cb);
	// release the initial refcount grabbed in usebuf_ref_io()
	usebuf_ref_put(output, origmref);
out:
	return;
}
2010-08-08 14:02:54 +00:00
/* Completion callback of one sub-mref (one piece of a bio segment).
 * Writes on a non-uptodate buffer are two-phase: usebuf_ref_io() first
 * submits a READ; when that read completes here, the payload is copied
 * into the buffer and the final WRITE is submitted (with an extra
 * sub-reference so the original mref stays alive).  Otherwise this
 * finalizes the IO: copy data out for reads, record any error, and
 * drop this sub-IO's reference on the original mref.
 */
static void _usebuf_mref_endio(struct generic_callback *cb)
{
	struct usebuf_mars_ref_aspect *mref_a;
	struct mars_ref_object *mref;
	struct usebuf_output *output;
	struct mars_ref_object *origmref;
	struct usebuf_mars_ref_aspect *origmref_a;
	int status;

	mref_a = cb->cb_private;
	if (unlikely(!mref_a)) {
		MARS_FAT("cannot get aspect\n");
		goto out_fatal;
	}
	mref = mref_a->object;
	if (unlikely(!mref)) {
		MARS_FAT("cannot get mref\n");
		goto out_fatal;
	}
	output = mref_a->output;
	if (unlikely(!output)) {
		MARS_FAT("bad argument output\n");
		goto out_fatal;
	}
	origmref = mref_a->origmref;
	if (unlikely(!origmref)) {
		MARS_FAT("cannot get origmref\n");
		goto out_fatal;
	}
	MARS_DBG("origmref=%p\n", origmref);
	status = -EINVAL;
	origmref_a = usebuf_mars_ref_get_aspect(output, origmref);
	if (unlikely(!origmref_a)) {
		MARS_ERR("cannot get origmref_a\n");
		goto out_err;
	}
	// check if we have an initial read => now start the final write
	if (mref->ref_may_write != READ && mref->ref_rw == READ && cb->cb_error >= 0) {
		struct usebuf_input *input = output->brick->inputs[0];
		status = -EIO;
		if (unlikely(!(mref->ref_flags & MARS_REF_UPTODATE))) {
			MARS_ERR("not UPTODATE after initial read\n");
			goto out_err;
		}
		// buffer now valid: merge the caller's payload into it
		_usebuf_copy(mref_a, WRITE);
		// grab extra reference
		CHECK_ATOMIC(&origmref_a->subref_count, 1);
		atomic_inc(&origmref_a->subref_count);
		// resubmit; this callback runs again for the WRITE phase
		GENERIC_INPUT_CALL(input, mars_ref_io, mref, WRITE);
	} else {
		// finalize the read or final write
		if (likely(!cb->cb_error)) {
			struct bio *bio = origmref->orig_bio;
			int direction;
			status = -EINVAL;
			if (unlikely(!bio)) {
				MARS_ERR("bad bio setup on origmref %p", origmref);
				goto out_err;
			}
			direction = bio->bi_rw & 1;
			if (direction == READ) {
				// deliver the freshly read data to the bio page
				_usebuf_copy(mref_a, READ);
			}
		}
	}
	CHECK_ATOMIC(&origmref_a->subref_count, 1);
	CHECK_ATOMIC(&mref->ref_count, 1);

	status = cb->cb_error;

out_err:
	if (status < 0) {
		origmref->ref_cb->cb_error = status;
		MARS_ERR("error %d\n", status);
	}
	// drop this sub-IO's reference (matches the inc in usebuf_ref_io)
	_usebuf_origmref_endio(output, origmref);
out_fatal: // no chance to call callback; this will result in mem leak :(
	;
}
////////////////// own brick / input / output operations //////////////////
2010-08-06 07:38:18 +00:00
/* Forward a mars_get_info query unmodified to our (single) input. */
static int usebuf_get_info(struct usebuf_output *output, struct mars_info *info)
{
	return GENERIC_INPUT_CALL(output->brick->inputs[0], mars_get_info, info);
}
/* Buffer acquisition is not supported on this brick: usebuf translates
 * plain (unbuffered) requests into buffered sub-requests internally,
 * so callers must never ask it for a buffer directly.
 * Always returns -ENOSYS.
 */
static int usebuf_ref_get(struct usebuf_output *output, struct mars_ref_object *origmref)
{
	MARS_FAT("not callable!\n");
	return -ENOSYS;
#if 0 // disabled passthrough; fixed to use origmref (old code referenced a nonexistent 'mref')
	struct usebuf_input *input = output->brick->inputs[0];
	return GENERIC_INPUT_CALL(input, mars_ref_get, origmref);
#endif
}
static void usebuf_ref_put(struct usebuf_output *output, struct mars_ref_object *origmref)
{
2010-08-08 09:03:42 +00:00
CHECK_ATOMIC(&origmref->ref_count, 1);
2010-08-06 07:38:18 +00:00
if (!atomic_dec_and_test(&origmref->ref_count)) {
return;
}
usebuf_free_mars_ref(origmref);
#if 0 // NYI
struct usebuf_input *input = output->brick->inputs[0];
GENERIC_INPUT_CALL(input, mars_ref_put, mref);
#endif
}
2010-08-05 15:54:48 +00:00
/* Main IO entry point: split the original bio into per-segment
 * buffered sub-requests (mars_ref_get on the input), copy data for
 * cache hits / writes, and submit the remaining sub-IOs
 * asynchronously.  Completion is signalled through
 * _usebuf_mref_endio() -> _usebuf_origmref_endio() once all
 * sub-references have been dropped.
 * Refcount protocol: one initial ref on origmref plus one subref_count
 * slot are taken up front; each submitted sub-IO adds one subref.
 */
static void usebuf_ref_io(struct usebuf_output *output, struct mars_ref_object *origmref, int rw)
{
	struct usebuf_input *input = output->brick->inputs[0];
	struct bio *bio = origmref->orig_bio;
	struct bio_vec *bvec;
	struct usebuf_mars_ref_aspect *origmref_a;
	loff_t start_pos;
	int start_len;
	int status;
	int i;

	MARS_DBG("START origmref=%p\n", origmref);
	status = -EINVAL;
	if (unlikely(!bio)) {
		MARS_ERR("cannot get bio\n");
		goto done;
	}
	origmref_a = usebuf_mars_ref_get_aspect(output, origmref);
	if (unlikely(!origmref_a)) {
		MARS_ERR("cannot get origmref_a\n");
		goto done;
	}

	origmref->ref_cb->cb_error = 0;

	// initial refcount: prevent intermediate drops
	_CHECK_ATOMIC(&origmref->ref_count, !=, 1);
	atomic_inc(&origmref->ref_count);
	// subref_count must start at 0 here; seed it with our own slot
	_CHECK_ATOMIC(&origmref_a->subref_count, !=, 0);
	atomic_set(&origmref_a->subref_count, 1);
	start_pos = ((loff_t)bio->bi_sector) << 9; // TODO: make dynamic
	start_len = bio->bi_size;
	bio_for_each_segment(bvec, bio, i) {
		int this_len = bvec->bv_len;
		int my_offset = 0;
		// a segment may map onto several buffers (mars_ref_get may
		// grant less than requested), hence the inner loop
		while (this_len > 0) {
			struct mars_ref_object *mref;
			struct usebuf_mars_ref_aspect *mref_a;
			struct generic_callback *cb;
			int my_len;
			int my_rw;

			mref = usebuf_alloc_mars_ref(output, &output->ref_object_layout);
			status = -ENOMEM;
			if (unlikely(!mref)) {
				MARS_ERR("cannot alloc buffer, status=%d\n", status);
				goto done_drop;
			}
			mref->ref_pos = start_pos;
			mref->ref_len = this_len;
			mref->ref_may_write = rw;

			// on success, status carries the granted length
			status = GENERIC_INPUT_CALL(input, mars_ref_get, mref);
			if (unlikely(status < 0)) {
				MARS_ERR("cannot get buffer, status=%d\n", status);
				goto done_drop;
			}
			my_len = status;
			MARS_DBG("origmref=%p got mref=%p pos=%lld len=%d mode=%d flags=%d status=%d\n", origmref, mref, start_pos, this_len, mref->ref_may_write, mref->ref_flags, status);
			status = -ENOMEM;
			mref_a = usebuf_mars_ref_get_aspect(output, mref);
			if (unlikely(!mref_a)) {
				MARS_ERR("cannot get my own mref aspect\n");
				goto put;
			}
			// remember where this sub-IO maps into the original bio
			mref_a->origmref = origmref;
			mref_a->bvec = bvec;
			mref_a->bvec_offset = bvec->bv_offset + my_offset;
			mref_a->bvec_len = my_len;

			status = 0;
			if ((mref->ref_flags & MARS_REF_UPTODATE) && rw == READ) {
				// cache hit: immediately signal success
				_usebuf_copy(mref_a, READ);
				goto put;
			}
			// arm the completion callback for the async sub-IO
			cb = &mref_a->cb;
			cb->cb_fn = _usebuf_mref_endio;
			cb->cb_private = mref_a;
			cb->cb_error = 0;
			cb->cb_prev = NULL;
			mref_a->output = output;
			mref->ref_cb = cb;

			my_rw = rw;
			if (!(my_rw == READ)) {
				if (mref->ref_flags & MARS_REF_UPTODATE) {
					// buffer uptodate: start writing.
					_usebuf_copy(mref_a, WRITE);
				} else {
					// first start initial read, to get the whole buffer UPTODATE
					MARS_DBG("AHA\n");
					my_rw = READ;
				}
			}
			// grab reference for each sub-IO
			CHECK_ATOMIC(&origmref_a->subref_count, 1);
			atomic_inc(&origmref_a->subref_count);
			GENERIC_INPUT_CALL(input, mars_ref_io, mref, my_rw);
put:
			GENERIC_INPUT_CALL(input, mars_ref_put, mref);

			if (unlikely(status < 0))
				break;
			// advance by the granted length
			start_len -= my_len;
			start_pos += my_len;
			this_len -= my_len;
			my_offset += my_len;
		}
		if (unlikely(this_len != 0)) {
			MARS_ERR("bad internal length %d (status=%d)\n", this_len, status);
		}
	}
	if (unlikely(start_len != 0 && !status)) {
		MARS_ERR("length mismatch %d (status=%d)\n", start_len, status);
	}
done_drop:
	// drop initial refcount
	if (status < 0)
		origmref->ref_cb->cb_error = status;
	_usebuf_origmref_endio(output, origmref);
done:
	// NOTE(review): the early 'goto done' paths skip the endio call
	// entirely — presumably the caller handles those errors; verify.
	MARS_DBG("status=%d\n", status);
}
//////////////// object / aspect constructors / destructors ///////////////
2010-08-05 15:54:48 +00:00
static int usebuf_mars_ref_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
2010-07-30 05:46:22 +00:00
{
2010-08-05 15:54:48 +00:00
struct usebuf_mars_ref_aspect *ini = (void*)_ini;
ini->origmref = NULL;
2010-07-30 05:46:22 +00:00
ini->bvec = NULL;
return 0;
}
2010-08-08 09:03:42 +00:00
/* Aspect teardown: nothing to release; parameters intentionally unused. */
static void usebuf_mars_ref_aspect_exit_fn(struct generic_aspect *_ini, void *_init_data)
{
	(void)_ini;
	(void)_init_data;
}
2010-07-30 05:46:22 +00:00
MARS_MAKE_STATICS(usebuf);
////////////////////// brick constructors / destructors ////////////////////
// no brick-private state to set up
static int usebuf_brick_construct(struct usebuf_brick *brick)
{
	return 0;
}
// no output-private state to set up
static int usebuf_output_construct(struct usebuf_output *output)
{
	return 0;
}
///////////////////////// static structs ////////////////////////
// no brick-level operations
static struct usebuf_brick_ops usebuf_brick_ops = {
};
// output operations; mars_ref_get deliberately returns -ENOSYS
static struct usebuf_output_ops usebuf_output_ops = {
	.make_object_layout = usebuf_make_object_layout,
	.mars_get_info = usebuf_get_info,
	.mars_ref_get = usebuf_ref_get,
	.mars_ref_put = usebuf_ref_put,
	.mars_ref_io = usebuf_ref_io,
};
static const struct usebuf_input_type usebuf_input_type = {
	.type_name = "usebuf_input",
	.input_size = sizeof(struct usebuf_input),
};
static const struct usebuf_input_type *usebuf_input_types[] = {
	&usebuf_input_type,
};
static const struct usebuf_output_type usebuf_output_type = {
	.type_name = "usebuf_output",
	.output_size = sizeof(struct usebuf_output),
	.master_ops = &usebuf_output_ops,
	.output_construct = &usebuf_output_construct,
	.aspect_types = usebuf_aspect_types,
	.layout_code = {
		[BRICK_OBJ_MARS_REF] = LAYOUT_ALL,
	}
};
static const struct usebuf_output_type *usebuf_output_types[] = {
	&usebuf_output_type,
};
// public brick type descriptor: exactly one input and one output
const struct usebuf_brick_type usebuf_brick_type = {
	.type_name = "usebuf_brick",
	.brick_size = sizeof(struct usebuf_brick),
	.max_inputs = 1,
	.max_outputs = 1,
	.master_ops = &usebuf_brick_ops,
	.default_input_types = usebuf_input_types,
	.default_output_types = usebuf_output_types,
	.brick_construct = &usebuf_brick_construct,
};
EXPORT_SYMBOL_GPL(usebuf_brick_type);
////////////////// module init stuff /////////////////////////
/* Module init: register the usebuf brick type with the MARS framework. */
static int __init init_usebuf(void)
{
	int status;

	printk(MARS_INFO "init_usebuf()\n");
	status = usebuf_register_brick_type();
	return status;
}
/* Module exit: unregister the usebuf brick type. */
static void __exit exit_usebuf(void)
{
	printk(MARS_INFO "exit_usebuf()\n");
	usebuf_unregister_brick_type();
}
MODULE_DESCRIPTION("MARS usebuf brick");
MODULE_AUTHOR("Thomas Schoebel-Theuer <tst@1und1.de>");
MODULE_LICENSE("GPL");
module_init(init_usebuf);
module_exit(exit_usebuf);