// mars/mars_device_sio.c
// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG

//#define BRICK_DEBUGGING
//#define MARS_DEBUGGING

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/splice.h>

#include "mars.h"

///////////////////////// own type definitions ////////////////////////

#include "mars_device_sio.h"

////////////////// own brick / input / output operations //////////////////
/* Take an additional reference on an already-populated mref.
 * Returns 0 on success, -ENOSYS when the caller did not supply a data
 * buffer (this brick does not allocate buffers on behalf of callers).
 * NOTE(review): _CHECK_ATOMIC semantics come from mars.h — presumably a
 * debug assertion on the refcount; confirm against that header.
 */
static int device_sio_ref_get(struct device_sio_output *output, struct mars_ref_object *mref)
{
	_CHECK_ATOMIC(&mref->ref_count, !=, 0);
	/* Buffered IO is not implemented.
	 * Use an intermediate buf instance if you need it.
	 */
	if (!mref->ref_data)
		return -ENOSYS;
	atomic_inc(&mref->ref_count);
	return 0;
}
/* Drop one reference on mref; the object is freed when the last
 * reference goes away. CHECK_ATOMIC guards against underruns. */
static void device_sio_ref_put(struct device_sio_output *output, struct mars_ref_object *mref)
{
	CHECK_ATOMIC(&mref->ref_count, 1);
	if (!atomic_dec_and_test(&mref->ref_count))
		return;
	device_sio_free_mars_ref(mref);
}
2010-06-18 11:44:14 +00:00
// some code borrowed from the loopback driver
2010-06-17 16:57:10 +00:00
static int transfer_none(int cmd,
struct page *raw_page, unsigned raw_off,
2010-11-26 13:45:10 +00:00
//struct page *loop_page, unsigned loop_off,
void *loop_buf,
2010-06-17 16:57:10 +00:00
int size)
{
2010-11-26 13:45:10 +00:00
#if 1
void *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
//void *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
2010-06-17 16:57:10 +00:00
2010-07-30 05:46:22 +00:00
if (unlikely(!raw_buf || !loop_buf)) {
MARS_ERR("transfer NULL: %p %p\n", raw_buf, loop_buf);
return -EFAULT;
}
2010-11-26 13:45:10 +00:00
2010-06-17 16:57:10 +00:00
if (cmd == READ)
memcpy(loop_buf, raw_buf, size);
else
memcpy(raw_buf, loop_buf, size);
2010-11-26 13:45:10 +00:00
2010-06-17 16:57:10 +00:00
kunmap_atomic(raw_buf, KM_USER0);
2010-11-26 13:45:10 +00:00
//kunmap_atomic(loop_buf, KM_USER1);
2010-06-17 16:57:10 +00:00
cond_resched();
2010-11-26 13:45:10 +00:00
#endif
2010-06-17 16:57:10 +00:00
return 0;
}
/* Write mref->ref_len bytes from mref->ref_data to the backing file at
 * mref->ref_pos, going through the pagecache via the write_begin /
 * write_end address-space operations (pattern borrowed from the
 * loopback driver).
 * The result (0 or a negative error code) is stored in
 * mref->ref_cb->cb_error; the completion callback itself is invoked by
 * the caller (device_sio_ref_io()).
 * NOTE(review): the early return on a missing file leaves cb_error
 * untouched — callers must pre-check output->filp (device_sio_ref_io()
 * does).
 */
static void write_aops(struct device_sio_output *output, struct mars_ref_object *mref)
{
	struct file *file = output->filp;
	loff_t pos = mref->ref_pos;
	void *data = mref->ref_data;
	unsigned offset;
	int len;
	struct address_space *mapping;
	int ret = 0;

	if (unlikely(!file)) {
		MARS_FAT("No FILE\n");
		return;
	}
	mapping = file->f_mapping;

	/* serialize against other writers to this inode */
	mutex_lock(&mapping->host->i_mutex);

	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
	len = mref->ref_len;

	/* copy the buffer page by page, splitting at PAGE_CACHE_SIZE
	 * boundaries; only the first chunk may start unaligned */
	while (len > 0) {
		int transfer_result;
		unsigned size, copied;
		struct page *page = NULL;
		void *fsdata;

		size = PAGE_CACHE_SIZE - offset;
		if (size > len)
			size = len;

		ret = pagecache_write_begin(file, mapping, pos, size, 0,
					    &page, &fsdata);
		if (ret) {
			MARS_ERR("cannot start pagecache_write_begin() error=%d\n", ret);
			if (ret >= 0)
				ret = -EIO;
			goto fail;
		}

		//file_update_time(file);

		/* copy from the request buffer into the pagecache page */
		transfer_result = transfer_none(WRITE, page, offset, data, size);

		/* on transfer failure, tell write_end that 0 bytes landed */
		copied = size;
		if (transfer_result) {
			MARS_ERR("transfer error %d\n", transfer_result);
			copied = 0;
		}

		ret = pagecache_write_end(file, mapping, pos, size, copied,
					  page, fsdata);
		if (ret < 0 || ret != copied || transfer_result) {
			MARS_ERR("write error %d\n", ret);
			if (ret >= 0)
				ret = -EIO;
			goto fail;
		}

		len -= copied;
		offset = 0;  // only the first chunk can be unaligned
		pos += copied;
		data += copied;
	}
	ret = 0;

fail:
	mutex_unlock(&mapping->host->i_mutex);

	mref->ref_cb->cb_error = ret;

#if 1
	/* kick the underlying device queue so the dirty pages make progress */
	blk_run_address_space(mapping);
#endif
}
/* Per-request context handed to the splice actor via sd->u.data. */
struct cookie_data {
	struct device_sio_output *output;
	struct mars_ref_object *mref;
	void *data; // destination buffer (mref->ref_data)
	int len;    // request length in bytes (mref->ref_len)
};
2010-06-17 16:57:10 +00:00
2010-06-18 11:44:14 +00:00
static int
device_sio_splice_actor(struct pipe_inode_info *pipe,
struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct cookie_data *p = sd->u.data;
struct page *page = buf->page;
sector_t IV;
int size, ret;
ret = buf->ops->confirm(pipe, buf);
if (unlikely(ret))
return ret;
IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
(buf->offset >> 9);
size = sd->len;
2010-11-26 13:45:10 +00:00
if (size > p->len)
size = p->len;
2010-06-18 11:44:14 +00:00
2010-11-26 13:45:10 +00:00
if (transfer_none(READ, page, buf->offset, p->data, size)) {
MARS_ERR("transfer error\n");
2010-06-18 11:44:14 +00:00
size = -EINVAL;
}
2010-11-26 13:45:10 +00:00
//flush_dcache_page(p->bvec->bv_page);
2010-06-18 11:44:14 +00:00
return size;
}
/* splice_direct_to_actor() callback: drain the pipe through
 * device_sio_splice_actor(), one pipe_buffer at a time. */
static int
device_sio_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, device_sio_splice_actor);
}
2010-08-05 15:54:48 +00:00
static void read_aops(struct device_sio_output *output, struct mars_ref_object *mref)
2010-06-18 11:44:14 +00:00
{
2010-11-26 13:45:10 +00:00
loff_t pos = mref->ref_pos;
2010-06-18 11:44:14 +00:00
int ret = -EIO;
2010-11-26 13:45:10 +00:00
struct cookie_data cookie = {
.output = output,
.mref = mref,
.data = mref->ref_data,
.len = mref->ref_len,
};
struct splice_desc sd = {
.len = 0,
.total_len = mref->ref_len,
.flags = 0,
.pos = pos,
.u.data = &cookie,
};
ret = splice_direct_to_actor(output->filp, &sd, device_sio_direct_splice_actor);
if (unlikely(ret < 0)) {
MARS_ERR("splice %p %p status=%d\n", output, mref, ret);
2010-07-30 05:46:22 +00:00
}
2010-08-08 14:02:54 +00:00
mref->ref_cb->cb_error = ret;
2010-06-17 16:57:10 +00:00
}
2010-06-18 11:44:14 +00:00
static void sync_file(struct device_sio_output *output)
{
struct file *file = output->filp;
int ret;
#if 1
2010-11-26 13:45:10 +00:00
ret = vfs_fsync(file, file->f_path.dentry, 1);
2010-06-18 11:44:14 +00:00
if (unlikely(ret)) {
MARS_ERR("syncing pages failed: %d\n", ret);
}
return;
#endif
}
/* Execute one mars_ref request synchronously on the backing file and
 * fire its completion callback. Runs in worker-thread context (see
 * device_sio_thread()). Consumes the reference which
 * device_sio_mars_queue() took when queueing the request.
 */
static void device_sio_ref_io(struct device_sio_output *output, struct mars_ref_object *mref)
{
	struct generic_callback *cb = mref->ref_cb;
	// NOTE(review): barrier detection is not wired up yet, so the
	// barrier branches below are currently dead code.
	bool barrier = false;
	int test;

	if (unlikely(!output->filp)) {
		cb->cb_error = -EINVAL;
		goto done;
	}

	if (barrier) {
		MARS_INF("got barrier request\n");
		sync_file(output);
	}

	if (mref->ref_rw == READ) {
		read_aops(output, mref);
	} else {
		write_aops(output, mref);
		// optionally flush after each write (fdatasync-like mode)
		if (barrier || output->o_fdsync)
			sync_file(output);
	}

done:
#if 1
	if (cb->cb_error < 0)
		MARS_ERR("IO error %d\n", cb->cb_error);
#endif

	cb->cb_fn(cb);

	// paranoia: repair refcount underruns before dropping our reference
	test = atomic_read(&mref->ref_count);
	if (test <= 0) {
		MARS_ERR("ref_count UNDERRUN %d\n", test);
		atomic_set(&mref->ref_count, 1);
	}
	if (!atomic_dec_and_test(&mref->ref_count))
		return;
	device_sio_free_mars_ref(mref);
}
2010-12-10 17:40:20 +00:00
static void device_sio_mars_queue(struct device_sio_output *output, struct mars_ref_object *mref)
2010-06-17 16:57:10 +00:00
{
2010-06-18 11:44:14 +00:00
int index = 0;
struct sio_threadinfo *tinfo;
2010-08-05 15:54:48 +00:00
struct device_sio_mars_ref_aspect *mref_a;
2010-08-08 14:02:54 +00:00
struct generic_callback *cb = mref->ref_cb;
2010-07-30 05:46:22 +00:00
unsigned long flags;
2010-11-26 13:45:10 +00:00
atomic_inc(&mref->ref_count);
2010-12-10 17:40:20 +00:00
if (mref->ref_rw == READ) {
2010-07-30 05:46:22 +00:00
traced_lock(&output->g_lock, flags);
2010-06-18 11:44:14 +00:00
index = output->index++;
2010-07-30 05:46:22 +00:00
traced_unlock(&output->g_lock, flags);
2010-06-18 11:44:14 +00:00
index = (index % WITH_THREAD) + 1;
}
2010-08-05 15:54:48 +00:00
mref_a = device_sio_mars_ref_get_aspect(output, mref);
if (unlikely(!mref_a)) {
2010-08-04 17:32:04 +00:00
MARS_FAT("cannot get aspect\n");
2010-08-08 14:02:54 +00:00
cb->cb_error = -EINVAL;
cb->cb_fn(cb);
2010-08-04 17:32:04 +00:00
return;
}
2010-06-18 11:44:14 +00:00
tinfo = &output->tinfo[index];
2010-08-05 15:54:48 +00:00
MARS_DBG("queueing %p on %d\n", mref, index);
2010-08-04 17:32:04 +00:00
2010-07-30 05:46:22 +00:00
traced_lock(&tinfo->lock, flags);
2010-08-05 15:54:48 +00:00
list_add_tail(&mref_a->io_head, &tinfo->mref_list);
2010-07-30 05:46:22 +00:00
traced_unlock(&tinfo->lock, flags);
2010-06-17 16:57:10 +00:00
2010-06-18 11:44:14 +00:00
wake_up(&tinfo->event);
2010-06-17 16:57:10 +00:00
}
/* Worker thread: pop one mref at a time off this thread's queue and
 * execute it synchronously via device_sio_ref_io(). Wakes up at least
 * once per second (HZ timeout) so last_jiffies keeps advancing and the
 * watchdog can tell a live idle thread from a stuck one. */
static int device_sio_thread(void *data)
{
	struct sio_threadinfo *tinfo = data;
	struct device_sio_output *output = tinfo->output;

	MARS_INF("kthread has started.\n");
	//set_user_nice(current, -20);
	while (!kthread_should_stop()) {
		struct list_head *tmp = NULL;
		struct mars_ref_object *mref;
		struct device_sio_mars_ref_aspect *mref_a;
		unsigned long flags;

		wait_event_interruptible_timeout(
			tinfo->event,
			!list_empty(&tinfo->mref_list) || kthread_should_stop(),
			HZ);

		// heartbeat for the watchdog
		tinfo->last_jiffies = jiffies;

		// dequeue under the lock, process outside of it
		traced_lock(&tinfo->lock, flags);
		if (!list_empty(&tinfo->mref_list)) {
			tmp = tinfo->mref_list.next;
			list_del_init(tmp);
		}
		traced_unlock(&tinfo->lock, flags);

		if (!tmp)
			continue;

		mref_a = container_of(tmp, struct device_sio_mars_ref_aspect, io_head);
		mref = mref_a->object;
		MARS_DBG("got %p %p\n", mref_a, mref);
		device_sio_ref_io(output, mref);
	}
	MARS_INF("kthread has stopped.\n");
	return 0;
}
2010-08-03 16:03:32 +00:00
static int device_sio_watchdog(void *data)
{
struct device_sio_output *output = data;
MARS_INF("watchdog has started.\n");
while (!kthread_should_stop()) {
int i;
msleep_interruptible(5000);
for (i = 0; i <= WITH_THREAD; i++) {
struct sio_threadinfo *tinfo = &output->tinfo[i];
unsigned long now = jiffies;
unsigned long elapsed = now - tinfo->last_jiffies;
if (elapsed > 10 * HZ) {
tinfo->last_jiffies = now;
MARS_ERR("thread %d is dead for more than 10 seconds.\n", i);
}
}
}
return 0;
}
2010-06-17 16:57:10 +00:00
2010-07-07 14:09:16 +00:00
static int device_sio_get_info(struct device_sio_output *output, struct mars_info *info)
2010-06-28 05:53:46 +00:00
{
struct file *file = output->filp;
2010-07-07 14:09:16 +00:00
info->current_size = i_size_read(file->f_mapping->host);
info->backing_file = file;
return 0;
2010-06-28 05:53:46 +00:00
}
2010-06-22 13:21:42 +00:00
//////////////// object / aspect constructors / destructors ///////////////
2010-08-05 15:54:48 +00:00
static int device_sio_mars_ref_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
2010-07-23 11:55:18 +00:00
{
2010-08-05 15:54:48 +00:00
struct device_sio_mars_ref_aspect *ini = (void*)_ini;
2010-08-04 17:32:04 +00:00
INIT_LIST_HEAD(&ini->io_head);
2010-07-23 11:55:18 +00:00
return 0;
}
2010-08-08 09:03:42 +00:00
static void device_sio_mars_ref_aspect_exit_fn(struct generic_aspect *_ini, void *_init_data)
{
struct device_sio_mars_ref_aspect *ini = (void*)_ini;
(void)ini;
#if 1
CHECK_HEAD_EMPTY(&ini->io_head);
#endif
}
/* Generate the framework boilerplate (aspect type tables etc.). */
MARS_MAKE_STATICS(device_sio);
2010-06-22 13:21:42 +00:00
////////////////////// brick constructors / destructors ////////////////////
/* Brick constructor: no brick-level state to initialize. */
static int device_sio_brick_construct(struct device_sio_brick *brick)
{
	(void)brick;
	return 0;
}
2010-08-20 10:58:24 +00:00
static int device_sio_switch(struct device_sio_brick *brick, bool state)
2010-06-15 18:31:06 +00:00
{
2010-08-20 10:58:24 +00:00
struct device_sio_output *output = brick->outputs[0];
char *path = output->output_name;
2010-06-16 13:21:30 +00:00
int flags = O_CREAT | O_RDWR | O_LARGEFILE;
int prot = 0600;
2010-08-20 10:58:24 +00:00
mm_segment_t oldfs;
2010-06-16 13:21:30 +00:00
2010-10-22 12:00:08 +00:00
if (output->o_direct) {
flags |= O_DIRECT;
MARS_INF("using O_DIRECT on %s\n", path);
}
2010-08-20 10:58:24 +00:00
if (state) {
oldfs = get_fs();
set_fs(get_ds());
output->filp = filp_open(path, flags, prot);
set_fs(oldfs);
if (IS_ERR(output->filp)) {
int err = PTR_ERR(output->filp);
MARS_ERR("can't open file '%s' status=%d\n", path, err);
output->filp = NULL;
return err;
}
2010-06-17 16:57:10 +00:00
#if 0
2010-08-20 10:58:24 +00:00
{
struct address_space *mapping = output->filp->f_mapping;
int old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, old_gfp_mask & ~(__GFP_IO|__GFP_FS));
}
2010-06-17 16:57:10 +00:00
#endif
2010-08-20 10:58:24 +00:00
MARS_INF("opened file '%s'\n", path);
} else {
// TODO: close etc...
}
return 0;
}
static int device_sio_output_construct(struct device_sio_output *output)
{
struct task_struct *watchdog;
int index;
2010-06-17 16:57:10 +00:00
2010-06-18 11:44:14 +00:00
spin_lock_init(&output->g_lock);
output->index = 0;
for (index = 0; index <= WITH_THREAD; index++) {
struct sio_threadinfo *tinfo = &output->tinfo[index];
tinfo->output = output;
spin_lock_init(&tinfo->lock);
init_waitqueue_head(&tinfo->event);
2010-08-05 15:54:48 +00:00
INIT_LIST_HEAD(&tinfo->mref_list);
2010-08-03 16:03:32 +00:00
tinfo->last_jiffies = jiffies;
2010-06-18 11:44:14 +00:00
tinfo->thread = kthread_create(device_sio_thread, tinfo, "mars_sio%d", index);
if (IS_ERR(tinfo->thread)) {
int error = PTR_ERR(tinfo->thread);
MARS_ERR("cannot create thread, status=%d\n", error);
filp_close(output->filp, NULL);
return error;
}
wake_up_process(tinfo->thread);
2010-06-17 16:57:10 +00:00
}
2010-08-03 16:03:32 +00:00
watchdog = kthread_create(device_sio_watchdog, output, "mars_watchdog%d", 0);
if (!IS_ERR(watchdog)) {
wake_up_process(watchdog);
}
2010-06-16 13:21:30 +00:00
return 0;
}
/* Output destructor: stop all worker threads and close the backing
 * file if it is still open.
 * NOTE(review): the watchdog thread started in the constructor is not
 * stopped here — verify it is terminated elsewhere. */
static int device_sio_output_destruct(struct device_sio_output *output)
{
	int nr;

	for (nr = 0; nr <= WITH_THREAD; nr++) {
		kthread_stop(output->tinfo[nr].thread);
		output->tinfo[nr].thread = NULL;
	}

	if (output->filp) {
		filp_close(output->filp, NULL);
		output->filp = NULL;
	}
	return 0;
}
///////////////////////// static structs ////////////////////////

/* Brick operations: only the power switch (open/close backing file). */
static struct device_sio_brick_ops device_sio_brick_ops = {
	.brick_switch = device_sio_switch,
};
/* Output operations: the mars_ref object protocol plus the info query.
 * Note that IO requests enter through device_sio_mars_queue(), which
 * dispatches them to the worker threads. */
static struct device_sio_output_ops device_sio_output_ops = {
	.make_object_layout = device_sio_make_object_layout,
	.mars_ref_get = device_sio_ref_get,
	.mars_ref_put = device_sio_ref_put,
	.mars_ref_io = device_sio_mars_queue,
	.mars_get_info = device_sio_get_info,
};
/* Output type descriptor registered with the generic brick framework. */
const struct device_sio_output_type device_sio_output_type = {
	.type_name = "device_sio_output",
	.output_size = sizeof(struct device_sio_output),
	.master_ops = &device_sio_output_ops,
	.output_construct = &device_sio_output_construct,
	.output_destruct = &device_sio_output_destruct,
	.aspect_types = device_sio_aspect_types,
	.layout_code = {
		[BRICK_OBJ_MARS_REF] = LAYOUT_NONE,
	}
};
/* Table of output types (this brick has exactly one). */
static const struct device_sio_output_type *device_sio_output_types[] = {
	&device_sio_output_type,
};
/* Brick type descriptor: no inputs (the backing file is the data
 * source), exactly one output. Exported for use by other modules. */
const struct device_sio_brick_type device_sio_brick_type = {
	.type_name = "device_sio_brick",
	.brick_size = sizeof(struct device_sio_brick),
	.max_inputs = 0,
	.max_outputs = 1,
	.master_ops = &device_sio_brick_ops,
	.default_output_types = device_sio_output_types,
	.brick_construct = &device_sio_brick_construct,
};
EXPORT_SYMBOL_GPL(device_sio_brick_type);
////////////////// module init stuff /////////////////////////

/* Module entry: register the brick type with the MARS framework. */
static int __init init_device_sio(void)
{
	MARS_INF("init_device_sio()\n");
	return device_sio_register_brick_type();
}
/* Module exit: unregister the brick type. */
static void __exit exit_device_sio(void)
{
	MARS_INF("exit_device_sio()\n");
	device_sio_unregister_brick_type();
}

MODULE_DESCRIPTION("MARS device_sio brick");
MODULE_AUTHOR("Thomas Schoebel-Theuer <tst@1und1.de>");
MODULE_LICENSE("GPL");

module_init(init_device_sio);
module_exit(exit_device_sio);