import mars-14.tgz

Thomas Schoebel-Theuer 2010-07-23 12:55:18 +01:00
parent 388c3d59f4
commit 2a3370fdc0
16 changed files with 1505 additions and 1113 deletions


@@ -2,12 +2,12 @@
# Makefile for MARS
#
obj-$(CONFIG_MARS) += mars_generic.o
obj-$(CONFIG_MARS) += brick.o mars_generic.o
obj-$(CONFIG_MARS_DUMMY) += mars_dummy.o
obj-$(CONFIG_MARS_IF_DEVICE) += mars_if_device.o
obj-$(CONFIG_MARS_DEVICE_SIO) += mars_device_sio.o
obj-$(CONFIG_MARS_BUF) += mars_buf.o
obj-$(CONFIG_MARS_TRANS_LOGGER) += mars_trans_logger.o
#obj-$(CONFIG_MARS_TRANS_LOGGER) += mars_trans_logger.o
obj-$(CONFIG_MARS_TEST) += mars_test.o

brick.c Normal file

@@ -0,0 +1,309 @@
// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#define _STRATEGY
#define BRICK_OBJ_NR /*empty => leads to an open array */
#include "brick.h"
//////////////////////////////////////////////////////////////
// object stuff
//////////////////////////////////////////////////////////////
// brick stuff
static int nr_brick_types = 0;
static const struct generic_brick_type *brick_types[MAX_BRICK_TYPES] = {};
int generic_register_brick_type(const struct generic_brick_type *new_type)
{
int i;
int found = -1;
BRICK_DBG("generic_register_brick_type()\n");
for (i = 0; i < nr_brick_types; i++) {
if (!brick_types[i]) {
found = i;
continue;
}
if (!strcmp(brick_types[i]->type_name, new_type->type_name)) {
printk("sorry, bricktype %s is already registered.\n", new_type->type_name);
return -EEXIST;
}
}
if (found < 0) {
if (nr_brick_types >= MAX_BRICK_TYPES) {
printk("sorry, cannot register bricktype %s.\n", new_type->type_name);
return -EEXIST;
}
found = nr_brick_types++;
}
brick_types[found] = new_type;
BRICK_DBG("generic_register_brick_type() done.\n");
return 0;
}
EXPORT_SYMBOL_GPL(generic_register_brick_type);
int generic_unregister_brick_type(const struct generic_brick_type *old_type)
{
BRICK_DBG("generic_unregister_brick_type()\n");
return -1; // NYI
}
EXPORT_SYMBOL_GPL(generic_unregister_brick_type);
int generic_brick_init_full(
void *data,
int size,
const struct generic_brick_type *brick_type,
const struct generic_input_type **input_types,
const struct generic_output_type **output_types,
char **names)
{
struct generic_brick *brick = data;
int status;
int i;
BRICK_DBG("generic_brick_init_full()\n");
// first, call the generic constructors
status = generic_brick_init(brick_type, brick, *names++);
if (status)
return status;
data += brick_type->brick_size;
size -= brick_type->brick_size;
if (size < 0)
return -ENOMEM;
if (!input_types) {
BRICK_DBG("generic_brick_init_full: switch to default input_types\n");
input_types = brick_type->default_input_types;
names = brick_type->default_input_names;
}
if (input_types) {
BRICK_DBG("generic_brick_init_full: input_types\n");
brick->inputs = data;
data += sizeof(void*) * brick_type->max_inputs;
size -= sizeof(void*) * brick_type->max_inputs;
if (size < 0)
return -1;
for (i = 0; i < brick_type->max_inputs; i++) {
struct generic_input *input = data;
const struct generic_input_type *type = *input_types++;
BRICK_DBG("generic_brick_init_full: calling generic_input_init()\n");
status = generic_input_init(brick, i, type, input, names ? *names++ : type->type_name);
if (status)
return status;
data += type->input_size;
size -= type->input_size;
if (size < 0)
return -ENOMEM;
}
}
if (!output_types) {
BRICK_DBG("generic_brick_init_full: switch to default output_types\n");
output_types = brick_type->default_output_types;
names = brick_type->default_output_names;
}
if (output_types) {
BRICK_DBG("generic_brick_init_full: output_types\n");
brick->outputs = data;
data += sizeof(void*) * brick_type->max_outputs;
size -= sizeof(void*) * brick_type->max_outputs;
if (size < 0)
return -1;
for (i = 0; i < brick_type->max_outputs; i++) {
struct generic_output *output = data;
const struct generic_output_type *type = *output_types++;
BRICK_DBG("generic_brick_init_full: calling generic_output_init()\n");
status = generic_output_init(brick, i, type, output, names ? *names++ : type->type_name);
if (status)
return status;
data += type->output_size;
size -= type->output_size;
if (size < 0)
return -ENOMEM;
}
}
// call the specific constructors
BRICK_DBG("generic_brick_init_full: call specific contructors.\n");
if (brick_type->brick_construct) {
BRICK_DBG("generic_brick_init_full: calling brick_construct()\n");
status = brick_type->brick_construct(brick);
if (status)
return status;
}
for (i = 0; i < brick_type->max_inputs; i++) {
struct generic_input *input = brick->inputs[i];
if (!input)
continue;
if (!input->type) {
BRICK_ERR("input has no associated type!\n");
continue;
}
if (input->type->input_construct) {
BRICK_DBG("generic_brick_init_full: calling input_construct()\n");
status = input->type->input_construct(input);
if (status)
return status;
}
}
for (i = 0; i < brick_type->max_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (!output)
continue;
if (!output->type) {
BRICK_ERR("output has no associated type!\n");
continue;
}
if (output->type->output_construct) {
BRICK_DBG("generic_brick_init_full: calling output_construct()\n");
status = output->type->output_construct(output);
if (status)
return status;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(generic_brick_init_full);
int generic_brick_exit_full(struct generic_brick *brick)
{
int i;
int status;
// first, check all outputs
for (i = 0; i < brick->nr_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (!output)
continue;
if (!output->type) {
BRICK_ERR("output has no associated type!\n");
continue;
}
if (output->nr_connected) {
BRICK_DBG("output is connected!\n");
return -EPERM;
}
}
// ok, test succeeded. start destruction...
for (i = 0; i < brick->type->max_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (!output)
continue;
if (!output->type) {
BRICK_ERR("output has no associated type!\n");
continue;
}
if (output->type->output_destruct) {
BRICK_DBG("generic_brick_exit_full: calling output_destruct()\n");
status = output->type->output_destruct(output);
if (status)
return status;
brick->outputs[i] = NULL; // others may remain leftover
}
}
for (i = 0; i < brick->type->max_inputs; i++) {
struct generic_input *input = brick->inputs[i];
if (!input)
continue;
if (!input->type) {
BRICK_ERR("input has no associated type!\n");
continue;
}
if (input->type->input_destruct) {
BRICK_DBG("generic_brick_exit_full: calling input_destruct()\n");
status = input->type->input_destruct(input);
if (status)
return status;
brick->inputs[i] = NULL; // others may remain leftover
status = generic_disconnect(input);
if (status)
return status;
}
}
if (brick->type->brick_destruct) {
BRICK_DBG("generic_brick_exit_full: calling brick_destruct()\n");
status = brick->type->brick_destruct(brick);
if (status)
return status;
}
return 0;
}
EXPORT_SYMBOL_GPL(generic_brick_exit_full);
int generic_brick_exit_recursively(struct generic_brick *brick)
{
int final_status = 0;
LIST_HEAD(head);
list_add(&brick->tmp_head, &head);
while (!list_empty(&head)) {
int i;
int status;
brick = container_of(head.next, struct generic_brick, tmp_head);
for (i = 0; i < brick->nr_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (output->nr_connected) {
list_del(&brick->tmp_head);
continue;
}
}
list_del(&brick->tmp_head);
for (i = 0; i < brick->nr_inputs; i++) {
struct generic_input *input = brick->inputs[i];
if (input->connect) {
struct generic_brick *other = input->connect->brick;
list_add(&other->tmp_head, &head);
}
}
status = generic_brick_exit_full(brick);
if (status)
final_status = status;
}
return final_status;
}
EXPORT_SYMBOL_GPL(generic_brick_exit_recursively);
////////////////////////////////////////////////////////////////////////
// default implementations
#if 0
int default_make_object_layout(struct generic_output *output, struct generic_object_layout *object_layout)
{
const struct generic_object_type *object_type = object_layout->object_type;
int status;
int aspect_size = 0;
struct default_brick *brick = output->brick;
int i;
for (i = 0; i < brick->type->max_inputs; i++) {
struct dummy_input *input = brick->inputs[i];
if (input && input->connect) {
int substatus = input->connect->ops->make_object_layout(input->connect, object_layout);
if (substatus < 0)
return substatus;
aspect_size += substatus;
}
}
if (object_type == &mars_io_type) {
aspect_size = sizeof(struct dummy_mars_io_aspect);
status = generic_io_add_aspect(output, object_layout, &dummy_mars_io_aspect_type);
} else if (object_type == &mars_buf_type) {
aspect_size = sizeof(struct dummy_mars_buf_aspect);
status = generic_buf_add_aspect(output, object_layout, &dummy_mars_buf_aspect_type);
} else if (object_type == &mars_buf_callback_type) {
aspect_size = sizeof(struct dummy_mars_buf_callback_aspect);
status = generic_buf_callback_add_aspect(output, object_layout, &dummy_mars_buf_callback_aspect_type);
} else {
return 0;
}
if (status < 0)
return status;
return aspect_size;
}
#endif
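A minimal illustrative sketch (not part of the diff; the "example" brick and its type are hypothetical) of how a module would use the registration API defined above from its init/exit hooks:

/* sketch: register the brick type at module load, drop it again at unload */
extern const struct generic_brick_type example_brick_type;

static int __init example_init(void)
{
	return generic_register_brick_type(&example_brick_type);
}

static void __exit example_exit(void)
{
	generic_unregister_brick_type(&example_brick_type);
}

module_init(example_init);
module_exit(example_exit);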

brick.h Normal file

@@ -0,0 +1,494 @@
// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG
#ifndef BRICK_H
#define BRICK_H
#ifdef _STRATEGY
#define _STRATEGY_CODE(X) X
#define _NORMAL_CODE(X) /**/
#else
#define _STRATEGY_CODE(X) /**/
#define _NORMAL_CODE(X) X
#endif
#define BRICK_ERROR "BRICK_ERROR: "
#define BRICK_INFO "BRICK_INFO: "
#define BRICK_DEBUG "BRICK_DEBUG: "
#define BRICK_ERR(args...) printk(BRICK_ERROR args)
#define BRICK_INF(args...) printk(BRICK_INFO args)
#ifdef BRICK_DEBUGGING
#define BRICK_DBG(args...) printk(BRICK_DEBUG args)
#else
#define BRICK_DBG(args...) /**/
#endif
#define MAX_BRICK_TYPES 64
/////////////////////////////////////////////////////////////////////////
// definitions for generic objects with aspects
#define MAX_DEFAULT_ASPECTS 8
struct generic_aspect;
#define GENERIC_ASPECT_TYPE(PREFIX) \
char *aspect_type_name; \
int aspect_size; \
int (*init_fn)(struct generic_aspect *ini, void *data); \
struct generic_aspect_type {
GENERIC_ASPECT_TYPE(generic);
};
#define GENERIC_ASPECT_LAYOUT(PREFIX) \
struct generic_object_layout *object_layout; \
const struct generic_aspect_type *aspect_type; \
int aspect_offset; \
void *init_data; \
struct PREFIX##_aspect_layout *next; /* TODO: replace with list_head */ \
struct generic_aspect_layout {
GENERIC_ASPECT_LAYOUT(generic);
};
#define GENERIC_OBJECT_TYPE(PREFIX) \
char *object_type_name; \
int default_size; \
int brick_obj_nr; \
struct generic_object_type {
GENERIC_OBJECT_TYPE(generic);
};
#define GENERIC_OBJECT_LAYOUT(PREFIX) \
const struct generic_object_type *object_type; \
struct generic_aspect_layout *aspect_list; /* TODO: replace with list_head */ \
int object_size; \
int rest_size; \
void *alloc_ptr; \
struct generic_object_layout {
GENERIC_OBJECT_LAYOUT(generic);
};
#define GENERIC_OBJECT_LAYOUT_FUNCTIONS(PREFIX) \
\
extern inline struct PREFIX##_object_layout *PREFIX##_init_object_layout(void *data, int size, int max_aspects, const struct generic_object_type *object_type) \
{ \
struct PREFIX##_object_layout *object_layout = data; \
data += sizeof(struct PREFIX##_object_layout); \
size -= sizeof(struct PREFIX##_object_layout); \
if (size < 0) \
return NULL; \
object_layout->object_type = object_type; \
object_layout->object_size = object_type->default_size; \
object_layout->alloc_ptr = data; \
object_layout->rest_size = size; \
return object_layout; \
} \
#define GENERIC_ASPECT_LAYOUT_FUNCTIONS(BRICK,PREFIX) \
\
extern int BRICK##_##PREFIX##_add_aspect(struct BRICK##_output *output, struct generic_object_layout *object_layout, const struct generic_aspect_type *aspect_type) \
{ \
int nr = object_layout->object_type->brick_obj_nr; \
struct generic_aspect_layout *aspect_layout; \
aspect_layout = (void*)&output->aspect_layouts[nr]; \
if (aspect_layout->object_layout) \
return -EEXIST; \
aspect_layout->next = object_layout->aspect_list; \
object_layout->aspect_list = aspect_layout; \
aspect_layout->object_layout = object_layout; \
aspect_layout->aspect_type = aspect_type; \
aspect_layout->aspect_offset = object_layout->object_size; \
aspect_layout->init_data = output; \
object_layout->object_size += aspect_type->aspect_size; \
return 0; \
} \
#define GENERIC_OBJECT(PREFIX) \
struct PREFIX##_object_layout *object_layout; \
int object_size; \
struct generic_object {
GENERIC_OBJECT(generic);
};
#define GENERIC_ASPECT(PREFIX) \
struct PREFIX##_object *object; \
struct generic_aspect {
GENERIC_ASPECT(generic);
};
#define GENERIC_OBJECT_FUNCTIONS(PREFIX) \
\
extern inline struct PREFIX##_object *PREFIX##_construct(void *data, struct PREFIX##_object_layout *object_layout) \
{ \
struct PREFIX##_object *obj = data; \
struct generic_aspect_layout *aspect_layout; \
\
obj->object_layout = object_layout; \
for (aspect_layout = object_layout->aspect_list; aspect_layout; aspect_layout = aspect_layout->next) { \
struct generic_aspect *aspect; \
if (!aspect_layout->aspect_type) \
continue; \
aspect = data + aspect_layout->aspect_offset; \
aspect->object = (void*)obj; \
if (aspect_layout->aspect_type->init_fn) { \
int status = aspect_layout->aspect_type->init_fn((void*)aspect, aspect_layout->init_data); \
if (status) { \
return NULL; \
} \
} \
} \
return obj; \
} \
#define GENERIC_ASPECT_FUNCTIONS(BRICK,PREFIX) \
\
extern inline struct BRICK##_##PREFIX##_aspect *BRICK##_##PREFIX##_get_aspect(struct BRICK##_output *output, struct PREFIX##_object *obj) \
{ \
struct PREFIX##_object_layout *object_layout; \
struct generic_aspect_layout *aspect_layout; \
int nr; \
\
object_layout = obj->object_layout; \
nr = object_layout->object_type->brick_obj_nr; \
aspect_layout = &output->aspect_layouts[nr]; \
if (unlikely(!aspect_layout->aspect_type)) { \
BRICK_ERR("brick "#BRICK": bad aspect slot on "#PREFIX" pointer %p\n", obj); \
return NULL; \
} \
return (void*)obj + aspect_layout->aspect_offset; \
} \
\
/////////////////////////////////////////////////////////////////////////
// definitions for generic bricks
struct generic_input;
struct generic_output;
struct generic_brick_ops;
struct generic_output_ops;
struct generic_brick_type;
#define GENERIC_BRICK(PREFIX) \
char *brick_name; \
const struct PREFIX##_brick_type *type; \
struct PREFIX##_brick_ops *ops; \
int nr_inputs; \
int nr_outputs; \
struct PREFIX##_input **inputs; \
struct PREFIX##_output **outputs; \
struct list_head tmp_head; \
struct generic_brick {
GENERIC_BRICK(generic);
};
#define GENERIC_INPUT(PREFIX) \
char *input_name; \
struct PREFIX##_brick *brick; \
const struct PREFIX##_input_type *type; \
struct PREFIX##_output *connect; \
struct generic_input {
GENERIC_INPUT(generic);
};
#define GENERIC_OUTPUT(PREFIX) \
char *output_name; \
struct PREFIX##_brick *brick; \
const struct PREFIX##_output_type *type; \
struct PREFIX##_output_ops *ops; \
int nr_connected; \
/* _must_ be the last member */ \
struct generic_aspect_layout aspect_layouts[BRICK_OBJ_NR]; \
struct generic_output {
GENERIC_OUTPUT(generic);
};
#define GENERIC_OUTPUT_CALL(OUTPUT,OP,...) \
((OUTPUT) && (OUTPUT)->ops->OP ? \
(OUTPUT)->ops->OP(OUTPUT, ##__VA_ARGS__) : -ENOSYS) \
#define GENERIC_INPUT_CALL(INPUT,OP,...) \
((INPUT) && (INPUT)->connect ? \
GENERIC_OUTPUT_CALL((INPUT)->connect, OP, ##__VA_ARGS__) : \
-ENOSYS) \
#define GENERIC_BRICK_OPS(PREFIX) \
/*int (*brick_start)(struct PREFIX##_brick *brick);*/ \
/*int (*brick_stop)(struct PREFIX##_brick *brick);*/ \
struct generic_brick_ops {
GENERIC_BRICK_OPS(generic);
};
#define GENERIC_OUTPUT_OPS(PREFIX) \
/*int (*output_start)(struct PREFIX##_output *output);*/ \
/*int (*output_stop)(struct PREFIX##_output *output);*/ \
int (*make_object_layout)(struct PREFIX##_output *output, struct generic_object_layout *object_layout); \
struct generic_output_ops {
GENERIC_OUTPUT_OPS(generic)
};
// although possible, *_type should never be extended
#define GENERIC_BRICK_TYPE(PREFIX) \
char *type_name; \
int brick_size; \
int max_inputs; \
int max_outputs; \
const struct PREFIX##_input_type **default_input_types; \
char **default_input_names; \
const struct PREFIX##_output_type **default_output_types; \
char **default_output_names; \
struct PREFIX##_brick_ops *master_ops; \
const struct PREFIX##input_types **default_type; \
int (*brick_construct)(struct PREFIX##_brick *brick); \
int (*brick_destruct)(struct PREFIX##_brick *brick); \
struct generic_brick_type {
GENERIC_BRICK_TYPE(generic);
};
#define GENERIC_INPUT_TYPE(PREFIX) \
char *type_name; \
int input_size; \
int (*input_construct)(struct PREFIX##_input *input); \
int (*input_destruct)(struct PREFIX##_input *input); \
struct generic_input_type {
GENERIC_INPUT_TYPE(generic);
};
#define GENERIC_OUTPUT_TYPE(PREFIX) \
char *type_name; \
int output_size; \
struct PREFIX##_output_ops *master_ops; \
int (*output_construct)(struct PREFIX##_output *output); \
int (*output_destruct)(struct PREFIX##_output *output); \
const int *test[BRICK_OBJ_NR]; \
struct generic_output_type {
GENERIC_OUTPUT_TYPE(generic);
};
int generic_register_brick_type(const struct generic_brick_type *new_type);
int generic_unregister_brick_type(const struct generic_brick_type *old_type);
#ifdef _STRATEGY // call this only in strategy bricks, never in ordinary bricks
// you need this only if you circumvent generic_brick_init_full()
extern inline int generic_brick_init(const struct generic_brick_type *type, struct generic_brick *brick, char *brick_name)
{
brick->brick_name = brick_name;
brick->type = type;
brick->ops = type->master_ops;
brick->nr_inputs = 0;
brick->nr_outputs = 0;
brick->tmp_head.next = brick->tmp_head.prev = &brick->tmp_head;
return 0;
}
extern inline int generic_input_init(struct generic_brick *brick, int index, const struct generic_input_type *type, struct generic_input *input, char *input_name)
{
if (index < 0 || index >= brick->type->max_inputs)
return -ENOMEM;
if (brick->inputs[index])
return -EEXIST;
input->input_name = input_name;
input->brick = brick;
input->type = type;
input->connect = NULL;
brick->inputs[index] = input;
brick->nr_inputs++;
return 0;
}
extern inline int generic_output_init(struct generic_brick *brick, int index, const struct generic_output_type *type, struct generic_output *output, char *output_name)
{
if (index < 0 || index >= brick->type->max_outputs)
return -ENOMEM;
if (brick->outputs[index])
return -EEXIST;
output->output_name = output_name;
output->brick = brick;
output->type = type;
output->ops = type->master_ops;
output->nr_connected = 0;
brick->outputs[index] = output;
brick->nr_outputs++;
return 0;
}
extern inline int generic_size(const struct generic_brick_type *brick_type)
{
int size = brick_type->brick_size;
int i;
size += brick_type->max_inputs * sizeof(void*);
for (i = 0; i < brick_type->max_inputs; i++) {
size += brick_type->default_input_types[i]->input_size;
}
size += brick_type->max_outputs * sizeof(void*);
for (i = 0; i < brick_type->max_outputs; i++) {
size += brick_type->default_output_types[i]->output_size;
}
return size;
}
/* If possible, use this instead of generic_*_init().
* input_types and output_types may be NULL => use default_*_types
*/
int generic_brick_init_full(
void *data,
int size,
const struct generic_brick_type *brick_type,
const struct generic_input_type **input_types,
const struct generic_output_type **output_types,
char **names);
int generic_brick_exit_full(
struct generic_brick *brick);
extern inline int generic_connect(struct generic_input *input, struct generic_output *output)
{
BRICK_DBG("generic_connect(input=%p, output=%p)\n", input, output);
if (!input || !output)
return -EINVAL;
if (input->connect)
return -EEXIST;
input->connect = output;
output->nr_connected++; //TODO: protect against races, e.g. atomic_t
BRICK_DBG("now nr_connected=%d\n", output->nr_connected);
return 0;
}
extern inline int generic_disconnect(struct generic_input *input)
{
BRICK_DBG("generic_disconnect(input=%p)\n", input);
if (!input)
return -EINVAL;
if (input->connect) {
input->connect->nr_connected--; //TODO: protect against races, e.g. atomic_t
BRICK_DBG("now nr_connected=%d\n", input->connect->nr_connected);
input->connect = NULL;
}
return 0;
}
#endif // _STRATEGY
// simple wrappers for type safety
#define GENERIC_MAKE_FUNCTIONS(PREFIX) \
extern inline int PREFIX##_register_brick_type(void) \
{ \
extern const struct PREFIX##_brick_type PREFIX##_brick_type; \
extern int PREFIX##_brick_nr; \
if (PREFIX##_brick_nr >= 0) { \
BRICK_ERR("brick type " #PREFIX " is already registered.\n"); \
return -EEXIST; \
} \
PREFIX##_brick_nr = generic_register_brick_type((const struct generic_brick_type*)&PREFIX##_brick_type); \
return PREFIX##_brick_nr < 0 ? PREFIX##_brick_nr : 0; \
} \
\
extern inline int PREFIX##_unregister_brick_type(void) \
{ \
extern const struct PREFIX##_brick_type PREFIX##_brick_type; \
return generic_unregister_brick_type((const struct generic_brick_type*)&PREFIX##_brick_type); \
} \
\
_STRATEGY_CODE( \
extern const struct PREFIX##_brick_type PREFIX##_brick_type; \
extern const struct PREFIX##_input_type PREFIX##_input_type; \
extern const struct PREFIX##_output_type PREFIX##_output_type; \
\
static inline int PREFIX##_brick_init(struct PREFIX##_brick *brick, char *brick_name) \
{ \
return generic_brick_init((const struct generic_brick_type*)&PREFIX##_brick_type, (struct generic_brick*)brick, brick_name); \
} \
\
static inline int PREFIX##_input_init(struct PREFIX##_brick *brick, int index, struct PREFIX##_input *input, char *input_name) \
{ \
return generic_input_init( \
(struct generic_brick*)brick, \
index, \
(struct generic_input_type*)&PREFIX##_input_type, \
(struct generic_input*)input, \
input_name); \
} \
\
static inline int PREFIX##_output_init(struct PREFIX##_brick *brick, int index, struct PREFIX##_input *output, char *output_name) \
{ \
return generic_output_init( \
(struct generic_brick*)brick, \
index, \
(const struct generic_output_type*)&PREFIX##_output_type, \
(struct generic_output*)output, \
output_name); \
} \
\
extern inline int PREFIX##_size(const struct PREFIX##_brick_type *brick_type) \
{ \
return generic_size((const struct generic_brick_type*)brick_type); \
} \
\
extern inline int PREFIX##_brick_init_full( \
void *data, \
int size, \
const struct PREFIX##_brick_type *brick_type, \
const struct PREFIX##_input_type **input_types, \
const struct PREFIX##_output_type **output_types, \
char **names) \
{ \
return generic_brick_init_full( \
data, \
size, \
(const struct generic_brick_type*)brick_type, \
(const struct generic_input_type**)input_types, \
(const struct generic_output_type**)output_types, \
(char**)names); \
} \
\
extern inline int PREFIX##_brick_exit_full( \
struct PREFIX##_brick *brick) \
{ \
return generic_brick_exit_full( \
(struct generic_brick*)brick); \
} \
)
/* Define a pair of connectable subtypes.
* For type safety, use this for all possible combinations.
* Yes, this may become quadratic in large type systems, but
* (a) thou shalt not define many types,
* (b) these macros generate only definitions, but no additional
* code at runtime.
*/
#define GENERIC_MAKE_CONNECT(INPUT_PREFIX,OUTPUT_PREFIX) \
\
_STRATEGY_CODE( \
\
extern inline int INPUT_PREFIX##_##OUTPUT_PREFIX##_connect( \
struct INPUT_PREFIX##_input *input, \
struct OUTPUT_PREFIX##_output *output) \
{ \
return generic_connect((struct generic_input*)input, (struct generic_output*)output); \
} \
\
extern inline int INPUT_PREFIX##_##OUTPUT_PREFIX####_disconnect( \
struct INPUT_PREFIX##_input *input) \
{ \
return generic_disconnect((struct generic_input*)input); \
} \
)
#endif
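A minimal illustrative sketch (hypothetical helper, strategy-level code) of the allocation pattern that generic_size() and generic_brick_init_full() are meant to support: one contiguous block which the init routine carves into the brick, its input/output pointer arrays, and the default inputs and outputs:

/* sketch only; kzalloc()/kfree() need <linux/slab.h> */
static struct generic_brick *make_example_brick(const struct generic_brick_type *type)
{
	static char *names[] = { "example_brick" }; /* brick name; default names cover the rest */
	int size = generic_size(type);
	void *mem = kzalloc(size, GFP_KERNEL);

	if (!mem)
		return NULL;
	/* NULL input/output types => generic_brick_init_full() falls back to the type's defaults */
	if (generic_brick_init_full(mem, size, type, NULL, NULL, names) != 0) {
		kfree(mem);
		return NULL;
	}
	return mem;
}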

mars.h

@@ -3,14 +3,7 @@
#define MARS_H
#include <linux/list.h>
#ifdef _STRATEGY
#define _STRATEGY_CODE(X) X
#define _NORMAL_CODE(X) /**/
#else
#define _STRATEGY_CODE(X) /**/
#define _NORMAL_CODE(X) X
#endif
#include <asm/atomic.h>
#define MARS_ERROR "MARS_ERROR: "
#define MARS_INFO "MARS_INFO: "
@@ -18,465 +11,19 @@
#define MARS_ERR(args...) printk(MARS_ERROR args)
#define MARS_INF(args...) printk(MARS_INFO args)
//#define MARS_DBG(args...) printk("MARS_DEBUG: " args)
#ifdef MARS_DEBUGGING
#define MARS_DBG(args...) printk(MARS_DEBUG args)
#else
#define MARS_DBG(args...) /**/
#endif
/////////////////////////////////////////////////////////////////////////
#define BRICK_OBJ_MARS_IO 0
#define BRICK_OBJ_MARS_BUF 1
#define BRICK_OBJ_MARS_BUF_CALLBACK 2
#define BRICK_OBJ_NR 3
// definitions for generic objects with aspects
#include "brick.h"
#define MAX_DEFAULT_ASPECTS 8
struct generic_aspect;
#define GENERIC_ASPECT_LAYOUT(PREFIX) \
int aspect_size; \
int aspect_offset; \
int (*init_fn)(struct PREFIX##_aspect *ini, void *data); \
void *init_data; \
struct generic_aspect_layout {
GENERIC_ASPECT_LAYOUT(generic);
};
#define GENERIC_OBJECT_TYPE(PREFIX) \
char *object_type_name; \
int default_size; \
struct generic_object_type {
GENERIC_OBJECT_TYPE(generic);
};
#define GENERIC_OBJECT_LAYOUT(PREFIX) \
const struct generic_object_type *type; \
int object_size; \
int rest_size; \
int max_aspects; \
int nr_aspects; \
void *alloc_ptr; \
struct PREFIX##_aspect_layout *aspect_layouts; \
struct generic_object_layout {
GENERIC_OBJECT_LAYOUT(generic);
};
#define GENERIC_OBJECT_LAYOUT_FUNCTIONS(PREFIX) \
\
extern inline struct PREFIX##_object_layout *PREFIX##_init_object_layout(void *data, int size, int max_aspects, const struct generic_object_type *type) \
{ \
struct PREFIX##_object_layout *object_layout = data; \
data += sizeof(struct PREFIX##_object_layout); \
size -= sizeof(struct PREFIX##_object_layout); \
if (size < 0) \
return NULL; \
object_layout->type = type; \
object_layout->object_size = type->default_size; \
object_layout->max_aspects = max_aspects; \
object_layout->nr_aspects = 0; \
size -= max_aspects * sizeof(struct PREFIX##_aspect_layout); \
if (size < 0) \
return NULL; \
object_layout->aspect_layouts = data; \
data += max_aspects * sizeof(struct PREFIX##_aspect_layout); \
object_layout->alloc_ptr = data; \
object_layout->rest_size = size; \
return object_layout; \
} \
\
extern int PREFIX##_add_aspect(struct generic_object_layout *object_layout, int aspect_size, int (*init_fn)(struct PREFIX##_aspect *_ini, void *_init_data), void *init_data) \
{ \
int slot = object_layout->nr_aspects; \
int max_aspects = object_layout->max_aspects; \
struct PREFIX##_aspect_layout *aspect_layout; \
if (unlikely(slot >= max_aspects)) { \
void *data = object_layout->alloc_ptr; \
void *old; \
int size = object_layout->rest_size; \
int old_aspects = max_aspects; \
max_aspects <<= 1; \
size -= max_aspects * sizeof(struct PREFIX##_aspect_layout); \
if (size < 0) \
return -ENOMEM; \
object_layout->rest_size = size; \
old = object_layout->aspect_layouts; \
object_layout->aspect_layouts = data; \
memcpy(data, old, old_aspects * sizeof(struct PREFIX##_aspect_layout)); \
data += max_aspects * sizeof(struct PREFIX##_aspect_layout); \
object_layout->alloc_ptr = data; \
object_layout->max_aspects = max_aspects; \
} \
aspect_layout = (void*)&object_layout->aspect_layouts[slot]; \
aspect_layout->aspect_size = aspect_size; \
aspect_layout->aspect_offset = object_layout->object_size; \
aspect_layout->init_fn = init_fn; \
aspect_layout->init_data = init_data; \
object_layout->object_size += aspect_size; \
object_layout->nr_aspects++; \
return slot; \
} \
#define GENERIC_OBJECT(PREFIX) \
struct PREFIX##_object_layout *object_layout; \
int object_size; \
struct generic_object {
GENERIC_OBJECT(generic);
};
#define GENERIC_ASPECT(PREFIX) \
struct PREFIX##_object *object; \
struct generic_aspect {
GENERIC_ASPECT(generic);
};
#define GENERIC_OBJECT_FUNCTIONS(PREFIX) \
\
extern inline struct PREFIX##_object *PREFIX##_construct(void *data, struct PREFIX##_object_layout *object_layout) \
{ \
int i; \
struct PREFIX##_object *obj = data; \
obj->object_layout = object_layout; \
for (i = 0; i < object_layout->nr_aspects; i++) { \
struct PREFIX##_aspect_layout *aspect_layout = &object_layout->aspect_layouts[i]; \
struct PREFIX##_aspect *aspect = data + aspect_layout->aspect_offset; \
aspect->object = obj; \
if (aspect_layout->init_fn) { \
void *init_data = aspect_layout->init_data; \
int status = aspect_layout->init_fn(aspect, init_data); \
if (status) { \
return NULL; \
} \
} \
} \
return obj; \
} \
\
extern inline void *PREFIX##_get_aspect(struct PREFIX##_object *obj, int slot) \
{ \
if (slot < 0 || slot >= obj->object_layout->nr_aspects) \
return NULL; \
return (void*)obj + obj->object_layout->aspect_layouts[slot].aspect_offset; \
} \
\
/////////////////////////////////////////////////////////////////////////
// definitions for generic bricks
struct generic_input;
struct generic_output;
struct generic_brick_ops;
struct generic_output_ops;
struct generic_brick_type;
#define GENERIC_BRICK(PREFIX) \
char *brick_name; \
struct PREFIX##_brick_type *type; \
struct PREFIX##_brick_ops *ops; \
int nr_inputs; \
int nr_outputs; \
struct PREFIX##_input **inputs; \
struct PREFIX##_output **outputs; \
struct list_head tmp_head; \
struct generic_brick {
GENERIC_BRICK(generic);
};
#define GENERIC_INPUT(PREFIX) \
char *input_name; \
struct PREFIX##_brick *brick; \
struct PREFIX##_input_type *type; \
struct PREFIX##_output *connect; \
struct generic_input {
GENERIC_INPUT(generic);
};
#define GENERIC_OUTPUT(PREFIX) \
char *output_name; \
struct PREFIX##_brick *brick; \
struct PREFIX##_output_type *type; \
struct PREFIX##_output_ops *ops; \
int nr_connected; \
struct generic_output {
GENERIC_OUTPUT(generic);
};
#define GENERIC_OUTPUT_CALL(OUTPUT,OP,...) \
((OUTPUT) && (OUTPUT)->ops->OP ? \
(OUTPUT)->ops->OP(OUTPUT, ##__VA_ARGS__) : -ENOSYS) \
#define GENERIC_INPUT_CALL(INPUT,OP,...) \
((INPUT) && (INPUT)->connect ? \
GENERIC_OUTPUT_CALL((INPUT)->connect, OP, ##__VA_ARGS__) : \
-ENOSYS) \
#define GENERIC_BRICK_OPS(PREFIX) \
/*int (*brick_start)(struct PREFIX##_brick *brick);*/ \
/*int (*brick_stop)(struct PREFIX##_brick *brick);*/ \
struct generic_brick_ops {
GENERIC_BRICK_OPS(generic);
};
#define GENERIC_OUTPUT_OPS(PREFIX) \
/*int (*output_start)(struct PREFIX##_output *output);*/ \
/*int (*output_stop)(struct PREFIX##_output *output);*/ \
int (*make_object_layout)(struct PREFIX##_output *output, struct generic_object_layout *object_layout); \
struct generic_output_ops {
GENERIC_OUTPUT_OPS(generic)
};
// although possible, *_type should never be extended
#define GENERIC_BRICK_TYPE(PREFIX) \
char type_name[32]; \
int brick_size; \
int max_inputs; \
int max_outputs; \
struct PREFIX##_input_type **default_input_types; \
char **default_input_names; \
struct PREFIX##_output_type **default_output_types; \
char **default_output_names; \
struct PREFIX##_brick_ops *master_ops; \
struct PREFIX##input_types **default_type; \
int (*brick_construct)(struct PREFIX##_brick *brick); \
int (*brick_destruct)(struct PREFIX##_brick *brick); \
struct generic_brick_type {
GENERIC_BRICK_TYPE(generic);
};
#define GENERIC_INPUT_TYPE(PREFIX) \
char type_name[32]; \
int input_size; \
int (*input_construct)(struct PREFIX##_input *input); \
int (*input_destruct)(struct PREFIX##_input *input); \
struct generic_input_type {
GENERIC_INPUT_TYPE(generic);
};
#define GENERIC_OUTPUT_TYPE(PREFIX) \
char type_name[32]; \
int output_size; \
struct PREFIX##_output_ops *master_ops; \
int (*output_construct)(struct PREFIX##_output *output); \
int (*output_destruct)(struct PREFIX##_output *output); \
struct generic_output_type {
GENERIC_OUTPUT_TYPE(generic);
};
int generic_register_brick_type(struct generic_brick_type *new_type);
int generic_unregister_brick_type(struct generic_brick_type *old_type);
#ifdef _STRATEGY // call this only in strategy bricks, never in ordinary bricks
// you need this only if you circumvent generic_brick_init_full()
extern inline int generic_brick_init(struct generic_brick_type *type, struct generic_brick *brick, char *brick_name)
{
brick->brick_name = brick_name;
brick->type = type;
brick->ops = type->master_ops;
brick->nr_inputs = 0;
brick->nr_outputs = 0;
brick->tmp_head.next = brick->tmp_head.prev = &brick->tmp_head;
return 0;
}
extern inline int generic_input_init(struct generic_brick *brick, int index, struct generic_input_type *type, struct generic_input *input, char *input_name)
{
if (index < 0 || index >= brick->type->max_inputs)
return -ENOMEM;
if (brick->inputs[index])
return -EEXIST;
input->input_name = input_name;
input->brick = brick;
input->type = type;
input->connect = NULL;
brick->inputs[index] = input;
brick->nr_inputs++;
return 0;
}
extern inline int generic_output_init(struct generic_brick *brick, int index, struct generic_output_type *type, struct generic_output *output, char *output_name)
{
if (index < 0 || index >= brick->type->max_outputs)
return -ENOMEM;
if (brick->outputs[index])
return -EEXIST;
output->output_name = output_name;
output->brick = brick;
output->type = type;
output->ops = type->master_ops;
output->nr_connected = 0;
brick->outputs[index] = output;
brick->nr_outputs++;
return 0;
}
extern inline int generic_size(struct generic_brick_type *brick_type)
{
int size = brick_type->brick_size;
int i;
size += brick_type->max_inputs * sizeof(void*);
for (i = 0; i < brick_type->max_inputs; i++) {
size += brick_type->default_input_types[i]->input_size;
}
size += brick_type->max_outputs * sizeof(void*);
for (i = 0; i < brick_type->max_outputs; i++) {
size += brick_type->default_output_types[i]->output_size;
}
return size;
}
/* If possible, use this instead of generic_*_init().
* input_types and output_types may be NULL => use default_*_types
*/
int generic_brick_init_full(
void *data,
int size,
struct generic_brick_type *brick_type,
struct generic_input_type **input_types,
struct generic_output_type **output_types,
char **names);
int generic_brick_exit_full(
struct generic_brick *brick);
extern inline int generic_connect(struct generic_input *input, struct generic_output *output)
{
MARS_DBG("generic_connect(input=%p, output=%p)\n", input, output);
if (!input || !output)
return -EINVAL;
if (input->connect)
return -EEXIST;
input->connect = output;
output->nr_connected++; //TODO: protect against races, e.g. atomic_t
MARS_DBG("now nr_connected=%d\n", output->nr_connected);
return 0;
}
extern inline int generic_disconnect(struct generic_input *input)
{
MARS_DBG("generic_disconnect(input=%p)\n", input);
if (!input)
return -EINVAL;
if (input->connect) {
input->connect->nr_connected--; //TODO: protect against races, e.g. atomic_t
MARS_DBG("now nr_connected=%d\n", input->connect->nr_connected);
input->connect = NULL;
}
return 0;
}
#endif // _STRATEGY
// simple wrappers for type safety
#define GENERIC_MAKE_FUNCTIONS(PREFIX) \
extern inline int PREFIX##_register_brick_type(void) \
{ \
extern struct PREFIX##_brick_type PREFIX##_brick_type; \
return generic_register_brick_type((struct generic_brick_type*)&PREFIX##_brick_type); \
}\
\
extern inline int PREFIX##_unregister_brick_type(void) \
{ \
extern struct PREFIX##_brick_type PREFIX##_brick_type; \
return generic_unregister_brick_type((struct generic_brick_type*)&PREFIX##_brick_type); \
} \
\
_STRATEGY_CODE( \
extern struct PREFIX##_brick_type PREFIX##_brick_type; \
extern struct PREFIX##_input_type PREFIX##_input_type; \
extern struct PREFIX##_output_type PREFIX##_output_type; \
\
static inline int PREFIX##_brick_init(struct PREFIX##_brick *brick, char *brick_name) \
{ \
return generic_brick_init((struct generic_brick_type*)&PREFIX##_brick_type, (struct generic_brick*)brick, brick_name); \
} \
\
static inline int PREFIX##_input_init(struct PREFIX##_brick *brick, int index, struct PREFIX##_input *input, char *input_name) \
{ \
return generic_input_init( \
(struct generic_brick*)brick, \
index, \
(struct generic_input_type*)&PREFIX##_input_type, \
(struct generic_input*)input, \
input_name); \
} \
\
static inline int PREFIX##_output_init(struct PREFIX##_brick *brick, int index, struct PREFIX##_input *output, char *output_name) \
{ \
return generic_output_init( \
(struct generic_brick*)brick, \
index, \
(struct generic_output_type*)&PREFIX##_output_type, \
(struct generic_output*)output, \
output_name); \
} \
\
extern inline int PREFIX##_size(struct PREFIX##_brick_type *brick_type) \
{ \
return generic_size((struct generic_brick_type*)brick_type); \
} \
\
extern inline int PREFIX##_brick_init_full( \
void *data, \
int size, \
struct PREFIX##_brick_type *brick_type, \
struct PREFIX##_input_type **input_types, \
struct PREFIX##_output_type **output_types, \
char **names) \
{ \
return generic_brick_init_full( \
data, \
size, \
(struct generic_brick_type*)brick_type, \
(struct generic_input_type**)input_types, \
(struct generic_output_type**)output_types, \
(char**)names); \
} \
\
extern inline int PREFIX##_brick_exit_full( \
struct PREFIX##_brick *brick) \
{ \
return generic_brick_exit_full( \
(struct generic_brick*)brick); \
} \
)
/* Define a pair of connectable subtypes.
* For type safety, use this for all possible combinations.
* Yes, this may become quadratic in large type systems, but
* (a) thou shalt not define many types,
* (b) these macros generate only definitions, but no additional
* code at runtime.
*/
#define GENERIC_MAKE_CONNECT(INPUT_PREFIX,OUTPUT_PREFIX) \
\
_STRATEGY_CODE( \
\
extern inline int INPUT_PREFIX##_##OUTPUT_PREFIX##_connect( \
struct INPUT_PREFIX##_input *input, \
struct OUTPUT_PREFIX##_output *output) \
{ \
return generic_connect((struct generic_input*)input, (struct generic_output*)output); \
} \
\
extern inline int INPUT_PREFIX##_##OUTPUT_PREFIX####_disconnect( \
struct INPUT_PREFIX##_input *input) \
{ \
return generic_disconnect((struct generic_input*)input); \
} \
)
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
// MARS-specific definitions
@@ -484,6 +31,7 @@ extern inline int INPUT_PREFIX##_##OUTPUT_PREFIX####_disconnect( \
// object stuff
/* mars_io */
extern const struct generic_object_type mars_io_type;
struct mars_io_aspect {
@@ -508,11 +56,12 @@ struct mars_io_object {
};
/* mars_buf */
#define MARS_BUF_UPTODATE 1
#define MARS_BUF_READING 2
#define MARS_BUF_WRITING 4
extern const struct generic_object_type mars_buffer_type;
extern const struct generic_object_type mars_buf_type;
struct mars_buf_aspect {
GENERIC_ASPECT(mars_buf);
@@ -527,7 +76,7 @@ struct mars_buf_object_layout {
};
#define MARS_BUF_OBJECT(PREFIX) \
MARS_IO_OBJECT(PREFIX); \
GENERIC_OBJECT(PREFIX); \
spinlock_t buf_lock; \
void *buf_data; \
int buf_len; \
@@ -538,11 +87,32 @@ struct mars_buf_object {
MARS_BUF_OBJECT(mars_buf);
};
GENERIC_OBJECT_LAYOUT_FUNCTIONS(mars_io);
GENERIC_OBJECT_LAYOUT_FUNCTIONS(mars_buf);
/* mars_buf_callback_object */
GENERIC_OBJECT_FUNCTIONS(mars_io);
GENERIC_OBJECT_FUNCTIONS(mars_buf);
extern const struct generic_object_type mars_buf_callback_type;
struct mars_buf_callback_aspect {
GENERIC_ASPECT(mars_buf_callback);
};
struct mars_buf_callback_aspect_layout {
GENERIC_ASPECT_LAYOUT(mars_buf_callback);
};
struct mars_buf_callback_object_layout {
GENERIC_OBJECT_LAYOUT(mars_buf_callback);
};
#define MARS_BUF_CALLBACK_OBJECT(PREFIX) \
GENERIC_OBJECT(PREFIX); \
struct mars_buf_object *cb_mbuf; \
int cb_rw; \
int(*cb_buf_endio)(struct mars_buf_callback_object *mbuf_cb); \
int cb_error; \
struct mars_buf_callback_object {
MARS_BUF_CALLBACK_OBJECT(mars_buf_callback);
};
// internal helper structs
@@ -552,7 +122,6 @@ struct mars_info {
};
// brick stuff
extern const struct generic_object_type mars_buf_type;
#define MARS_BRICK(PREFIX) \
GENERIC_BRICK(PREFIX); \
@@ -587,7 +156,7 @@ struct mars_output {
/* mars_buf */ \
int (*mars_buf_get)(struct PREFIX##_output *output, struct mars_buf_object **mbuf, struct mars_buf_object_layout *buf_layout, loff_t pos, int len); \
int (*mars_buf_put)(struct PREFIX##_output *output, struct mars_buf_object *mbuf); \
int (*mars_buf_io)(struct PREFIX##_output *output, struct mars_buf_object *mbuf, int rw, int(*buf_endio)(struct mars_buf_object *mbuf)); \
int (*mars_buf_io)(struct PREFIX##_output *output, struct mars_buf_callback_object *mbuf_cb); \
// all non-extendable types
#define _MARS_TYPES(PREFIX) \
@@ -610,14 +179,39 @@ struct PREFIX##_input_type { \
struct PREFIX##_output_type { \
GENERIC_OUTPUT_TYPE(PREFIX); \
}; \
\
GENERIC_MAKE_FUNCTIONS(PREFIX); \
GENERIC_MAKE_CONNECT(PREFIX,PREFIX); \
#define MARS_TYPES(PREFIX) \
_MARS_TYPES(PREFIX) \
GENERIC_MAKE_CONNECT(generic,PREFIX); \
GENERIC_MAKE_CONNECT(mars,PREFIX); \
GENERIC_ASPECT_LAYOUT_FUNCTIONS(PREFIX,mars_io); \
GENERIC_ASPECT_LAYOUT_FUNCTIONS(PREFIX,mars_buf); \
GENERIC_ASPECT_LAYOUT_FUNCTIONS(PREFIX,mars_buf_callback); \
GENERIC_ASPECT_FUNCTIONS(PREFIX,mars_io); \
GENERIC_ASPECT_FUNCTIONS(PREFIX,mars_buf); \
GENERIC_ASPECT_FUNCTIONS(PREFIX,mars_buf_callback); \
// instantiate all mars-specific functions
GENERIC_OBJECT_LAYOUT_FUNCTIONS(mars_io);
GENERIC_OBJECT_LAYOUT_FUNCTIONS(mars_buf);
GENERIC_OBJECT_LAYOUT_FUNCTIONS(mars_buf_callback);
GENERIC_ASPECT_LAYOUT_FUNCTIONS(mars,mars_io);
GENERIC_ASPECT_LAYOUT_FUNCTIONS(mars,mars_buf);
GENERIC_ASPECT_LAYOUT_FUNCTIONS(mars,mars_buf_callback);
GENERIC_OBJECT_FUNCTIONS(mars_io);
GENERIC_OBJECT_FUNCTIONS(mars_buf);
GENERIC_OBJECT_FUNCTIONS(mars_buf_callback);
GENERIC_ASPECT_FUNCTIONS(mars,mars_io);
GENERIC_ASPECT_FUNCTIONS(mars,mars_buf);
GENERIC_ASPECT_FUNCTIONS(mars,mars_buf_callback);
/////////////////////////////////////////////////////////////////////////
@@ -626,4 +220,28 @@ GENERIC_MAKE_CONNECT(mars,PREFIX); \
_MARS_TYPES(mars);
GENERIC_MAKE_CONNECT(generic,mars);
#define MARS_MAKE_STATICS(PREFIX) \
\
int PREFIX##_brick_nr = -EEXIST; \
EXPORT_SYMBOL_GPL(PREFIX##_brick_nr); \
\
static const struct generic_aspect_type PREFIX##_mars_io_aspect_type = { \
.aspect_type_name = #PREFIX "_mars_io_aspect_type", \
.aspect_size = sizeof(struct PREFIX##_mars_io_aspect), \
.init_fn = PREFIX##_mars_io_aspect_init_fn, \
}; \
\
static const struct generic_aspect_type PREFIX##_mars_buf_aspect_type = { \
.aspect_type_name = #PREFIX "_mars_buf_aspect_type", \
.aspect_size = sizeof(struct PREFIX##_mars_buf_aspect), \
.init_fn = PREFIX##_mars_buf_aspect_init_fn, \
}; \
\
static const struct generic_aspect_type PREFIX##_mars_buf_callback_aspect_type = { \
.aspect_type_name = #PREFIX "_mars_buf_callback_aspect_type", \
.aspect_size = sizeof(struct PREFIX##_mars_buf_callback_aspect), \
.init_fn = PREFIX##_mars_buf_callback_aspect_init_fn, \
}; \
#endif

mars_buf.c

@@ -2,11 +2,14 @@
// Buf brick (just for demonstration)
//#define MARS_DEBUGGING
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include "mars.h"
@@ -63,9 +66,9 @@ static inline void prune_cache(struct buf_brick *brick)
brick->current_count--;
brick->alloc_count--;
spin_unlock(&brick->buf_lock);
spin_unlock_irq(&brick->buf_lock);
free_buf(brick, bf);
spin_lock(&brick->buf_lock);
spin_lock_irq(&brick->buf_lock);
}
}
@@ -125,21 +128,23 @@ static int make_bio(struct buf_brick *brick, struct bio **_bio, void *data, int
struct bio *bio = NULL;
struct block_device *bdev;
bdev = brick->base_info.backing_file->f_mapping->host->i_sb->s_bdev;
if (unlikely(!brick->got_info)) {
struct request_queue *q;
status = get_info(brick);
if (status < 0)
goto out;
bdev = brick->base_info.backing_file->f_mapping->host->i_sb->s_bdev;
q = bdev_get_queue(bdev);
brick->bvec_max = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
} else {
bdev = brick->base_info.backing_file->f_mapping->host->i_sb->s_bdev;
}
sector = pos << 9; // TODO: make dynamic
sector_offset = pos & ((1 << 9) - 1); // TODO: make dynamic
// round down to start of first sector
data -= sector_offset;
len -= sector_offset;
len += sector_offset;
pos -= sector_offset;
page_offset = pos & (PAGE_SIZE - 1);
bvec_count = len / PAGE_SIZE + 1;
@@ -182,12 +187,11 @@ static int make_bio(struct buf_brick *brick, struct bio **_bio, void *data, int
bio->bi_size = i * PAGE_SIZE;
bio->bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_private = brick; // ????
bio->bi_private = NULL; // must be filled in later
bio->bi_end_io = NULL; // must be filled in later
if (status >= sector_offset)
status -= sector_offset;
#if 0
bio->bi_end_io = ...;
#endif
status = len;
out:
*_bio = bio;
return status;
@@ -225,7 +229,7 @@ static int buf_buf_get(struct buf_output *output, struct mars_buf_object **_mbuf
if (!mbuf)
goto err_free;
mbuf_a = mars_buf_get_aspect(mbuf, output->buf_aspect_slot);
mbuf_a = buf_mars_buf_get_aspect(output, mbuf);
if (!mbuf_a)
goto err_free;
@@ -235,12 +239,15 @@ static int buf_buf_get(struct buf_output *output, struct mars_buf_object **_mbuf
base_pos = pos & ~((loff_t)brick->backing_size - 1);
spin_lock(&brick->buf_lock);
spin_lock_irq(&brick->buf_lock);
bf = hash_find(brick, base_pos);
if (!bf) {
MARS_DBG("buf_get() hash nothing found\n");
if (unlikely(list_empty(&brick->free_anchor))) {
struct buf_head *test_bf;
spin_unlock(&brick->buf_lock);
MARS_DBG("buf_get() alloc new buf_head\n");
spin_unlock_irq(&brick->buf_lock);
status = -ENOMEM;
bf = kzalloc(sizeof(struct buf_head), GFP_KERNEL);
@@ -252,14 +259,14 @@ static int buf_buf_get(struct buf_output *output, struct mars_buf_object **_mbuf
goto err_free2;
bf->bf_brick = brick;
INIT_LIST_HEAD(&bf->bf_mbuf_anchor);
atomic_set(&bf->bf_bio_count, 0);
//INIT_LIST_HEAD(&bf->bf_mbuf_anchor);
INIT_LIST_HEAD(&bf->bf_lru_head);
INIT_LIST_HEAD(&bf->bf_hash_head);
INIT_LIST_HEAD(&bf->bf_read_pending_anchor);
INIT_LIST_HEAD(&bf->bf_write_pending_anchor);
INIT_LIST_HEAD(&bf->bf_io_pending_anchor);
INIT_LIST_HEAD(&bf->bf_again_write_pending_anchor);
spin_lock(&brick->buf_lock);
spin_lock_irq(&brick->buf_lock);
brick->alloc_count++;
/* during the open lock, somebody might have raced
* against us at the same base_pos...
@@ -272,22 +279,23 @@ static int buf_buf_get(struct buf_output *output, struct mars_buf_object **_mbuf
} else {
bf = container_of(brick->free_anchor.next, struct buf_head, bf_lru_head);
}
MARS_DBG("buf_get() bf=%p\n", bf);
bf->bf_pos = base_pos;
bf->bf_flags = 0;
bf->bf_count = 0;
atomic_set(&bf->bf_count, 0);
hash_insert(brick, bf);
brick->current_count++;
}
mbuf_a->bf = bf;
list_add(&mbuf_a->bf_member_head, &bf->bf_mbuf_anchor);
bf->bf_count++;
mbuf_a->bfa_bf = bf;
atomic_inc(&bf->bf_count);
MARS_DBG("buf_get() bf=%p initial bf_count=%d\n", bf, atomic_read(&bf->bf_count));
list_del_init(&bf->bf_lru_head);
mbuf->buf_flags = bf->bf_flags;
spin_unlock(&brick->buf_lock);
spin_unlock_irq(&brick->buf_lock);
mbuf->buf_data = bf->bf_data + (pos - base_pos);
mbuf->buf_len = brick->backing_size - (pos - base_pos);
@@ -304,23 +312,23 @@ err_free:
return status;
}
static int buf_buf_put(struct buf_output *output, struct mars_buf_object *mbuf)
static int _buf_buf_put(struct buf_head *bf)
{
struct buf_brick *brick = output->brick;
struct buf_mars_buf_aspect *mbuf_a;
struct buf_head *bf;
struct buf_brick *brick;
mbuf_a = mars_buf_get_aspect(mbuf, output->buf_aspect_slot);
if (!mbuf_a)
return -EILSEQ;
MARS_DBG("_buf_buf_put() bf=%p bf_count=%d\n", bf, atomic_read(&bf->bf_count));
bf = mbuf_a->bf;
if (!atomic_dec_and_test(&bf->bf_count))
return 0;
spin_lock(&brick->buf_lock);
list_del(&mbuf_a->bf_member_head);
if (--bf->bf_count <= 0) {
MARS_DBG("_buf_buf_put() ZERO_COUNT\n");
brick = bf->bf_brick;
spin_lock_irq(&brick->buf_lock);
if (atomic_read(&bf->bf_count) <= 0) {
struct list_head *where = &brick->lru_anchor;
BUG_ON(bf->bf_count < 0);
BUG_ON(bf->bf_flags & (MARS_BUF_READING | MARS_BUF_WRITING));
if (unlikely(!(bf->bf_flags & MARS_BUF_UPTODATE))) {
list_del_init(&bf->bf_hash_head);
@@ -330,6 +338,7 @@ static int buf_buf_put(struct buf_output *output, struct mars_buf_object *mbuf)
list_del(&bf->bf_lru_head);
list_add(&bf->bf_lru_head, where);
}
// lru freeing
while (brick->current_count > brick->max_count) {
if (list_empty(&brick->lru_anchor))
@@ -340,57 +349,62 @@ static int buf_buf_put(struct buf_output *output, struct mars_buf_object *mbuf)
brick->current_count--;
list_add(&bf->bf_lru_head, &brick->free_anchor);
}
prune_cache(brick);
spin_unlock(&brick->buf_lock);
spin_unlock_irq(&brick->buf_lock);
return 0;
}
static int buf_buf_io(struct buf_output *output, struct mars_buf_object *mbuf, int rw, int(*buf_endio)(struct mars_buf_object *mbuf))
static int buf_buf_put(struct buf_output *output, struct mars_buf_object *mbuf)
{
struct buf_brick *brick = output->brick;
struct buf_mars_buf_aspect *mbuf_a;
struct buf_head *bf;
bool tostart = false;
mbuf_a = mars_buf_get_aspect(mbuf, output->buf_aspect_slot);
mbuf_a = buf_mars_buf_get_aspect(output, mbuf);
if (!mbuf_a)
return -EILSEQ;
bf = mbuf_a->bf;
bf = mbuf_a->bfa_bf;
MARS_DBG("buf_buf_put() mbuf=%p mbuf_a=%p bf=%p\n", mbuf, mbuf_a, bf);
return _buf_buf_put(bf);
}
spin_lock(&brick->buf_lock);
if (rw) { // WRITE
BUG_ON(bf->bf_flags & MARS_BUF_READING);
if (!(bf->bf_flags & MARS_BUF_WRITING)) {
bf->bf_flags |= MARS_BUF_WRITING;
tostart = true;
list_add(&mbuf_a->bf_pending_head, &bf->bf_write_pending_anchor);
static int _buf_endio(struct mars_io_object *mio)
{
struct bio *bio = mio->orig_bio;
MARS_DBG("_buf_endio() mio=%p bio=%p\n", mio, bio);
if (bio) {
mio->orig_bio = NULL;
if (!bio->bi_size) {
bio_endio(bio, 0);
} else {
list_add(&mbuf_a->bf_pending_head, &bf->bf_again_write_pending_anchor);
MARS_ERR("NYI: RETRY LOGIC %u\n", bio->bi_size);
bio_endio(bio, -EIO);
}
} else { // READ
if (bf->bf_flags & (MARS_BUF_UPTODATE | MARS_BUF_WRITING)) {
spin_unlock(&brick->buf_lock);
return buf_endio(mbuf);
}
if (!(bf->bf_flags & MARS_BUF_READING)) {
bf->bf_flags |= MARS_BUF_READING;
tostart = true;
}
list_add(&mbuf_a->bf_pending_head, &bf->bf_read_pending_anchor);
}
spin_unlock(&brick->buf_lock);
} // else lower layers have already signalled the orig_bio
if (tostart) {
struct buf_input *input = output->brick->inputs[0];
kfree(mio);
return 0;
}
static void _buf_bio_callback(struct bio *bio, int code);
static int _buf_make_bios(struct buf_brick *brick, struct buf_head *bf, void *start_data, loff_t start_pos, int start_len)
{
while (start_len > 0) {
struct buf_input *input = brick->inputs[0];
struct mars_io_object *data;
struct mars_io_object *mio;
struct buf_mars_io_aspect *mio_a;
struct bio *bio = NULL;
int len;
int status;
if (unlikely(!brick->mio_layout)) {
brick->mio_layout = buf_init_object_layout(input->connect);
brick->mio_layout = buf_init_object_layout(brick->outputs[0]);
if (!brick->mio_layout)
return -ENOMEM;
}
@@ -403,12 +417,40 @@ static int buf_buf_io(struct buf_output *output, struct mars_buf_object *mbuf, i
if (!mio)
goto err_free;
status = make_bio(brick, &bio, mbuf->buf_data, mbuf->buf_len, mbuf->buf_pos);
if (status < 0 || !bio)
mio_a = buf_mars_io_get_aspect(brick->outputs[0], mio);
if (!mio_a)
goto err_free2;
//...
return GENERIC_INPUT_CALL(input, mars_io, mio);
len = make_bio(brick, &bio, start_data, start_len, start_pos);
if (len < 0 || !bio)
goto err_free2;
mio_a->mia_bf = bf;
atomic_inc(&bf->bf_bio_count);
bio->bi_private = mio_a;
bio->bi_end_io = _buf_bio_callback;
mio->orig_bio = bio;
mio->mars_endio = _buf_endio;
MARS_DBG("starting buf IO mio=%p bio=%p len=%d bf=%p bf_count=%d bf_bio_count=%d\n", mio, bio, len, bf, atomic_read(&bf->bf_count), atomic_read(&bf->bf_bio_count));
#if 1
status = GENERIC_INPUT_CALL(input, mars_io, mio);
if (status < 0)
goto err_free3;
#else
// fake IO for testing
bio->bi_size = 0;
mio->mars_endio(mio);
#endif
start_data -= len;
start_pos -= len;
start_len -= len;
continue;
err_free3:
atomic_dec(&bf->bf_bio_count);
bio_put(bio);
err_free2:
kfree(mio);
err_free:
@@ -418,25 +460,214 @@ static int buf_buf_io(struct buf_output *output, struct mars_buf_object *mbuf, i
return 0;
}
static void _buf_bio_callback(struct bio *bio, int code)
{
struct buf_mars_io_aspect *mio_a;
struct buf_head *bf;
struct buf_brick *brick;
void *start_data = NULL;
loff_t start_pos = 0;
int start_len = 0;
int old_flags;
mio_a = bio->bi_private;
bf = mio_a->mia_bf;
MARS_DBG("_buf_bio_callback() mio=%p bio=%p bf=%p bf_count=%d bf_bio_count=%d code=%d\n", mio_a->object, bio, bf, atomic_read(&bf->bf_count), atomic_read(&bf->bf_bio_count), code);
if (unlikely(mio_a->mia_end_io_called)) {
MARS_ERR("Oops, somebody called us twice on the same bio. I'm not amused.\n");
msleep(5000);
return;
} else {
mio_a->mia_end_io_called = true;
bio_put(bio);
}
if (code < 0) {
// this can race, but we don't worry about the exact error code
bf->bf_bio_status = code;
}
if (!atomic_dec_and_test(&bf->bf_bio_count))
return;
MARS_DBG("_buf_bio_callback() ZERO_COUNT mio=%p bio=%p bf=%p code=%d\n", mio_a->object, bio, bf, code);
brick = bf->bf_brick;
spin_lock_irq(&brick->buf_lock);
// signal success by calling all callbacks.
while (!list_empty(&bf->bf_io_pending_anchor)) {
struct buf_mars_buf_callback_aspect *mbuf_cb_a = container_of(bf->bf_io_pending_anchor.next, struct buf_mars_buf_callback_aspect, bfc_pending_head);
struct mars_buf_callback_object *mbuf_cb = mbuf_cb_a->object;
BUG_ON(mbuf_cb_a->bfc_bfa->bfa_bf != bf);
mbuf_cb->cb_error = bf->bf_bio_status;
list_del(&mbuf_cb_a->bfc_pending_head);
/* drop normal refcount.
* full _buf_buf_put() not needed, see below. */
atomic_dec(&bf->bf_count);
MARS_DBG("_buf_bio_callback() bf=%p now bf_count=%d\n", bf, atomic_read(&bf->bf_count));
spin_unlock_irq(&brick->buf_lock);
mbuf_cb->cb_buf_endio(mbuf_cb);
spin_lock_irq(&brick->buf_lock);
}
old_flags = bf->bf_flags;
if (!bf->bf_bio_status && (old_flags & MARS_BUF_READING)) {
bf->bf_flags |= MARS_BUF_UPTODATE;
}
// clear the flags. may be re-enabled later.
bf->bf_flags &= ~(MARS_BUF_READING | MARS_BUF_WRITING);
/* move pending jobs to work.
* this is in essence an automatic restart mechanism.
*/
while (!list_empty(&bf->bf_again_write_pending_anchor)) {
struct buf_mars_buf_callback_aspect *mbuf_cb_a = container_of(bf->bf_again_write_pending_anchor.next, struct buf_mars_buf_callback_aspect, bfc_pending_head);
struct mars_buf_object *mbuf = mbuf_cb_a->bfc_bfa->object;
BUG_ON(mbuf_cb_a->bfc_bfa->bfa_bf != bf);
list_del(&mbuf_cb_a->bfc_pending_head);
list_add_tail(&mbuf_cb_a->bfc_pending_head, &bf->bf_io_pending_anchor);
// re-enable flags
bf->bf_flags |= MARS_BUF_WRITING;
bf->bf_bio_status = 0;
if (!start_len) {
start_data = mbuf->buf_data;
start_pos = mbuf->buf_pos;
start_len = mbuf->buf_len;
} else if (start_data != mbuf->buf_data ||
start_pos != mbuf->buf_pos ||
start_len != mbuf->buf_len) {
start_data = bf->bf_data;
start_pos = bf->bf_pos;
start_len = brick->backing_size;
}
}
spin_unlock_irq(&brick->buf_lock);
if (start_len) {
// in this case, the extra refcount is kept => nothing to do
_buf_make_bios(brick, bf, start_data, start_pos, start_len);
} else if (old_flags & (MARS_BUF_READING | MARS_BUF_WRITING)) {
// drop extra refcount for pending IO
_buf_buf_put(bf);
}
}
static int buf_buf_io(struct buf_output *output, struct mars_buf_callback_object *mbuf_cb)
{
struct buf_brick *brick = output->brick;
struct mars_buf_object *mbuf = mbuf_cb->cb_mbuf;
struct buf_mars_buf_aspect *mbuf_a;
struct buf_mars_buf_callback_aspect *mbuf_cb_a;
struct buf_head *bf;
void *start_data = NULL;
loff_t start_pos = 0;
int start_len = 0;
if (!mbuf) {
MARS_ERR("internal problem: forgotten to supply mbuf\n");
return -EILSEQ;
}
mbuf_cb_a = buf_mars_buf_callback_get_aspect(output, mbuf_cb);
if (!mbuf_cb_a) {
MARS_ERR("internal problem: mbuf_cb aspect does not work\n");
return -EILSEQ;
}
mbuf_a = buf_mars_buf_get_aspect(output, mbuf);
if (!mbuf_a) {
MARS_ERR("internal problem: mbuf aspect does not work\n");
return -EILSEQ;
}
mbuf_cb_a->bfc_bfa = mbuf_a;
bf = mbuf_a->bfa_bf;
spin_lock_irq(&brick->buf_lock);
if (mbuf_cb->cb_rw) { // WRITE
BUG_ON(bf->bf_flags & MARS_BUF_READING);
if (!(bf->bf_flags & MARS_BUF_WRITING)) {
bf->bf_flags |= MARS_BUF_WRITING;
bf->bf_bio_status = 0;
// grab an extra refcount for pending IO
atomic_inc(&bf->bf_count);
MARS_DBG("buf_buf_io() bf=%p extra bf_count=%d\n", bf, atomic_read(&bf->bf_count));
start_data = mbuf->buf_data;
start_pos = mbuf->buf_pos;
start_len = mbuf->buf_len;
list_add(&mbuf_cb_a->bfc_pending_head, &bf->bf_io_pending_anchor);
} else {
list_add(&mbuf_cb_a->bfc_pending_head, &bf->bf_again_write_pending_anchor);
MARS_INF("postponing %lld %d\n", mbuf->buf_pos, mbuf->buf_len);
}
} else { // READ
if (bf->bf_flags & (MARS_BUF_UPTODATE | MARS_BUF_WRITING)) {
spin_unlock_irq(&brick->buf_lock);
return mbuf_cb->cb_buf_endio(mbuf_cb);
}
if (!(bf->bf_flags & MARS_BUF_READING)) {
bf->bf_flags |= MARS_BUF_READING;
bf->bf_bio_status = 0;
// grab an extra refcount for pending IO
atomic_inc(&bf->bf_count);
MARS_DBG("buf_buf_io() bf=%p extra bf_count=%d\n", bf, atomic_read(&bf->bf_count));
start_data = (void*)((unsigned long)mbuf->buf_data & (brick->backing_size - 1));
start_pos = mbuf->buf_pos & (brick->backing_size - 1);
start_len = brick->backing_size;
}
list_add(&mbuf_cb_a->bfc_pending_head, &bf->bf_io_pending_anchor);
}
// grab normal refcount for each mbuf_cb
atomic_inc(&bf->bf_count);
MARS_DBG("buf_buf_io() bf=%p normal bf_count=%d\n", bf, atomic_read(&bf->bf_count));
spin_unlock_irq(&brick->buf_lock);
return _buf_make_bios(brick, bf, start_data, start_pos, start_len);
}
//////////////// object / aspect constructors / destructors ///////////////
static int buf_mars_io_aspect_init_fn(struct mars_io_aspect *_ini, void *_init_data)
static int buf_mars_io_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct buf_mars_io_aspect *ini = (void*)_ini;
ini->mia_bf = NULL;
ini->mia_end_io_called = false;
return 0;
}
static int buf_mars_buf_aspect_init_fn(struct mars_buf_aspect *_ini, void *_init_data)
static int buf_mars_buf_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct buf_mars_buf_aspect *ini = (void*)_ini;
INIT_LIST_HEAD(&ini->bf_member_head);
INIT_LIST_HEAD(&ini->bf_pending_head);
ini->bfa_bf = NULL;
return 0;
}
static int buf_mars_buf_callback_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct buf_mars_buf_callback_aspect *ini = (void*)_ini;
INIT_LIST_HEAD(&ini->bfc_pending_head);
ini->bfc_bfa = NULL;
return 0;
}
MARS_MAKE_STATICS(buf);
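/* make_object_layout(): register this brick's aspect types on a generic
 * object layout. For each known object type, the corresponding
 * buf_*_add_aspect() call records the aspect type, and the aspect size
 * is returned so callers can account for the extra per-object space.
 */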
static int buf_make_object_layout(struct buf_output *output, struct generic_object_layout *object_layout)
{
const struct generic_object_type *object_type = object_layout->type;
const struct generic_object_type *object_type = object_layout->object_type;
int status;
int aspect_size = 0;
struct buf_brick *brick = output->brick;
@ -444,14 +675,18 @@ static int buf_make_object_layout(struct buf_output *output, struct generic_obje
if (object_type == &mars_io_type) {
aspect_size = sizeof(struct buf_mars_io_aspect);
status = mars_io_add_aspect(object_layout, aspect_size, buf_mars_io_aspect_init_fn, output);
output->io_aspect_slot = status;
status = buf_mars_io_add_aspect(output, object_layout, &buf_mars_io_aspect_type);
} else if (object_type == &mars_buf_type) {
aspect_size = sizeof(struct buf_mars_buf_aspect);
status = mars_buf_add_aspect(object_layout, aspect_size, buf_mars_buf_aspect_init_fn, output);
status = buf_mars_buf_add_aspect(output, object_layout, &buf_mars_buf_aspect_type);
if (status < 0)
return status;
return aspect_size;
} else if (object_type == &mars_buf_callback_type) {
aspect_size = sizeof(struct buf_mars_buf_callback_aspect);
status = buf_mars_buf_callback_add_aspect(output, object_layout, &buf_mars_buf_callback_aspect_type);
if (status < 0)
return status;
output->buf_aspect_slot = status;
return aspect_size;
} else {
return 0;
@ -511,27 +746,27 @@ static struct buf_output_ops buf_output_ops = {
.mars_buf_io = buf_buf_io,
};
static struct buf_input_type buf_input_type = {
static const struct buf_input_type buf_input_type = {
.type_name = "buf_input",
.input_size = sizeof(struct buf_input),
};
static struct buf_input_type *buf_input_types[] = {
static const struct buf_input_type *buf_input_types[] = {
&buf_input_type,
};
static struct buf_output_type buf_output_type = {
static const struct buf_output_type buf_output_type = {
.type_name = "buf_output",
.output_size = sizeof(struct buf_output),
.master_ops = &buf_output_ops,
.output_construct = &buf_output_construct,
};
static struct buf_output_type *buf_output_types[] = {
static const struct buf_output_type *buf_output_types[] = {
&buf_output_type,
};
struct buf_brick_type buf_brick_type = {
const struct buf_brick_type buf_brick_type = {
.type_name = "buf_brick",
.brick_size = sizeof(struct buf_brick),
.max_inputs = 1,

View File

@ -3,25 +3,35 @@
#define MARS_BUF_H
#include <linux/list.h>
#include <asm/atomic.h>
#define MARS_BUF_HASH_MAX 512
struct buf_mars_io_aspect {
GENERIC_ASPECT(mars_io);
struct buf_head *mia_bf;
bool mia_end_io_called;
};
struct buf_mars_buf_aspect {
GENERIC_ASPECT(mars_buf);
struct list_head bf_member_head;
struct list_head bf_pending_head;
struct buf_head *bf;
struct buf_head *bfa_bf;
};
struct buf_mars_buf_callback_aspect {
GENERIC_ASPECT(mars_buf_callback);
struct list_head bfc_pending_head;
struct buf_mars_buf_aspect *bfc_bfa;
};
struct buf_brick {
MARS_BRICK(buf);
/* brick parameters */
int backing_order;
int backing_size;
int max_count;
/* internals */
int current_count;
int alloc_count;
struct mars_io_object_layout *mio_layout;
@ -45,25 +55,24 @@ struct buf_input {
struct buf_output {
MARS_OUTPUT(buf);
int io_aspect_slot;
int buf_aspect_slot;
};
MARS_TYPES(buf);
struct buf_head {
struct buf_brick *bf_brick;
void *bf_data;
loff_t bf_pos;
int bf_flags;
int bf_count;
void *bf_data;
loff_t bf_pos;
int bf_flags;
atomic_t bf_count;
int bf_bio_status;
atomic_t bf_bio_count;
// lists for caching
struct list_head bf_mbuf_anchor; // all current mbuf members
//struct list_head bf_mbuf_anchor; // all current mbuf members
struct list_head bf_lru_head;
struct list_head bf_hash_head;
// lists for IO
struct list_head bf_read_pending_anchor;
struct list_head bf_write_pending_anchor;
struct list_head bf_io_pending_anchor;
struct list_head bf_again_write_pending_anchor;
};

View File

@ -312,7 +312,7 @@ static int device_sio_mars_queue(struct device_sio_output *output, struct mars_i
spin_unlock_irq(&output->g_lock);
index = (index % WITH_THREAD) + 1;
}
aspect = mars_io_get_aspect(mio, output->aspect_slot);
aspect = device_sio_mars_io_get_aspect(output, mio);
tinfo = &output->tinfo[index];
MARS_DBG("queueing %p on %d\n", mio, index);
spin_lock_irq(&tinfo->lock);
@ -370,25 +370,40 @@ static int device_sio_get_info(struct device_sio_output *output, struct mars_inf
//////////////// object / aspect constructors / destructors ///////////////
static int device_sio_aspect_init_fn(struct mars_io_aspect *_ini, void *_init_data)
static int device_sio_mars_io_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct device_sio_mars_io_aspect *ini = (void*)_ini;
INIT_LIST_HEAD(&ini->io_head);
return 0;
}
static int device_sio_mars_buf_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct device_sio_mars_buf_aspect *ini = (void*)_ini;
(void)ini;
return 0;
}
static int device_sio_mars_buf_callback_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct device_sio_mars_buf_callback_aspect *ini = (void*)_ini;
(void)ini;
return 0;
}
MARS_MAKE_STATICS(device_sio);
static int device_sio_make_object_layout(struct device_sio_output *output, struct generic_object_layout *object_layout)
{
const struct generic_object_type *object_type = object_layout->type;
const struct generic_object_type *object_type = object_layout->object_type;
int slot;
if (object_type != &mars_io_type)
return 0;
slot = mars_io_add_aspect(object_layout, sizeof(struct device_sio_mars_io_aspect), device_sio_aspect_init_fn, output);
slot = device_sio_mars_io_add_aspect(output, object_layout, &device_sio_mars_io_aspect_type);
if (slot < 0)
return slot;
output->aspect_slot = slot;
return sizeof(struct device_sio_mars_io_aspect);
}
@ -482,7 +497,7 @@ static struct device_sio_output_ops device_sio_output_ops = {
.mars_get_info = device_sio_get_info,
};
static struct device_sio_output_type device_sio_output_type = {
static const struct device_sio_output_type device_sio_output_type = {
.type_name = "device_sio_output",
.output_size = sizeof(struct device_sio_output),
.master_ops = &device_sio_output_ops,
@ -490,11 +505,11 @@ static struct device_sio_output_type device_sio_output_type = {
.output_destruct = &device_sio_output_destruct,
};
static struct device_sio_output_type *device_sio_output_types[] = {
static const struct device_sio_output_type *device_sio_output_types[] = {
&device_sio_output_type,
};
struct device_sio_brick_type device_sio_brick_type = {
const struct device_sio_brick_type device_sio_brick_type = {
.type_name = "device_sio_brick",
.brick_size = sizeof(struct device_sio_brick),
.max_inputs = 0,

View File

@ -9,6 +9,14 @@ struct device_sio_mars_io_aspect {
struct list_head io_head;
};
struct device_sio_mars_buf_aspect {
GENERIC_ASPECT(mars_buf);
};
struct device_sio_mars_buf_callback_aspect {
GENERIC_ASPECT(mars_buf_callback);
};
struct device_sio_brick {
MARS_BRICK(device_sio);
};
@ -30,7 +38,6 @@ struct sio_threadinfo {
struct device_sio_output {
MARS_OUTPUT(device_sio);
struct file *filp;
int aspect_slot;
#ifdef WITH_THREAD
struct sio_threadinfo tinfo[WITH_THREAD+1];
spinlock_t g_lock;

View File

@ -40,31 +40,40 @@ static int dummy_buf_put(struct dummy_output *output, struct mars_buf_object *mb
return GENERIC_INPUT_CALL(input, mars_buf_put, mbuf);
}
static int dummy_buf_io(struct dummy_output *output, struct mars_buf_object *mbuf, int rw, int(*buf_endio)(struct mars_buf_object *mbuf))
static int dummy_buf_io(struct dummy_output *output, struct mars_buf_callback_object *mbuf_cb)
{
struct dummy_input *input = output->brick->inputs[0];
return GENERIC_INPUT_CALL(input, mars_buf_io, mbuf, rw, buf_endio);
return GENERIC_INPUT_CALL(input, mars_buf_io, mbuf_cb);
}
//////////////// object / aspect constructors / destructors ///////////////
static int dummy_mars_io_aspect_init_fn(struct mars_io_aspect *_ini, void *_init_data)
static int dummy_mars_io_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct dummy_mars_io_aspect *ini = (void*)_ini;
ini->my_own = 0;
return 0;
}
static int dummy_mars_buf_aspect_init_fn(struct mars_buf_aspect *_ini, void *_init_data)
static int dummy_mars_buf_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct dummy_mars_buf_aspect *ini = (void*)_ini;
ini->my_own = 0;
return 0;
}
static int dummy_mars_buf_callback_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct dummy_mars_buf_callback_aspect *ini = (void*)_ini;
ini->my_own = 0;
return 0;
}
MARS_MAKE_STATICS(dummy);
static int dummy_make_object_layout(struct dummy_output *output, struct generic_object_layout *object_layout)
{
const struct generic_object_type *object_type = object_layout->type;
const struct generic_object_type *object_type = object_layout->object_type;
int status;
int aspect_size = 0;
struct dummy_brick *brick = output->brick;
@ -72,12 +81,13 @@ static int dummy_make_object_layout(struct dummy_output *output, struct generic_
if (object_type == &mars_io_type) {
aspect_size = sizeof(struct dummy_mars_io_aspect);
status = mars_io_add_aspect(object_layout, aspect_size, dummy_mars_io_aspect_init_fn, output);
output->io_aspect_slot = status;
status = dummy_mars_io_add_aspect(output, object_layout, &dummy_mars_io_aspect_type);
} else if (object_type == &mars_buf_type) {
aspect_size = sizeof(struct dummy_mars_buf_aspect);
status = mars_buf_add_aspect(object_layout, aspect_size, dummy_mars_buf_aspect_init_fn, output);
output->buf_aspect_slot = status;
status = dummy_mars_buf_add_aspect(output, object_layout, &dummy_mars_buf_aspect_type);
} else if (object_type == &mars_buf_callback_type) {
aspect_size = sizeof(struct dummy_mars_buf_callback_aspect);
status = dummy_mars_buf_callback_add_aspect(output, object_layout, &dummy_mars_buf_callback_aspect_type);
} else {
return 0;
}
@ -126,27 +136,32 @@ static struct dummy_output_ops dummy_output_ops = {
.mars_buf_io = dummy_buf_io,
};
static struct dummy_input_type dummy_input_type = {
static const struct dummy_input_type dummy_input_type = {
.type_name = "dummy_input",
.input_size = sizeof(struct dummy_input),
};
static struct dummy_input_type *dummy_input_types[] = {
static const struct dummy_input_type *dummy_input_types[] = {
&dummy_input_type,
};
static struct dummy_output_type dummy_output_type = {
static const int test[] = {0, -1};
static const struct dummy_output_type dummy_output_type = {
.type_name = "dummy_output",
.output_size = sizeof(struct dummy_output),
.master_ops = &dummy_output_ops,
.output_construct = &dummy_output_construct,
.test = {
[BRICK_OBJ_MARS_IO] = test,
}
};
static struct dummy_output_type *dummy_output_types[] = {
static const struct dummy_output_type *dummy_output_types[] = {
&dummy_output_type,
};
struct dummy_brick_type dummy_brick_type = {
const struct dummy_brick_type dummy_brick_type = {
.type_name = "dummy_brick",
.brick_size = sizeof(struct dummy_brick),
.max_inputs = 1,

View File

@ -12,6 +12,11 @@ struct dummy_mars_buf_aspect {
int my_own;
};
struct dummy_mars_buf_callback_aspect {
GENERIC_ASPECT(mars_buf_callback);
int my_own;
};
struct dummy_brick {
MARS_BRICK(dummy);
int my_own;
@ -23,8 +28,6 @@ struct dummy_input {
struct dummy_output {
MARS_OUTPUT(dummy);
int io_aspect_slot;
int buf_aspect_slot;
int my_own;
};

View File

@ -14,270 +14,28 @@
const struct generic_object_type mars_io_type = {
.object_type_name = "mars_io",
.default_size = sizeof(struct mars_io_object),
.brick_obj_nr = BRICK_OBJ_MARS_IO,
};
EXPORT_SYMBOL_GPL(mars_io_type);
const struct generic_object_type mars_buf_type = {
.object_type_name = "mars_buf",
.default_size = sizeof(struct mars_buf_object),
.brick_obj_nr = BRICK_OBJ_MARS_BUF,
};
EXPORT_SYMBOL_GPL(mars_buf_type);
const struct generic_object_type mars_buf_callback_type = {
.object_type_name = "mars_buf_callback",
.default_size = sizeof(struct mars_buf_callback_object),
.brick_obj_nr = BRICK_OBJ_MARS_BUF_CALLBACK,
};
EXPORT_SYMBOL_GPL(mars_buf_callback_type);
//////////////////////////////////////////////////////////////
// brick stuff
#define MAX_BRICK_TYPES 64
static int nr_brick_types = 0;
static struct generic_brick_type *brick_types[MAX_BRICK_TYPES] = {};
int generic_register_brick_type(struct generic_brick_type *new_type)
{
int i;
int found = -1;
MARS_DBG("generic_register_brick_type()\n");
for (i = 0; i < nr_brick_types; i++) {
if (!brick_types[i]) {
found = i;
continue;
}
if (!strcmp(brick_types[i]->type_name, new_type->type_name)) {
printk("sorry, bricktype %s is already registered.\n", new_type->type_name);
return -EEXIST;
}
}
if (found < 0) {
if (nr_brick_types >= MAX_BRICK_TYPES) {
printk("sorry, cannot register bricktype %s.\n", new_type->type_name);
return -EEXIST;
}
found = nr_brick_types++;
}
brick_types[found] = new_type;
MARS_DBG("generic_register_brick_type() done.\n");
return 0;
}
EXPORT_SYMBOL_GPL(generic_register_brick_type);
int generic_unregister_brick_type(struct generic_brick_type *old_type)
{
MARS_DBG("generic_unregister_brick_type()\n");
return -1; // NYI
}
EXPORT_SYMBOL_GPL(generic_unregister_brick_type);
int generic_brick_init_full(
void *data,
int size,
struct generic_brick_type *brick_type,
struct generic_input_type **input_types,
struct generic_output_type **output_types,
char **names)
{
struct generic_brick *brick = data;
int status;
int i;
MARS_DBG("generic_brick_init_full()\n");
// first, call the generic constructors
status = generic_brick_init(brick_type, brick, *names++);
if (status)
return status;
data += brick_type->brick_size;
size -= brick_type->brick_size;
if (size < 0)
return -ENOMEM;
if (!input_types) {
MARS_DBG("generic_brick_init_full: switch to default input_types\n");
input_types = brick_type->default_input_types;
names = brick_type->default_input_names;
}
if (input_types) {
MARS_DBG("generic_brick_init_full: input_types\n");
brick->inputs = data;
data += sizeof(void*) * brick_type->max_inputs;
size -= sizeof(void*) * brick_type->max_inputs;
if (size < 0)
return -1;
for (i = 0; i < brick_type->max_inputs; i++) {
struct generic_input *input = data;
struct generic_input_type *type = *input_types++;
MARS_DBG("generic_brick_init_full: calling generic_input_init()\n");
status = generic_input_init(brick, i, type, input, names ? *names++ : type->type_name);
if (status)
return status;
data += type->input_size;
size -= type->input_size;
if (size < 0)
return -ENOMEM;
}
}
if (!output_types) {
MARS_DBG("generic_brick_init_full: switch to default output_types\n");
output_types = brick_type->default_output_types;
names = brick_type->default_output_names;
}
if (output_types) {
MARS_DBG("generic_brick_init_full: output_types\n");
brick->outputs = data;
data += sizeof(void*) * brick_type->max_outputs;
size -= sizeof(void*) * brick_type->max_outputs;
if (size < 0)
return -1;
for (i = 0; i < brick_type->max_outputs; i++) {
struct generic_output *output = data;
struct generic_output_type *type = *output_types++;
MARS_DBG("generic_brick_init_full: calling generic_output_init()\n");
status = generic_output_init(brick, i, type, output, names ? *names++ : type->type_name);
if (status)
return status;
data += type->output_size;
size -= type->output_size;
if (size < 0)
return -ENOMEM;
}
}
// call the specific constructors
MARS_DBG("generic_brick_init_full: call specific contructors.\n");
if (brick_type->brick_construct) {
MARS_DBG("generic_brick_init_full: calling brick_construct()\n");
status = brick_type->brick_construct(brick);
if (status)
return status;
}
for (i = 0; i < brick_type->max_inputs; i++) {
struct generic_input *input = brick->inputs[i];
if (!input)
continue;
if (!input->type) {
MARS_ERR("input has no associated type!\n");
continue;
}
if (input->type->input_construct) {
MARS_DBG("generic_brick_init_full: calling input_construct()\n");
status = input->type->input_construct(input);
if (status)
return status;
}
}
for (i = 0; i < brick_type->max_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (!output)
continue;
if (!output->type) {
MARS_ERR("output has no associated type!\n");
continue;
}
if (output->type->output_construct) {
MARS_DBG("generic_brick_init_full: calling output_construct()\n");
status = output->type->output_construct(output);
if (status)
return status;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(generic_brick_init_full);
int generic_brick_exit_full(struct generic_brick *brick)
{
int i;
int status;
// first, check all outputs
for (i = 0; i < brick->nr_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (!output)
continue;
if (!output->type) {
MARS_ERR("output has no associated type!\n");
continue;
}
if (output->nr_connected) {
MARS_DBG("output is connected!\n");
return -EPERM;
}
}
// ok, test succeeded. start destruction...
for (i = 0; i < brick->type->max_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (!output)
continue;
if (!output->type) {
MARS_ERR("output has no associated type!\n");
continue;
}
if (output->type->output_destruct) {
MARS_DBG("generic_brick_exit_full: calling output_destruct()\n");
status = output->type->output_destruct(output);
if (status)
return status;
brick->outputs[i] = NULL; // others may remain leftover
}
}
for (i = 0; i < brick->type->max_inputs; i++) {
struct generic_input *input = brick->inputs[i];
if (!input)
continue;
if (!input->type) {
MARS_ERR("input has no associated type!\n");
continue;
}
if (input->type->input_destruct) {
MARS_DBG("generic_brick_exit_full: calling input_destruct()\n");
status = input->type->input_destruct(input);
if (status)
return status;
brick->inputs[i] = NULL; // others may remain leftover
status = generic_disconnect(input);
if (status)
return status;
}
}
if (brick->type->brick_destruct) {
MARS_DBG("generic_brick_exit_full: calling brick_destruct()\n");
status = brick->type->brick_destruct(brick);
if (status)
return status;
}
return 0;
}
EXPORT_SYMBOL_GPL(generic_brick_exit_full);
int generic_brick_exit_recursively(struct generic_brick *brick)
{
int final_status = 0;
LIST_HEAD(head);
list_add(&brick->tmp_head, &head);
while (!list_empty(&head)) {
int i;
int status;
brick = container_of(head.next, struct generic_brick, tmp_head);
for (i = 0; i < brick->nr_outputs; i++) {
struct generic_output *output = brick->outputs[i];
if (output->nr_connected) {
list_del(&brick->tmp_head);
continue;
}
}
list_del(&brick->tmp_head);
for (i = 0; i < brick->nr_inputs; i++) {
struct generic_input *input = brick->inputs[i];
if (input->connect) {
struct generic_brick *other = input->connect->brick;
list_add(&other->tmp_head, &head);
}
}
status = generic_brick_exit_full(brick);
if (status)
final_status = status;
}
return final_status;
}
EXPORT_SYMBOL_GPL(generic_brick_exit_recursively);
/////////////////////////////////////////////////////////////////////
static int __init init_mars(void)

View File

@ -160,6 +160,25 @@ static void if_device_unplug(struct request_queue *q)
}
//////////////// object / aspect constructors / destructors ///////////////
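/* The three aspect init functions below are intentionally empty: the
 * if_device aspect structures (see mars_if_device.h) carry no per-object
 * state beyond the generic part yet.
 */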
static int if_device_mars_io_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
return 0;
}
static int if_device_mars_buf_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
return 0;
}
static int if_device_mars_buf_callback_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
return 0;
}
MARS_MAKE_STATICS(if_device);
//////////////////////// constructors / destructors ////////////////////////
static int if_device_brick_construct(struct if_device_brick *brick)
@ -254,18 +273,18 @@ static int if_device_input_destruct(struct if_device_input *input)
static struct if_device_brick_ops if_device_brick_ops = {
};
static struct if_device_input_type if_device_input_type = {
static const struct if_device_input_type if_device_input_type = {
.type_name = "if_device_input",
.input_size = sizeof(struct if_device_input),
.input_construct = &if_device_input_construct,
.input_destruct = &if_device_input_destruct,
};
static struct if_device_input_type *if_device_input_types[] = {
static const struct if_device_input_type *if_device_input_types[] = {
&if_device_input_type,
};
struct if_device_brick_type if_device_brick_type = {
const struct if_device_brick_type if_device_brick_type = {
.type_name = "if_device_brick",
.brick_size = sizeof(struct if_device_brick),
.max_inputs = 1,

View File

@ -5,6 +5,19 @@
#define HT_SHIFT 6 //????
#define MARS_MAX_SEGMENT_SIZE (1U << (9+HT_SHIFT))
struct if_device_mars_io_aspect {
GENERIC_ASPECT(mars_io);
};
struct if_device_mars_buf_aspect {
GENERIC_ASPECT(mars_buf);
};
struct if_device_mars_buf_callback_aspect {
GENERIC_ASPECT(mars_buf_callback);
};
struct if_device_brick {
MARS_BRICK(if_device);
};

View File

@ -1,5 +1,8 @@
// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG
//#define BRICK_DEBUGGING
//#define MARS_DEBUGGING
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
@ -13,18 +16,91 @@
#include "mars_if_device.h"
#include "mars_device_sio.h"
#include "mars_buf.h"
GENERIC_MAKE_CONNECT(if_device, device_sio);
GENERIC_MAKE_CONNECT(if_device, buf);
GENERIC_MAKE_CONNECT(buf, device_sio);
static struct if_device_brick *if_brick = NULL;
static struct buf_brick *buf_brick = NULL;
static struct device_sio_brick *device_brick = NULL;
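/* Test helper: allocate a fixed-size chunk (layout_size is simply assumed
 * to be large enough), initialize a mars_buf object layout in it, and let
 * the buf output add its aspects via make_object_layout().
 * Returns NULL on any failure.
 */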
static struct mars_buf_object_layout *_init_buf_object_layout(struct buf_output *output)
{
const int layout_size = 1024;
const int max_aspects = 16;
struct mars_buf_object_layout *res;
int status;
void *data = kzalloc(layout_size, GFP_KERNEL);
if (!data) {
MARS_ERR("emergency, cannot allocate object_layout!\n");
return NULL;
}
res = mars_buf_init_object_layout(data, layout_size, max_aspects, &mars_buf_type);
if (unlikely(!res)) {
MARS_ERR("emergency, cannot init object_layout!\n");
goto err_free;
}
status = output->ops->make_object_layout(output, (struct generic_object_layout*)res);
if (unlikely(status < 0)) {
MARS_ERR("emergency, cannot add aspects to object_layout!\n");
goto err_free;
}
MARS_INF("OK, buf_object_layout init succeeded.\n");
return res;
err_free:
kfree(data); // 'res' may be NULL here; 'data' is what was actually allocated
return NULL;
}
static struct mars_buf_callback_object_layout *_init_buf_callback_object_layout(struct buf_output *output)
{
const int layout_size = 1024;
const int max_aspects = 16;
struct mars_buf_callback_object_layout *res;
int status;
void *data = kzalloc(layout_size, GFP_KERNEL);
if (!data) {
MARS_ERR("emergency, cannot allocate object_layout!\n");
return NULL;
}
res = mars_buf_callback_init_object_layout(data, layout_size, max_aspects, &mars_buf_callback_type);
if (unlikely(!res)) {
MARS_ERR("emergency, cannot init object_layout!\n");
goto err_free;
}
status = output->ops->make_object_layout(output, (struct generic_object_layout*)res);
if (unlikely(status < 0)) {
MARS_ERR("emergency, cannot add aspects to object_layout!\n");
goto err_free;
}
MARS_INF("OK, buf_callback_object_layout init succeeded.\n");
return res;
err_free:
kfree(data); // 'res' may be NULL here; 'data' is what was actually allocated
return NULL;
}
static int test_endio(struct mars_buf_callback_object *mbuf_cb)
{
MARS_DBG("test_endio() called! error=%d\n", mbuf_cb->cb_error);
return 0;
}
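/* make_test_instance(): build a small test graph
 *   if_device --> buf --> device_sio
 * (or if_device --> device_sio directly when the #else branch is used),
 * allocate object layouts on the buf output, and issue a single one-page
 * READ through mars_buf_get / mars_buf_io as a smoke test.
 */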
void make_test_instance(void)
{
static char *names[] = { "brick" };
int size = 4096;
int buf_size = 4096 * 8;
int status;
void *mem = kzalloc(size, GFP_KERNEL);
void *mem;
mem = kzalloc(size, GFP_KERNEL);
if (!mem) {
MARS_ERR("cannot grab test memory\n");
return;
@ -34,8 +110,11 @@ void make_test_instance(void)
status = device_sio_brick_init_full(mem, size, &device_sio_brick_type, NULL, NULL, names);
MARS_DBG("done (status=%d)\n", status);
if (!status)
device_brick = mem;
if (status) {
MARS_ERR("cannot init brick device_sio\n");
return;
}
device_brick = mem;
mem = kzalloc(size, GFP_KERNEL);
if (!mem) {
@ -45,11 +124,86 @@ void make_test_instance(void)
status = if_device_brick_init_full(mem, size, &if_device_brick_type, NULL, NULL, names);
MARS_DBG("done (status=%d)\n", status);
if (!status)
if_brick = mem;
if (status) {
MARS_ERR("cannot init brick if_device\n");
return;
}
if_brick = mem;
#if 1
mem = kzalloc(buf_size, GFP_KERNEL);
if (!mem) {
MARS_ERR("cannot grab test memory\n");
return;
}
status = buf_brick_init_full(mem, buf_size, &buf_brick_type, NULL, NULL, names);
MARS_DBG("done (status=%d)\n", status);
if (status) {
MARS_ERR("cannot init brick buf\n");
return;
}
buf_brick = mem;
buf_brick->backing_order = 0;
buf_brick->backing_size = PAGE_SIZE << buf_brick->backing_order;
buf_brick->max_count = 512;
status = buf_device_sio_connect(buf_brick->inputs[0], device_brick->outputs[0]);
MARS_DBG("connect (status=%d)\n", status);
#if 1
status = if_device_buf_connect(if_brick->inputs[0], buf_brick->outputs[0]);
MARS_DBG("connect (status=%d)\n", status);
#endif
if (true) {
struct buf_output *output = buf_brick->outputs[0];
struct mars_buf_object_layout *buf_layout = _init_buf_object_layout(output);
struct mars_buf_callback_object_layout *buf_callback_layout = _init_buf_callback_object_layout(output);
struct mars_buf_object *mbuf = NULL;
if (!buf_layout) {
MARS_ERR("cannot init buf_layout\n");
return;
}
if (!buf_callback_layout) {
MARS_ERR("cannot init buf_callback_layout\n");
return;
}
status = GENERIC_OUTPUT_CALL(output, mars_buf_get, &mbuf, buf_layout, 0, PAGE_SIZE);
MARS_DBG("buf_get (status=%d)\n", status);
if (mbuf) {
if (true) {
void *data = kzalloc(buf_callback_layout->object_size, GFP_KERNEL);
struct mars_buf_callback_object *mbuf_cb = NULL;
if (!data) {
MARS_ERR("cannot alloc buf_callback\n");
return;
}
mbuf_cb = mars_buf_callback_construct(data, buf_callback_layout);
if (!mbuf_cb) {
MARS_ERR("cannot init buf_callback\n");
return;
}
mbuf_cb->cb_mbuf = mbuf;
mbuf_cb->cb_rw = READ;
mbuf_cb->cb_buf_endio = test_endio;
status = GENERIC_OUTPUT_CALL(output, mars_buf_io, mbuf_cb);
MARS_DBG("buf_io (status=%d)\n", status);
}
status = GENERIC_OUTPUT_CALL(output, mars_buf_put, mbuf);
MARS_DBG("buf_put (status=%d)\n", status);
}
}
#else
status = if_device_device_sio_connect(if_brick->inputs[0], device_brick->outputs[0]);
MARS_DBG("connect (status=%d)\n", status);
#endif
}
@ -61,6 +215,12 @@ void destroy_test_instance(void)
kfree(if_brick);
if_brick = NULL;
}
if (buf_brick) {
buf_device_sio_disconnect(buf_brick->inputs[0]);
buf_brick_exit_full(buf_brick);
kfree(buf_brick);
buf_brick = NULL;
}
if (device_brick) {
device_sio_brick_exit_full(device_brick);
kfree(device_brick);

View File

@ -1,234 +0,0 @@
// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG
// Trans_Logger brick (just for demonstration)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bio.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include "mars.h"
///////////////////////// own type definitions ////////////////////////
#include "mars_trans_logger.h"
////////////////// own brick / input / output helpers //////////////////
static int trans_logger_mars_io(struct trans_logger_output *output, struct mars_io_object *mio)
{
return -EINVAL;
}
static int trans_logger_thread(void *data)
{
struct trans_logger_output *output = data;
MARS_INF("kthread has started.\n");
//set_user_nice(current, -20);
while (!kthread_should_stop()) {
struct list_head *tmp;
struct trans_logger_mars_io_aspect *aspect;
struct mars_io_object *mio;
wait_event_interruptible(output->event,
!list_empty(&output->mio_list) ||
kthread_should_stop());
if (list_empty(&output->mio_list))
continue;
spin_lock_irq(&output->lock);
tmp = output->mio_list.next;
list_del_init(tmp);
spin_unlock_irq(&output->lock);
aspect = container_of(tmp, struct trans_logger_mars_io_aspect, io_head);
mio = aspect->object;
MARS_DBG("got %p %p\n", aspect, mio);
trans_logger_mars_io(output, mio);
}
MARS_INF("kthread has stopped.\n");
return 0;
}
static int trans_logger_io_write(struct trans_logger_output *output, struct trans_logger_output *other, struct mars_io_object *mio)
{
struct trans_logger_mars_io_aspect *aspect;
aspect = mars_io_get_aspect(mio, output->aspect_slot);
MARS_DBG("queueing %p\n", mio);
spin_lock_irq(&output->lock);
list_add_tail(&aspect->io_head, &output->mio_list);
spin_unlock_irq(&output->lock);
wake_up(&output->event);
return 0;
}
static int trans_logger_io_read(struct trans_logger_output *output, struct trans_logger_output *other, struct mars_io_object *mio)
{
//TODO: ask writeback cache first
return other->ops->mars_io(other, mio);
}
////////////////// own brick / input / output operations //////////////////
static int trans_logger_io(struct trans_logger_output *output, struct mars_io_object *mio)
{
struct trans_logger_input *input = output->brick->inputs[0];
struct trans_logger_output *other;
int direction;
if (unlikely(!input))
return -ENOSYS;
other = input->connect;
if (unlikely(!other || !other->ops || !other->ops->mars_io))
return -ENOSYS;
if (unlikely(!mio->orig_bio))
return -EINVAL;
direction = mio->orig_bio->bi_rw & 1;
if (direction == READ) {
return trans_logger_io_read(output, other, mio);
}
return trans_logger_io_write(output, other, mio);
}
static int trans_logger_get_info(struct trans_logger_output *output, struct mars_info *info)
{
struct trans_logger_input *input = output->brick->inputs[0];
return GENERIC_INPUT_CALL(input, mars_get_info, info);
}
//////////////// object / aspect constructors / destructors ///////////////
static int trans_logger_aspect_init_fn(struct mars_io_aspect *_ini, void *_init_data)
{
struct trans_logger_mars_io_aspect *ini = (void*)_ini;
INIT_LIST_HEAD(&ini->io_head);
return 0;
}
static int trans_logger_make_object_layout(struct trans_logger_output *output, struct generic_object_layout *object_layout)
{
const struct generic_object_type *object_type = object_layout->type;
int res;
struct trans_logger_brick *brick = output->brick;
int i;
if (object_type != &mars_io_type)
return 0;
res = mars_io_add_aspect(object_layout, sizeof(struct trans_logger_mars_io_aspect), trans_logger_aspect_init_fn, output);
if (res < 0)
return res;
output->aspect_slot = res;
for (i = 0; i < brick->type->max_inputs; i++) {
struct trans_logger_input *input = brick->inputs[i];
if (input && input->connect) {
int subres = input->connect->ops->make_object_layout(input->connect, object_layout);
if (subres < 0)
return subres;
res += subres;
}
}
return res + sizeof(struct trans_logger_mars_io_aspect);
}
////////////////////// brick constructors / destructors ////////////////////
static int trans_logger_brick_construct(struct trans_logger_brick *brick)
{
return 0;
}
static int trans_logger_output_construct(struct trans_logger_output *output)
{
spin_lock_init(&output->lock);
INIT_LIST_HEAD(&output->mio_list);
init_waitqueue_head(&output->event);
output->thread = kthread_create(trans_logger_thread, output, "mars_logger%d", 0);
if (IS_ERR(output->thread)) {
int error = PTR_ERR(output->thread);
MARS_ERR("cannot create thread, status=%d\n", error);
return error;
}
wake_up_process(output->thread);
return 0;
}
///////////////////////// static structs ////////////////////////
static struct trans_logger_brick_ops trans_logger_brick_ops = {
};
static struct trans_logger_output_ops trans_logger_output_ops = {
.make_object_layout = trans_logger_make_object_layout,
.mars_io = trans_logger_io,
.mars_get_info = trans_logger_get_info,
};
static struct trans_logger_input_type trans_logger_input_type = {
.type_name = "data",
.input_size = sizeof(struct trans_logger_input),
};
static struct trans_logger_input_type trans_logger_input_log_type = {
.type_name = "log",
.input_size = sizeof(struct trans_logger_input),
};
static struct trans_logger_input_type *trans_logger_input_types[] = {
&trans_logger_input_type,
&trans_logger_input_log_type,
};
static struct trans_logger_output_type trans_logger_output_type = {
.type_name = "trans_logger_output",
.output_size = sizeof(struct trans_logger_output),
.master_ops = &trans_logger_output_ops,
.output_construct = &trans_logger_output_construct,
};
static struct trans_logger_output_type *trans_logger_output_types[] = {
&trans_logger_output_type,
};
struct trans_logger_brick_type trans_logger_brick_type = {
.type_name = "trans_logger_brick",
.brick_size = sizeof(struct trans_logger_brick),
.max_inputs = 2,
.max_outputs = 1,
.master_ops = &trans_logger_brick_ops,
.default_input_types = trans_logger_input_types,
.default_output_types = trans_logger_output_types,
.brick_construct = &trans_logger_brick_construct,
};
EXPORT_SYMBOL_GPL(trans_logger_brick_type);
////////////////// module init stuff /////////////////////////
static int __init init_trans_logger(void)
{
printk(MARS_INFO "init_trans_logger()\n");
return trans_logger_register_brick_type();
}
static void __exit exit_trans_logger(void)
{
printk(MARS_INFO "exit_trans_logger()\n");
trans_logger_unregister_brick_type();
}
MODULE_DESCRIPTION("MARS trans_logger brick");
MODULE_AUTHOR("Thomas Schoebel-Theuer <tst@1und1.de>");
MODULE_LICENSE("GPL");
module_init(init_trans_logger);
module_exit(exit_trans_logger);

View File

@ -1,29 +0,0 @@
// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG
#ifndef MARS_TRANS_LOGGER_H
#define MARS_TRANS_LOGGER_H
struct trans_logger_mars_io_aspect {
GENERIC_ASPECT(mars_io);
struct list_head io_head;
};
struct trans_logger_brick {
MARS_BRICK(trans_logger);
};
struct trans_logger_input {
MARS_INPUT(trans_logger);
};
struct trans_logger_output {
MARS_OUTPUT(trans_logger);
int aspect_slot;
struct list_head mio_list;
struct task_struct *thread;
wait_queue_head_t event;
spinlock_t lock;
};
MARS_TYPES(trans_logger);
#endif