import mars-83.tgz

Thomas Schoebel-Theuer 2011-03-24 17:05:46 +01:00
parent 9b1410c0ef
commit c430b3701f
11 changed files with 381 additions and 132 deletions

brick.c (255 changed lines)

@@ -11,10 +11,56 @@
 #define _STRATEGY
 #define BRICK_OBJ_MAX /*empty => leads to an open array */
-#define GFP_MARS GFP_ATOMIC

 #include "brick.h"

+#define GFP_BRICK GFP_NOIO
+
+//////////////////////////////////////////////////////////////
+
+// number management
+
+static char *nr_table = NULL;
+static int nr_max = 256;
+
+int get_nr(void)
+{
+	char *new;
+	int nr;
+	if (unlikely(!nr_table)) {
+		nr_table = kzalloc(nr_max, GFP_BRICK);
+		if (!nr_table) {
+			return 0;
+		}
+	}
+	for (;;) {
+		for (nr = 1; nr < nr_max; nr++) {
+			if (!nr_table[nr]) {
+				nr_table[nr] = 1;
+				return nr;
+			}
+		}
+		new = kzalloc(nr_max << 1, GFP_BRICK);
+		if (!new)
+			return 0;
+		memcpy(new, nr_table, nr_max);
+		kfree(nr_table);
+		nr_table = new;
+		nr_max <<= 1;
+	}
+}
+EXPORT_SYMBOL_GPL(get_nr);
+
+void put_nr(int nr)
+{
+	if (likely(nr_table && nr > 0 && nr < nr_max)) {
+		nr_table[nr] = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(put_nr);
+
 //////////////////////////////////////////////////////////////

 // object stuff
@@ -23,6 +69,9 @@
 // brick stuff

+int brick_obj_max = 0;
+EXPORT_SYMBOL_GPL(brick_obj_max);
+
 static int nr_brick_types = 0;
 static const struct generic_brick_type *brick_types[MAX_BRICK_TYPES] = {};
@@ -226,6 +275,7 @@ int generic_brick_exit_full(struct generic_brick *brick)
 			status = output->type->output_destruct(output);
 			if (status)
 				return status;
+			_generic_output_exit(output);
 			brick->outputs[i] = NULL; // others may remain leftover
 		}
 	}
@@ -246,6 +296,7 @@ int generic_brick_exit_full(struct generic_brick *brick)
 			status = generic_disconnect(input);
 			if (status)
 				return status;
+			generic_input_exit(input);
 		}
 	}
 	if (brick->type->brick_destruct) {
@@ -254,6 +305,7 @@ int generic_brick_exit_full(struct generic_brick *brick)
 		if (status)
 			return status;
 	}
+	generic_brick_exit(brick);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(generic_brick_exit_full);
@@ -304,22 +356,28 @@ int generic_add_aspect(struct generic_output *output, struct generic_object_layo
 	struct generic_aspect_layout *aspect_layout;
 	int nr;
 	int i;
+	int status;
 	(void)i;

-	if (unlikely(!object_layout->object_type)) {
-		return -EINVAL;
+	status = -EINVAL;
+	if (unlikely(!object_layout || !object_layout->object_type)) {
+		goto err;
 	}
-#if 0
-	nr = object_layout->object_type->brick_obj_nr;
-	if (nr < 0 || nr >= BRICK_OBJ_NR) {
-		return -EINVAL;
+	if (NEW_ASPECTS) {
+		nr = output->output_index;
+		if (nr <= 0 || nr > nr_max) {
+			BRICK_ERR("oops, bad nr = %d\n", nr);
+			goto err;
+		}
+		aspect_layout = &object_layout->aspect_layouts[nr];
+	} else {
+		nr = object_layout->object_type->brick_obj_nr;
+		if (nr < 0 || nr >= brick_obj_max) {
+			goto done;
+		}
+		aspect_layout = (void*)&output->output_aspect_layouts[nr];
 	}
-#else
-	nr = 0;
-#endif
-	aspect_layout = (void*)&output->output_aspect_layouts[nr];
 	if (aspect_layout->aspect_type && aspect_layout->aspect_layout_generation == object_layout->object_layout_generation) {
 		/* aspect_layout is already initialized.
 		 * this is a kind of "dynamic programming".
@@ -327,18 +385,20 @@ int generic_add_aspect(struct generic_output *output, struct generic_object_layo
 		 */
 		int min_offset;
 		BRICK_DBG("reusing aspect_type %s on object_layout %p\n", aspect_type->aspect_type_name, object_layout);
+		status = -EBADF;
 		if (unlikely(aspect_layout->aspect_type != aspect_type)) {
 			BRICK_ERR("inconsistent use of aspect_type %s != %s\n", aspect_type->aspect_type_name, aspect_layout->aspect_type->aspect_type_name);
-			return -EBADF;
+			goto done;
 		}
 		if (unlikely(aspect_layout->init_data != output)) {
 			BRICK_ERR("inconsistent output assigment (aspect_type=%s)\n", aspect_type->aspect_type_name);
-			return -EBADF;
+			goto done;
 		}
 		min_offset = aspect_layout->aspect_offset + aspect_type->aspect_size;
+		status = -ENOMEM;
 		if (unlikely(object_layout->object_size > min_offset)) {
 			BRICK_ERR("overlapping aspects %d > %d (aspect_type=%s)\n", object_layout->object_size, min_offset, aspect_type->aspect_type_name);
-			return -ENOMEM;
+			goto done;
 		}
 		BRICK_DBG("adjusting object_size %d to %d (aspect_type=%s)\n", object_layout->object_size, min_offset, aspect_type->aspect_type_name);
 		object_layout->object_size = min_offset;
@@ -351,25 +411,31 @@ int generic_add_aspect(struct generic_output *output, struct generic_object_layo
 		aspect_layout->aspect_layout_generation = object_layout->object_layout_generation;
 		BRICK_DBG("initializing aspect_type %s on object_layout %p, object_size=%d\n", aspect_type->aspect_type_name, object_layout, object_layout->object_size);
 	}
-	// find an empty slot
-	nr = -1;
-#if 0
-	for (i = 0; i < object_layout->aspect_count; i++) {
-		if (!object_layout->aspect_layouts_table[nr]) {
-			nr = i;
-			break;
+	if (NEW_ASPECTS) {
+		if (object_layout->aspect_count <= nr) {
+			object_layout->aspect_count = nr + 1;
 		}
-	}
-#endif
-	if (nr < 0) {
-		nr = object_layout->aspect_count++;
-		if (nr >= object_layout->aspect_max) {
-			BRICK_ERR("aspect overflow\n");
-			return -ENOMEM;
+	} else {
+		// find an empty slot
+		nr = -1;
+		if (nr < 0) {
+			nr = object_layout->aspect_count++;
+			status = -ENOMEM;
+			if (unlikely(nr >= object_layout->aspect_max)) {
+				BRICK_ERR("aspect overflow\n");
+				goto done;
+			}
 		}
+		object_layout->aspect_layouts_table[nr] = aspect_layout;
 	}
-	object_layout->aspect_layouts_table[nr] = aspect_layout;
-	return 0;
+	status = 0;
+done:
+	if (status < 0) { // invalidate the layout
+		object_layout->object_type = NULL;
+	}
+err:
+	return status;
 }
@@ -389,7 +455,9 @@ int default_init_object_layout(struct generic_output *output, struct generic_obj
 	// TODO: make locking granularity finer (if it were worth).
 	static DEFINE_SPINLOCK(global_lock);
 	void *data;
+	void *data2;
 	void *olddata;
+	void *olddata2;
 	int status= -ENOMEM;
 	unsigned long flags;
@@ -397,8 +465,13 @@ int default_init_object_layout(struct generic_output *output, struct generic_obj
 		module_name = "(unknown)";
 	}

-	data = kzalloc(aspect_max * sizeof(void*), GFP_MARS);
-	if (unlikely(!data)) {
+	if (NEW_ASPECTS) {
+		aspect_max = nr_max;
+	}
+	data = kzalloc(aspect_max * sizeof(struct generic_aspect_layout), GFP_BRICK);
+	data2 = kzalloc(aspect_max * sizeof(void*), GFP_BRICK);
+	if (unlikely(!data || !data2)) {
 		BRICK_ERR("kzalloc failed, size = %lu\n", aspect_max * sizeof(void*));
 		goto done;
 	}
@@ -412,9 +485,11 @@ int default_init_object_layout(struct generic_output *output, struct generic_obj
 		goto done;
 	}

-	olddata = object_layout->aspect_layouts_table;
+	olddata = object_layout->aspect_layouts;
+	olddata2 = object_layout->aspect_layouts_table;

-	object_layout->aspect_layouts_table = data;
+	object_layout->aspect_layouts_table = data2;
+	object_layout->aspect_layouts = data;
 	object_layout->object_layout_generation = brick_layout_generation;
 	object_layout->object_type = object_type;
 	object_layout->init_data = output;
@@ -429,14 +504,18 @@ int default_init_object_layout(struct generic_output *output, struct generic_obj
 	status = output->ops->make_object_layout(output, object_layout);

-	traced_unlock(&global_lock, flags);
-
 	if (unlikely(status < 0)) {
 		object_layout->object_type = NULL;
+	}
+
+	traced_unlock(&global_lock, flags);
+
+	if (unlikely(status < 0)) {
 		kfree(data);
 		BRICK_ERR("emergency, cannot add aspects to object_layout %s (module %s)\n", object_type->object_type_name, module_name);
 		goto done;
 	}

 	BRICK_INF("OK, object_layout %s init succeeded (size = %d).\n", object_type->object_type_name, object_layout->object_size);
@@ -444,6 +523,11 @@ done:
 	if (olddata) {
 #if 0 // FIXME: use RCU here
 		kfree(olddata);
+#endif
+	}
+	if (olddata2) {
+#if 0 // FIXME: use RCU here
+		kfree(olddata2);
 #endif
 	}
 	return status;
@@ -455,19 +539,49 @@ EXPORT_SYMBOL_GPL(default_init_object_layout);
  */
 int default_make_object_layout(struct generic_output *output, struct generic_object_layout *object_layout)
 {
-	struct generic_brick *brick = output->brick;
-	const struct generic_output_type *output_type = output->type;
-	const struct generic_object_type *object_type = object_layout->object_type;
-	const int nr = object_type->brick_obj_nr;
-	const struct generic_aspect_type *aspect_type = output_type->aspect_types[nr];
-	int layout_code = output_type->layout_code[nr];
+	struct generic_brick *brick;
+	const struct generic_output_type *output_type;
+	const struct generic_object_type *object_type;
+	const struct generic_aspect_type *aspect_type;
+	int nr;
+	int layout_code;
+	int aspect_size = 0;
+	int status = -EINVAL;

-	int status;
-	int aspect_size;
-
-	if (!aspect_type) {
+	if (unlikely(!output)) {
+		BRICK_ERR("output is missing\n");
+		goto done;
+	}
+	if (unlikely(!object_layout || !object_layout->object_type)) {
+		BRICK_ERR("object_layout not inizialized\n");
+		goto done;
+	}
+	brick = output->brick;
+	if (unlikely(!brick)) {
+		BRICK_ERR("brick is missing\n");
+		goto done;
+	}
+	output_type = output->type;
+	if (unlikely(!output_type)) {
+		BRICK_ERR("output_type is missing\n");
+		goto done;
+	}
+	object_type = object_layout->object_type;
+	if (unlikely(!object_type)) {
+		BRICK_ERR("object_type is missing\n");
+		goto done;
+	}
+	nr = object_type->brick_obj_nr;
+	if (unlikely(nr < 0 || nr >= brick_obj_max)) {
+		BRICK_ERR("bad brick_obj_nr = %d\n", nr);
+		goto done;
+	}
+	layout_code = output_type->layout_code[nr];
+	aspect_type = output_type->aspect_types[nr];
+	status = -ENOENT;
+	if (unlikely(!aspect_type)) {
 		BRICK_ERR("aspect type on %s does not exist\n", output_type->type_name);
-		return -ENOENT;
+		goto done;
 	}

 	aspect_size = aspect_type->aspect_size;
@@ -505,6 +619,7 @@ int default_make_object_layout(struct generic_output *output, struct generic_obj
 	status = generic_add_aspect(output, object_layout, aspect_type);

+done:
 	if (status < 0)
 		return status;
@@ -515,9 +630,15 @@ EXPORT_SYMBOL_GPL(default_make_object_layout);

 struct generic_object *alloc_generic(struct generic_object_layout *object_layout)
 {
+	struct generic_object *object;
 	void *data;
-	struct generic_object *object = object_layout->free_list;

+	if (unlikely(!object_layout || !object_layout->object_type)) {
+		BRICK_ERR("bad object_layout\n");
+		goto err;
+	}
+
+	object = object_layout->free_list;
 	if (object) {
 		unsigned long flags;
 		traced_lock(&object_layout->free_lock, flags);
@@ -533,7 +654,7 @@ struct generic_object *alloc_generic(struct generic_object_layout *object_layout
 		traced_unlock(&object_layout->free_lock, flags);
 	}

-	data = kzalloc(object_layout->object_size, GFP_MARS);
+	data = kzalloc(object_layout->object_size, GFP_BRICK);
 	if (unlikely(!data))
 		goto err;
@@ -729,7 +850,7 @@ int set_recursive_button(struct generic_brick *orig_brick, brick_switch_t mode,
 		if (table)
 			kfree(table);
 		max <<= 1;
-		table = kmalloc(max * sizeof(void*), GFP_MARS);
+		table = kmalloc(max * sizeof(void*), GFP_BRICK);
 		status = -ENOMEM;
 		if (unlikely(!table))
 			goto done;
@@ -756,7 +877,7 @@ int set_recursive_button(struct generic_brick *orig_brick, brick_switch_t mode,
 			struct generic_output *output;
 			struct generic_brick *next;
 			BRICK_DBG("---> i = %d\n", i);
-			msleep(1000);
+			//msleep(1000);
 			if (!input)
 				continue;
 			output = input->connect;
@@ -775,14 +896,14 @@ int set_recursive_button(struct generic_brick *orig_brick, brick_switch_t mode,
 			struct generic_output *output = brick->outputs[i];
 			struct list_head *tmp;
 			BRICK_DBG("---> i = %d output = %p\n", i, output);
-			msleep(1000);
+			//msleep(1000);
 			if (!output)
 				continue;
 			for (tmp = output->output_head.next; tmp && tmp != &output->output_head; tmp = tmp->next) {
 				struct generic_input *input = container_of(tmp, struct generic_input, input_head);
 				struct generic_brick *next = input->brick;
 				BRICK_DBG("----> tmp = %p input = %p next = %p\n", tmp, input, next);
-				msleep(1000);
+				//msleep(1000);
 				if (unlikely(!next)) {
 					BRICK_ERR("oops, bad brick pointer\n");
 					status = -EINVAL;
@@ -867,4 +988,30 @@ void free_meta(void *data, const struct meta *meta)
 EXPORT_SYMBOL_GPL(free_meta);

+/////////////////////////////////////////////////////////////////////////
+
+// module init stuff
+
+static int __init init_brick(void)
+{
+	nr_table = kzalloc(nr_max, GFP_BRICK);
+	if (!nr_table) {
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void __exit exit_brick(void)
+{
+	if (nr_table) {
+		kfree(nr_table);
+	}
+}
+
+MODULE_DESCRIPTION("generic brick infrastructure");
+MODULE_AUTHOR("Thomas Schoebel-Theuer <tst@1und1.de>");
 MODULE_LICENSE("GPL");
+
+module_init(init_brick);
+module_exit(exit_brick);
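
The number-management code above is a tiny grow-on-demand ID allocator: get_nr() hands out the lowest free index above zero and doubles nr_table whenever it runs full, put_nr() gives an index back, and init_brick()/exit_brick() allocate and free the initial 256-byte table at module load and unload. As a rough illustration of the same pattern outside the kernel, here is a minimal userspace C sketch (calloc/free instead of kzalloc/kfree, no locking, all names hypothetical):

#include <stdlib.h>
#include <string.h>

/* userspace stand-ins for nr_table / nr_max from the hunk above */
static char *id_table = NULL;
static int id_max = 256;

/* return the smallest unused id > 0, growing the table on demand; 0 on failure */
int id_get(void)
{
	char *bigger;
	int nr;

	if (!id_table) {
		id_table = calloc(id_max, 1);
		if (!id_table)
			return 0;
	}
	for (;;) {
		for (nr = 1; nr < id_max; nr++) {
			if (!id_table[nr]) {
				id_table[nr] = 1;
				return nr;
			}
		}
		/* table is full: double it, keeping the old occupancy flags */
		bigger = calloc(id_max * 2, 1);
		if (!bigger)
			return 0;
		memcpy(bigger, id_table, id_max);
		free(id_table);
		id_table = bigger;
		id_max *= 2;
	}
}

/* release an id so it can be handed out again */
void id_put(int nr)
{
	if (id_table && nr > 0 && nr < id_max)
		id_table[nr] = 0;
}

Like the kernel version, this sketch is not safe against concurrent callers; in this commit get_nr()/put_nr() appear to be called only from _generic_output_init()/_generic_output_exit(), i.e. from brick setup and teardown paths.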

brick.h (93 changed lines)

@@ -15,42 +15,51 @@
 #define _NORMAL_CODE(X) X
 #endif

 #define BRICK_FATAL "BRICK_FATAL "
 #define BRICK_ERROR "BRICK_ERROR "
-#define BRICK_INFO "BRICK_INFO "
-#define BRICK_DEBUG "BRICK_DEBUG "
+#define BRICK_WARNING "BRICK_WARN "
+#define BRICK_INFO "BRICK_INFO "
+#define BRICK_DEBUG "BRICK_DEBUG "

 #define _BRICK_FMT(fmt) __BASE_FILE__ " %d %s(): " fmt, __LINE__, __FUNCTION__
 #define _BRICK_MSG(PREFIX, fmt, args...) do { printk(PREFIX _BRICK_FMT(fmt), ##args); } while (0)

 #define BRICK_FAT(fmt, args...) _BRICK_MSG(BRICK_FATAL, fmt, ##args)
 #define BRICK_ERR(fmt, args...) _BRICK_MSG(BRICK_ERROR, fmt, ##args)
-#define BRICK_INF(fmt, args...) _BRICK_MSG(BRICK_INFO, fmt, ##args)
+#define BRICK_WRN(fmt, args...) _BRICK_MSG(BRICK_WARNING, fmt, ##args)
+#define BRICK_INF(fmt, args...) _BRICK_MSG(BRICK_INFO, fmt, ##args)

 #ifdef BRICK_DEBUGGING
 #define BRICK_DBG(fmt, args...) _BRICK_MSG(BRICK_DEBUG, fmt, ##args)
 #else
 #define BRICK_DBG(args...) /**/
 #endif

 #ifdef IO_DEBUGGING
 #define BRICK_IO(fmt, args...) _BRICK_MSG(BRICK_DEBUG, fmt, ##args)
 #else
 #define BRICK_IO(args...) /*empty*/
 #endif

 #define MAX_BRICK_TYPES 64
+#define NEW_ASPECTS 1

 extern int brick_layout_generation;
+extern int brick_obj_max;
+
+/////////////////////////////////////////////////////////////////////////
+
+// number management helpers
+
+extern int get_nr(void);
+extern void put_nr(int nr);

 /////////////////////////////////////////////////////////////////////////

 // definitions for generic objects with aspects

+#define MAX_DEFAULT_ASPECTS 8
+
 struct generic_aspect;

 #define GENERIC_ASPECT_TYPE(TYPE) \
@@ -87,6 +96,7 @@ struct generic_object_type {
 #define GENERIC_OBJECT_LAYOUT(TYPE) \
 	struct generic_aspect_layout **aspect_layouts_table; \
+	struct generic_aspect_layout *aspect_layouts; \
 	const struct generic_object_type *object_type; \
 	void *init_data; \
 	int aspect_count; \
@@ -183,6 +193,7 @@ struct generic_input {
 	struct BRICK##_output_ops *ops; \
 	struct list_head output_head; \
 	int nr_connected; \
+	int output_index; /* globally unique */ \
 	/* _must_ be the last member (may expand to open array) */ \
 	struct generic_aspect_layout output_aspect_layouts[BRICK_OBJ_MAX]; \
@@ -280,9 +291,21 @@ inline void _generic_output_init(struct generic_brick *brick, const struct gener
 	output->type = type;
 	output->ops = type->master_ops;
 	output->nr_connected = 0;
+	output->output_index = get_nr();
 	INIT_LIST_HEAD(&output->output_head);
 }

+inline void _generic_output_exit(struct generic_output *output)
+{
+	list_del_init(&output->output_head);
+	output->output_name = NULL;
+	output->brick = NULL;
+	output->type = NULL;
+	output->ops = NULL;
+	output->nr_connected = 0;
+	put_nr(output->output_index);
+}
+
 #ifdef _STRATEGY // call this only in strategy bricks, never in ordinary bricks

 // you need this only if you circumvent generic_brick_init_full()
@@ -294,13 +317,21 @@ inline int generic_brick_init(const struct generic_brick_type *type, struct gene
 	brick->nr_inputs = 0;
 	brick->nr_outputs = 0;
 	brick->power.led_off = true;
-	//brick->power.event = __WAIT_QUEUE_HEAD_INITIALIZER(brick->power.event);
 	init_waitqueue_head(&brick->power.event);
-	//INIT_LIST_HEAD(&brick->tmp_head);
-	brick->tmp_head.next = brick->tmp_head.prev = &brick->tmp_head;
+	INIT_LIST_HEAD(&brick->tmp_head);
 	return 0;
 }

+inline void generic_brick_exit(struct generic_brick *brick)
+{
+	list_del_init(&brick->tmp_head);
+	brick->brick_name = NULL;
+	brick->type = NULL;
+	brick->ops = NULL;
+	brick->nr_inputs = 0;
+	brick->nr_outputs = 0;
+}
+
 inline int generic_input_init(struct generic_brick *brick, int index, const struct generic_input_type *type, struct generic_input *input, const char *input_name)
 {
 	if (index < 0 || index >= brick->type->max_inputs)
@@ -317,6 +348,15 @@ inline int generic_input_init(struct generic_brick *brick, int index, const stru
 	return 0;
 }

+inline void generic_input_exit(struct generic_input *input)
+{
+	list_del_init(&input->input_head);
+	input->input_name = NULL;
+	input->brick = NULL;
+	input->type = NULL;
+	input->connect = NULL;
+}
+
 inline int generic_output_init(struct generic_brick *brick, int index, const struct generic_output_type *type, struct generic_output *output, const char *output_name)
 {
 	if (index < 0 || index >= brick->type->max_outputs)
@@ -507,7 +547,7 @@ extern void free_generic(struct generic_object *object);
 \
 inline int BRICK##_init_object_layout(struct BRICK##_output *output, struct generic_object_layout *object_layout, int aspect_max, const struct generic_object_type *object_type) \
 { \
-	if (likely(object_layout->aspect_layouts_table && object_layout->object_layout_generation == brick_layout_generation)) \
+	if (likely(object_layout->aspect_layouts_table && object_layout->aspect_layouts && object_layout->object_layout_generation == brick_layout_generation)) \
 		return 0; \
 	return default_init_object_layout((struct generic_output*)output, object_layout, aspect_max, object_type, #BRICK); \
 } \
@@ -538,7 +578,10 @@ inline struct TYPE##_object *TYPE##_construct(void *data, struct TYPE##_object_l
 	for (i = 0; i < object_layout->aspect_count; i++) { \
 		struct generic_aspect_layout *aspect_layout; \
 		struct generic_aspect *aspect; \
-		aspect_layout = object_layout->aspect_layouts_table[i]; \
+		if (NEW_ASPECTS) \
+			aspect_layout = &object_layout->aspect_layouts[i]; \
+		else \
+			aspect_layout = object_layout->aspect_layouts_table[i]; \
 		if (!aspect_layout->aspect_type) \
 			continue; \
 		aspect = data + aspect_layout->aspect_offset; \
@@ -569,7 +612,10 @@ inline void TYPE##_destruct(struct TYPE##_object *obj) \
 	for (i = 0; i < object_layout->aspect_count; i++) { \
 		struct generic_aspect_layout *aspect_layout; \
 		struct generic_aspect *aspect; \
-		aspect_layout = object_layout->aspect_layouts_table[i]; \
+		if (NEW_ASPECTS) \
+			aspect_layout = &object_layout->aspect_layouts[i]; \
+		else \
+			aspect_layout = object_layout->aspect_layouts_table[i]; \
 		if (!aspect_layout->aspect_type) \
 			continue; \
 		aspect = ((void*)obj) + aspect_layout->aspect_offset; \
@@ -588,8 +634,13 @@ inline struct BRICK##_##TYPE##_aspect *BRICK##_##TYPE##_get_aspect(struct BRICK#
 	int nr; \
 \
 	object_layout = (struct generic_object_layout*)obj->object_layout; \
-	nr = object_layout->object_type->brick_obj_nr; \
-	aspect_layout = &output->output_aspect_layouts[nr]; \
+	if (NEW_ASPECTS) { \
+		nr = output->output_index; \
+		aspect_layout = &object_layout->aspect_layouts[nr]; \
+	} else { \
+		nr = object_layout->object_type->brick_obj_nr; \
+		aspect_layout = &output->output_aspect_layouts[nr]; \
+	} \
 	if (unlikely(!aspect_layout->aspect_type)) { \
 		BRICK_ERR("brick "#BRICK": bad aspect slot on " #TYPE " pointer %p\n", obj); \
 		return NULL; \
@@ -599,7 +650,7 @@ inline struct BRICK##_##TYPE##_aspect *BRICK##_##TYPE##_get_aspect(struct BRICK#
 \
 inline struct TYPE##_object *BRICK##_alloc_##TYPE(struct BRICK##_output *output, struct generic_object_layout *object_layout) \
 { \
-	if (unlikely(!object_layout->aspect_layouts_table || object_layout->object_layout_generation != brick_layout_generation)) { \
+	if (unlikely(!object_layout->aspect_layouts_table || !object_layout->aspect_layouts || object_layout->object_layout_generation != brick_layout_generation)) { \
 		int status = default_init_object_layout((struct generic_output*)output, object_layout, BRICK_DEPTH_MAX, &TYPE##_type, #BRICK); \
 		if (status < 0) \
 			return NULL; \
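
To summarize the NEW_ASPECTS scheme introduced here: each output obtains a globally unique output_index from get_nr() in _generic_output_init(), and an object layout now carries a flat aspect_layouts[] array indexed directly by that number, so BRICK##_##TYPE##_get_aspect() becomes a plain array lookup instead of going through the per-object-type brick_obj_nr slot. The following userspace sketch restates that lookup with heavily simplified stand-in structs (illustrative names only, not the real brick.h types):

#include <stddef.h>

struct aspect_layout_sketch {
	const void *aspect_type;   /* NULL means: this slot was never initialized */
	int aspect_offset;         /* byte offset of the aspect inside the object */
};

struct object_layout_sketch {
	struct aspect_layout_sketch *aspect_layouts;  /* indexed by output_index */
	int aspect_count;
};

struct output_sketch {
	int output_index;          /* globally unique, as handed out by get_nr() */
};

/* direct-indexed aspect lookup, the core idea behind NEW_ASPECTS */
static void *get_my_aspect(struct output_sketch *out, void *obj,
			   struct object_layout_sketch *layout)
{
	struct aspect_layout_sketch *al = &layout->aspect_layouts[out->output_index];

	if (!al->aspect_type)
		return NULL;                       /* this output attached no aspect */
	return (char *)obj + al->aspect_offset;    /* aspect is embedded in the object */
}

The price is that aspect_layouts[] must be dimensioned for nr_max entries, which is why default_init_object_layout() above allocates nr_max slots when NEW_ASPECTS is set.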

(next file: name not captured)

@@ -181,6 +181,10 @@ put:
 err_free:
 	mars_free_mref(mref);
+	if (logst->private) {
+		kfree(logst->private);
+		logst->private = NULL;
+	}
 err:
 	return NULL;
 }

mars.h (27 changed lines)

@@ -8,6 +8,8 @@
 #include <asm/spinlock.h>
 #include <asm/atomic.h>

+#define MEMLEAK // FIXME: remove this
+
 /////////////////////////////////////////////////////////////////////////

 // include the generic brick infrastucture
@@ -20,10 +22,10 @@
 #define BRICK_OBJ_MAX 1
 #define BRICK_DEPTH_MAX 128

-#define GFP_MARS GFP_NOIO
-
 #include "brick.h"

+#define GFP_MARS GFP_NOIO
+
 /////////////////////////////////////////////////////////////////////////

 // MARS-specific debugging helpers
@@ -31,27 +33,29 @@
 #define MARS_DELAY /**/
 //#define MARS_DELAY msleep(20000)

 #define MARS_FATAL "MARS_FATAL "
 #define MARS_ERROR "MARS_ERROR "
-#define MARS_INFO "MARS_INFO "
-#define MARS_DEBUG "MARS_DEBUG "
+#define MARS_WARNING "MARS_WARN "
+#define MARS_INFO "MARS_INFO "
+#define MARS_DEBUG "MARS_DEBUG "

 #define _MARS_FMT(fmt) "[%s] " __BASE_FILE__ " %d %s(): " fmt, current->comm, __LINE__, __FUNCTION__
 //#define _MARS_FMT(fmt) _BRICK_FMT(fmt)

 #define _MARS_MSG(PREFIX, fmt, args...) do { printk(PREFIX _MARS_FMT(fmt), ##args); MARS_DELAY; } while (0)

 #define MARS_FAT(fmt, args...) _MARS_MSG(MARS_FATAL, fmt, ##args)
 #define MARS_ERR(fmt, args...) _MARS_MSG(MARS_ERROR, fmt, ##args)
-#define MARS_INF(fmt, args...) _MARS_MSG(MARS_INFO, fmt, ##args)
+#define MARS_WRN(fmt, args...) _MARS_MSG(MARS_WARNING, fmt, ##args)
+#define MARS_INF(fmt, args...) _MARS_MSG(MARS_INFO, fmt, ##args)

 #ifdef MARS_DEBUGGING
 #define MARS_DBG(fmt, args...) _MARS_MSG(MARS_DEBUG, fmt, ##args)
 #else
 #define MARS_DBG(args...) /**/
 #endif

 #ifdef IO_DEBUGGING
 #define MARS_IO(fmt, args...) _MARS_MSG(MARS_DEBUG, fmt, ##args)
 #else
 #define MARS_IO(args...) /*empty*/
 #endif
@@ -106,6 +110,7 @@ struct mref_object_layout {
 	int ref_rw; \
 	int ref_id; /* not mandatory; may be used for identification */ \
 	struct page *ref_page; \
+	bool ref_skip_sync; /* skip sync for this particular mref */ \
 	bool ref_is_kmapped; /* tribute for higher-level IO abstraction */ \
 	/* maintained by the ref implementation, incrementable for \
 	 * callers (but not decrementable! use ref_put()) */ \

(next file: name not captured)

@@ -84,21 +84,39 @@ static int aio_ref_get(struct aio_output *output, struct mref_object *mref)
 	_CHECK_ATOMIC(&mref->ref_count, !=, 0);

 	if (file) {
-		mref->ref_total_size = i_size_read(file->f_mapping->host);
+		loff_t total_size = i_size_read(file->f_mapping->host);
+		mref->ref_total_size = total_size;
+		/* Only check reads.
+		 * Writes behind EOF are always allowed (sparse files)
+		 */
+		if (!mref->ref_may_write) {
+			loff_t len = total_size - mref->ref_pos;
+			if (unlikely(len <= 0)) {
+				/* Allow reads starting _exactly_ at EOF when a timeout is specified (special case).
+				 */
+				if (len < 0 || mref->ref_timeout <= 0) {
+					MARS_DBG("ENODATA %lld\n", len);
+					return -ENODATA;
+				}
+			}
+			// Shorten below EOF, but allow special case
+			if (mref->ref_len > len && len > 0) {
+				mref->ref_len = len;
+			}
+		}
 	}

-	/* Buffered IO is implemented, but should not be used
-	 * except for testing.
-	 * Always precede this with a buf brick -- otherwise you
-	 * can get bad performance!
+	/* Buffered IO.
 	 */
 	if (!mref->ref_data) {
 		struct aio_mref_aspect *mref_a = aio_mref_get_aspect(output, mref);
 		if (!mref_a)
 			return -EILSEQ;
 		mref->ref_data = kmalloc(mref->ref_len, GFP_MARS);
-		if (!mref->ref_data)
+		if (!mref->ref_data) {
+			MARS_DBG("ENOMEM %d\n", mref->ref_len);
 			return -ENOMEM;
+		}
 #if 0 // ???
 		mref->ref_flags = 0;
 #endif
@@ -186,6 +204,7 @@ static int aio_submit(struct aio_output *output, struct aio_mref_aspect *mref_a,
 		.aio_buf = (unsigned long)mref->ref_data,
 		.aio_nbytes = mref->ref_len,
 		.aio_offset = mref->ref_pos,
+		// .aio_reqprio = something(mref->ref_prio) field exists, but not yet implemented in kernelspace :(
 	};
 	struct iocb *iocbp = &iocb;
@@ -259,26 +278,39 @@ static int aio_submit_thread(void *data)
 			continue;
 		}

-		// check for reads behind EOF
+		// check for reads exactly at EOF (special case)
 		mref = mref_a->object;
-		if (!mref->ref_rw && mref->ref_pos + mref->ref_len > i_size_read(file->f_mapping->host)) {
-			if (mref->ref_timeout > 0 &&
-			    ((!mref_a->start_jiffies && (mref_a->start_jiffies = jiffies, true)) ||
-			     mref_a->start_jiffies + mref->ref_timeout >= (long long)jiffies)) {
-				msleep(50);
-				_enqueue(tinfo, mref_a, mref->ref_prio, true);
+		if (mref->ref_pos == mref->ref_total_size &&
+		    !mref->ref_rw &&
+		    mref->ref_timeout > 0) {
+			loff_t total_size = i_size_read(file->f_mapping->host);
+			loff_t len = total_size - mref->ref_pos;
+			if (len > 0) {
+				mref->ref_total_size = total_size;
+				mref->ref_len = len;
+			} else {
+				if (!mref_a->start_jiffies) {
+					mref_a->start_jiffies = jiffies;
+				}
+				if ((long long)jiffies - mref_a->start_jiffies <= mref->ref_timeout) {
+					if (!_dequeue(tinfo, false)) {
+						msleep(1000 * 4 / HZ);
+					}
+					_enqueue(tinfo, mref_a, MARS_PRIO_LOW, true);
+					continue;
+				}
+				MARS_DBG("ENODATA %lld\n", len);
+				_complete(output, mref, -ENODATA);
 				continue;
 			}
-			_complete(output, mref, -ENODATA);
-			continue;
 		}

-		err = aio_submit(output, mref_a, false);
+		for (;;) {
+			err = aio_submit(output, mref_a, false);

-		if (err == -EAGAIN) {
-			_enqueue(tinfo, mref_a, mref->ref_prio, false);
-			msleep(20);
-			continue;
+			if (likely(err != -EAGAIN)) {
+				break;
+			}
+			msleep(1000 / HZ);
 		}

 		if (unlikely(err < 0)) {
 			_complete(output, mref, err);
@@ -360,6 +392,7 @@ static int aio_event_thread(void *data)
 			if (output->brick->o_fdsync
 			    && err >= 0
 			    && mref->ref_rw != READ
+			    && !mref->ref_skip_sync
 			    && !mref_a->resubmit++) {
 				// workaround for non-implemented AIO FSYNC operation
 				if (!output->filp->f_op->aio_fsync) {
@@ -527,21 +560,6 @@ static int aio_switch(struct aio_brick *brick)
 			}
 		}
 #endif
-#if 0 // not here
-		if (!output->ctxp) {
-			if (!current->mm) {
-				MARS_ERR("mm = %p\n", current->mm);
-				err = -EINVAL;
-				goto err;
-			}
-			oldfs = get_fs();
-			set_fs(get_ds());
-			err = sys_io_setup(MARS_MAX_AIO, &output->ctxp);
-			set_fs(oldfs);
-			if (unlikely(err))
-				goto err;
-		}
-#endif

 		for (i = 0; i < 3; i++) {
 			static int (*fn[])(void*) = {
@@ -611,7 +629,7 @@ cleanup:
 		output->filp = NULL;
 	}
 	if (output->ctxp) {
-#if 0 // FIXME this crashes
+#ifndef MEMLEAK // FIXME this crashes
 		sys_io_destroy(output->ctxp);
 #endif
 		output->ctxp = 0;
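
Two behavioural changes in this file are worth spelling out: aio_ref_get() now clamps reads against EOF (reads entirely behind EOF fail with -ENODATA unless they start exactly at EOF and carry a timeout, and reads crossing EOF are shortened), and the submit path retries -EAGAIN in place with a short sleep instead of re-enqueueing the request. A small userspace C sketch of the EOF policy, using hypothetical names and plain long long instead of the kernel's loff_t:

#include <errno.h>

/* Decide what to do with a read of 'len' bytes at 'pos' in a file of 'total_size'
 * bytes, mirroring the policy of the hunks above (illustrative only):
 *   - a read fully behind EOF fails with -ENODATA, unless it starts exactly at
 *     EOF and a timeout is set, in which case the caller may wait for growth;
 *   - a read crossing EOF is shortened so that it ends at EOF.
 * Returns the adjusted length, 0 for "wait and retry later", or -ENODATA.
 */
static long long clamp_read_to_eof(long long pos, long long len,
				   long long total_size, int timeout)
{
	long long avail = total_size - pos;

	if (avail <= 0) {
		if (avail < 0 || timeout <= 0)
			return -ENODATA;  /* truly behind EOF, or nobody wants to wait */
		return 0;                 /* exactly at EOF: wait for the file to grow */
	}
	if (len > avail)
		len = avail;              /* shorten the read so it ends at EOF */
	return len;
}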

(next file: name not captured)

@@ -38,10 +38,11 @@ const struct meta mars_mref_meta[] = {
 	META_INI(ref_may_write, struct mref_object, FIELD_INT),
 	META_INI(ref_prio, struct mref_object, FIELD_INT),
 	META_INI(ref_timeout, struct mref_object, FIELD_INT),
+	META_INI(ref_total_size, struct mref_object, FIELD_INT),
 	META_INI(ref_flags, struct mref_object, FIELD_INT),
 	META_INI(ref_rw, struct mref_object, FIELD_INT),
 	META_INI(ref_id, struct mref_object, FIELD_INT),
-	META_INI(ref_total_size, struct mref_object, FIELD_INT),
+	META_INI(ref_skip_sync, struct mref_object, FIELD_INT),
 	META_INI(_ref_cb.cb_error, struct mref_object, FIELD_INT),
 	{}
 };
@@ -805,7 +806,7 @@ int mars_free_brick(struct mars_brick *brick)
 	status = generic_brick_exit_full((void*)brick);

 	if (status >= 0) {
-#if 0 // TODO: check whether crash remains possible
+#ifndef MEMLEAK // TODO: check whether crash remains possible
 		if (brick->brick_name)
 			kfree(brick->brick_name);
 		if (brick->brick_path)
@@ -1195,6 +1196,7 @@ EXPORT_SYMBOL_GPL(mm_fake);
 static int __init init_mars(void)
 {
 	MARS_INF("init_mars()\n");
+	brick_obj_max = BRICK_OBJ_MAX;
 	set_fake();
 	return 0;
 }

(next file: name not captured)

@@ -100,6 +100,7 @@ void if_endio(struct generic_callback *cb)
  */
 static void _if_unplug(struct if_input *input)
 {
+	struct if_brick *brick = input->brick;
 	LIST_HEAD(tmp_list);
 	unsigned long flags;
@@ -121,6 +122,9 @@ static void _if_unplug(struct if_input *input)
 		list_del_init(&mref_a->plug_head);
 		mref = mref_a->object;

+		if (brick->skip_sync) {
+		}
+
 		GENERIC_INPUT_CALL(input, mref_io, mref);
 		GENERIC_INPUT_CALL(input, mref_put, mref);
 	}
@@ -139,7 +143,7 @@ static int if_make_request(struct request_queue *q, struct bio *bio)
 	int i;
 	bool assigned = false;
 	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
-	//const bool barrier = ((bio->bi_rw & 1) != READ && bio_rw_flagged(bio, BIO_RW_BARRIER));
+	const bool barrier = ((bio->bi_rw & 1) != READ && bio_rw_flagged(bio, BIO_RW_BARRIER));
 	loff_t pos = ((loff_t)bio->bi_sector) << 9; // TODO: make dynamic
 	int rw = bio_data_dir(bio);
 	int error = -ENOSYS;
@@ -230,6 +234,9 @@ static int if_make_request(struct request_queue *q, struct bio *bio)
 				atomic_inc(&bio->bi_comp_cnt);
 				mref_a->orig_bio[mref_a->bio_count++] = bio;
 				assigned = true;
+				if (barrier) {
+					mref->ref_skip_sync = false;
+				}
 				MARS_IO("merge bio = %p mref = %p bio_count = %d len = %d ref_len = %d\n", bio, mref, mref_a->bio_count, len, mref->ref_len);
 				break;
@@ -258,6 +265,7 @@ static int if_make_request(struct request_queue *q, struct bio *bio)
 			mref_a->input = input;
 			mref->ref_rw = mref->ref_may_write = rw;
 			mref->ref_pos = pos;
+			// FIXME: do better here!
 			mref->ref_len = PAGE_SIZE;
 			//mref->ref_len = 512;
 			mref->ref_data = data; // direct IO
@@ -287,6 +295,11 @@ static int if_make_request(struct request_queue *q, struct bio *bio)
 			atomic_inc(&input->io_count);

+			if (brick->skip_sync && !barrier) {
+				mref->ref_skip_sync = true;
+			}
+
 			traced_lock(&input->req_lock, flags);
 			list_add_tail(&mref_a->plug_head, &input->plug_anchor);
 			traced_unlock(&input->req_lock, flags);
@@ -417,14 +430,12 @@ static int if_switch(struct if_brick *brick)
 		q->queuedata = input;
 		input->q = q;

-		//MARS_DBG("2\n");
 		disk = alloc_disk(1);
 		if (!disk) {
 			MARS_ERR("cannot allocate gendisk\n");
 			return -ENOMEM;
 		}
-		//MARS_DBG("3\n");
 		minor = device_minor++; //TODO: protect against races (e.g. atomic_t)
 		disk->queue = q;
 		disk->major = MARS_MAJOR; //TODO: make this dynamic for >256 devices
@@ -462,7 +473,6 @@ static int if_switch(struct if_brick *brick)
 		q->unplug_fn = if_unplug;
 		q->queue_lock = &input->req_lock; // needed!

-		//MARS_DBG("4\n");
 		input->bdev = bdget(MKDEV(disk->major, minor));
 		/* we have no partitions. we contain only ourselves. */
 		input->bdev->bd_contains = input->bdev;
@@ -478,7 +488,6 @@ static int if_switch(struct if_brick *brick)
 #endif

 		// point of no return
-		//MARS_DBG("99999\n");
 		add_disk(disk);
 		input->disk = disk;
 		//set_device_ro(input->bdev, 0); // TODO: implement modes
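
The ref_skip_sync plumbing added across mars.h, the aio brick and this device brick fits together as follows: when the if brick is configured with skip_sync, ordinary requests are marked ref_skip_sync so the aio brick's per-request fdsync workaround is skipped, while barrier requests are never marked and therefore still force data to stable storage. A compact userspace sketch of the two halves of that contract (hypothetical types, not the driver structures):

#include <stdbool.h>

/* simplified request descriptor; skip_sync plays the role of mref->ref_skip_sync */
struct request_sketch {
	bool is_write;
	bool is_barrier;
	bool skip_sync;
};

/* producer side (device brick): mark ordinary requests when skip_sync is
 * configured, but never barriers -- those must still reach stable storage */
static void mark_request(struct request_sketch *rq, bool brick_skip_sync)
{
	rq->skip_sync = brick_skip_sync && !rq->is_barrier;
}

/* consumer side (aio brick): only run the fsync workaround for writes that
 * were not marked by the producer */
static bool needs_fsync_workaround(const struct request_sketch *rq, bool o_fdsync)
{
	return o_fdsync && rq->is_write && !rq->skip_sync;
}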

(next file: name not captured)

@@ -23,6 +23,7 @@ struct if_mref_aspect {

 struct if_input {
 	MARS_INPUT(if);
+	// TODO: move this to if_brick (better systematics)
 	struct list_head plug_anchor;
 	struct request_queue *q;
 	struct gendisk *disk;
@@ -42,6 +43,7 @@ struct if_brick {
 	MARS_BRICK(if);
 	// parameters
 	int readahead;
+	bool skip_sync;
 	// inspectable
 	bool has_closed;
 	// private

(next file: name not captured)

@@ -69,6 +69,8 @@ struct light_class {
 #define CONF_ALL_MAX_QUEUE 10000
 #define CONF_ALL_MAX_JIFFIES (180 * HZ)

+#define IF_SKIP_SYNC true
+
 #define IF_READAHEAD 1
 //#define IF_READAHEAD 0
 #define BIO_READAHEAD 1
@@ -169,6 +171,7 @@ void _set_if_params(struct mars_brick *_brick, void *private)
 		MARS_ERR("bad brick type\n");
 		return;
 	}
+	if_brick->skip_sync = IF_SKIP_SYNC;
 	if_brick->readahead = IF_READAHEAD;
 }

(next file: name not captured)

@@ -1133,8 +1133,9 @@ void trans_logger_log(struct trans_logger_output *output)
 		/* A kind of delayed plugging mechanism
 		 */
-		if (!brick->flush_delay || !log_jiffies ||
-		    (long long)jiffies - log_jiffies >= 0) {
+		if (atomic_read(&output->q_phase1.q_queued) <= 0 &&
+		    (!brick->flush_delay || !log_jiffies ||
+		     (long long)jiffies - log_jiffies >= 0)) {
 			log_flush(&brick->logst);
 			log_jiffies = 0;
 		}
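
The new condition delays the log flush while phase-1 requests are still queued; only when that queue is empty does the old flush_delay/log_jiffies logic apply. Restated as a small helper (a sketch with hypothetical names, not the module code):

#include <stdbool.h>

/* flush the transaction log only when nothing is waiting in phase 1, and then
 * only if no delay is configured, no deadline is armed, or the deadline passed */
static bool should_flush_log(int q_phase1_queued, int flush_delay,
			     long long log_jiffies, long long now_jiffies)
{
	return q_phase1_queued <= 0 &&
	       (!flush_delay || !log_jiffies ||
	        now_jiffies - log_jiffies >= 0);
}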

(next file: name not captured)

@@ -280,6 +280,13 @@ sub primary_res {
 	} elsif($old eq $host) {
 		print "I am already primary.\n";
 		exit(0);
+	} else {
+		my $lnk = "$mars/resource-$res/syncstatus-$host";
+		if(lstat($lnk)) {
+			my $syncstatus = readlink($lnk);
+			my $size = readlink("$mars/resource-$res/size") or die "cannot read size\n";
+			die "sync has not yet finished, only $syncstatus / $size bytes transferred\n" unless $syncstatus >= $size;
+		}
 	}
 	# TODO: check whether we can switch without interrupting service....