import mars-33.tgz

Thomas Schoebel-Theuer 2010-08-09 17:57:56 +01:00
parent 52d6fadfe9
commit 4a47eac2ff
7 changed files with 186 additions and 108 deletions

View File

@ -576,14 +576,14 @@ GENERIC_OBJECT_FUNCTIONS(generic);
} \
atomic_inc(&current->lock_count); \
(void)flags; \
/*spin_lock(spinlock);*/ \
spin_lock_irqsave(spinlock, flags); \
spin_lock(spinlock); \
/*spin_lock_irqsave(spinlock, flags);*/ \
} while (0)
# define traced_unlock(spinlock,flags) \
do { \
/*spin_unlock(spinlock);*/ \
spin_unlock_irqrestore(spinlock, flags); \
spin_unlock(spinlock); \
/*spin_unlock_irqrestore(spinlock, flags);*/ \
atomic_dec(&current->lock_count); \
} while (0)
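The hunk flips the tracing lock wrappers from the irq-saving variants to plain spin_lock()/spin_unlock(), keeping flags in the signature so call sites need no change. A minimal sketch of the macros as they stand after this change (surrounding conditionals elided; lock_count on current is this tree's own debug counter, not a stock kernel field):

# define traced_lock(spinlock,flags)			\
	do {						\
		atomic_inc(&current->lock_count);	\
		(void)flags; /* silences unused-variable warnings now */ \
		spin_lock(spinlock);			\
	} while (0)

# define traced_unlock(spinlock,flags)			\
	do {						\
		spin_unlock(spinlock);			\
		atomic_dec(&current->lock_count);	\
	} while (0)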

View File

@ -14,6 +14,8 @@
#include "mars.h"
//#define USE_VMALLOC
///////////////////////// own type definitions ////////////////////////
#include "mars_buf.h"
@ -32,18 +34,20 @@ static inline int buf_hash_fn(unsigned int base_index)
return tmp % MARS_BUF_HASH_MAX;
}
static struct buf_head *hash_find(struct buf_brick *brick, unsigned int base_index)
static struct buf_head *hash_find_insert(struct buf_brick *brick, unsigned int base_index, struct buf_head *new)
{
int hash = buf_hash_fn(base_index);
struct list_head *start = &brick->cache_anchors[hash];
spinlock_t *lock = &brick->cache_anchors[hash].hash_lock;
struct list_head *start = &brick->cache_anchors[hash].hash_anchor;
struct list_head *tmp;
struct buf_head *res;
int count = 0;
unsigned long flags;
for (tmp = start->next; ; tmp = tmp->next) {
if (tmp == start)
return NULL;
traced_lock(lock, flags);
for (tmp = start->next; tmp != start; tmp = tmp->next) {
#if 1
{
static int max = 0;
@ -56,23 +60,36 @@ static struct buf_head *hash_find(struct buf_brick *brick, unsigned int base_ind
}
#endif
res = container_of(tmp, struct buf_head, bf_hash_head);
if (res->bf_base_index == base_index)
break;
}
return res;
}
if (res->bf_base_index == base_index) {
CHECK_ATOMIC(&res->bf_count, 0);
atomic_inc(&res->bf_count);
static inline void hash_insert(struct buf_brick *brick, struct buf_head *elem)
{
int hash = buf_hash_fn(elem->bf_base_index);
struct list_head *start = &brick->cache_anchors[hash];
list_add(&elem->bf_hash_head, start);
traced_unlock(lock, flags);
return res;
}
}
if (new) {
atomic_inc(&brick->hashed_count);
CHECK_HEAD_EMPTY(&new->bf_hash_head);
list_add(&new->bf_hash_head, start);
}
traced_unlock(lock, flags);
return NULL;
}
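hash_find() becomes hash_find_insert(): lookup and optional insertion of a caller-supplied element now happen under one per-bucket hash_lock, so two callers can no longer both miss and insert aliases for the same base_index. A userspace sketch of the same find-or-insert shape, with a pthread mutex standing in for the spinlock (names illustrative, not from the MARS sources):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; unsigned int key; int refcount; };

struct bucket { pthread_mutex_t lock; struct node *head; };

/* Find key in the bucket, taking a reference; if absent and 'new' is
 * given, publish 'new' under the same lock so no alias can appear. */
static struct node *find_or_insert(struct bucket *b, unsigned int key,
				   struct node *new)
{
	struct node *n;

	pthread_mutex_lock(&b->lock);
	for (n = b->head; n; n = n->next) {
		if (n->key == key) {
			n->refcount++;		/* grab ref while locked */
			pthread_mutex_unlock(&b->lock);
			return n;
		}
	}
	if (new) {				/* miss: insert atomically */
		new->key = key;
		new->next = b->head;
		b->head = new;
	}
	pthread_mutex_unlock(&b->lock);
	return NULL;
}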
static inline void free_bf(struct buf_brick *brick, struct buf_head *bf)
{
atomic_dec(&brick->alloc_count);
MARS_INF("really freeing bf=%p\n", bf);
#ifdef USE_VMALLOC
vfree(bf->bf_data);
#else
free_pages((unsigned long)bf->bf_data, brick->backing_order);
#endif
kfree(bf);
}
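free_bf() must release memory with the counterpart of whatever allocator _alloc_bf() picked via the USE_VMALLOC switch — vfree() pairs with vmalloc(), free_pages() with __get_free_pages(). A sketch of the pairing (GFP_KERNEL stands in for the tree's GFP_MARS):

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/types.h>

static void *buf_alloc(int order, size_t size)
{
#ifdef USE_VMALLOC
	return vmalloc(size);			/* virtually contiguous */
#else
	return (void *)__get_free_pages(GFP_KERNEL, order); /* physically contiguous */
#endif
}

static void buf_free(void *data, int order)
{
#ifdef USE_VMALLOC
	vfree(data);
#else
	free_pages((unsigned long)data, order);
#endif
}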
@ -83,29 +100,47 @@ static inline void __prune_cache(struct buf_brick *brick, int max_count, unsigne
#if 0
return;
#endif
while (brick->alloc_count >= max_count) {
while (atomic_read(&brick->alloc_count) >= max_count) {
struct buf_head *bf;
if (list_empty(&brick->free_anchor))
break;
bf = container_of(brick->free_anchor.next, struct buf_head, bf_lru_head);
list_del_init(&bf->bf_lru_head);
brick->alloc_count--;
traced_unlock(&brick->brick_lock, *flags);
free_bf(brick, bf);
traced_lock(&brick->brick_lock, *flags);
}
}
static inline void __lru_free_one(struct buf_brick *brick)
static inline void __lru_free_one(struct buf_brick *brick, unsigned long *flags)
{
struct buf_head *bf;
int hash;
spinlock_t *lock;
if (list_empty(&brick->lru_anchor))
return;
bf = container_of(brick->lru_anchor.prev, struct buf_head, bf_lru_head);
list_del_init(&bf->bf_lru_head);
list_del_init(&bf->bf_hash_head);
brick->hashed_count--;
traced_unlock(&brick->brick_lock, *flags);
hash = buf_hash_fn(bf->bf_base_index);
lock = &brick->cache_anchors[hash].hash_lock;
traced_lock(lock, *flags);
if (!atomic_read(&bf->bf_count)) {
list_del_init(&bf->bf_hash_head);
atomic_dec(&brick->hashed_count);
}
traced_unlock(lock, *flags);
#if 1
if (unlikely(
!list_empty(&bf->bf_io_pending_anchor) ||
@ -114,17 +149,19 @@ static inline void __lru_free_one(struct buf_brick *brick)
MARS_ERR("freed bf is member in lists!\n");
}
#endif
traced_lock(&brick->brick_lock, *flags);
list_add(&bf->bf_lru_head, &brick->free_anchor);
}
static inline void __lru_free(struct buf_brick *brick)
static inline void __lru_free(struct buf_brick *brick, unsigned long *flags)
{
int max = brick->hashed_count; // limit lock contention time
while (brick->hashed_count >= brick->max_count) {
if (list_empty(&brick->lru_anchor) || --max < 0)
while (atomic_read(&brick->hashed_count) >= brick->max_count) {
if (list_empty(&brick->lru_anchor))
break;
__lru_free_one(brick);
__lru_free_one(brick, flags);
}
}
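With the hash now guarded by per-bucket locks, __lru_free_one() juggles two locks: it drops brick_lock before taking the bucket's hash_lock, re-checks bf_count under the bucket lock, and only unhashes elements nobody references. A reduced userspace sketch of that ordering (pthread mutexes standing in for the spinlocks):

#include <pthread.h>

/* Two-lock eviction order from __lru_free_one(): never hold the
 * global lock while taking a bucket lock. */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER; /* plays brick->brick_lock */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER; /* plays cache_anchors[h].hash_lock */

static void evict_one(int *refcount, int *hashed)
{
	/* entered with global_lock held, like __lru_free_one() */
	pthread_mutex_unlock(&global_lock);	/* drop the global lock first */

	pthread_mutex_lock(&bucket_lock);
	if (*refcount == 0)			/* re-check under the bucket lock */
		*hashed = 0;			/* unhash only if unreferenced */
	pthread_mutex_unlock(&bucket_lock);

	pthread_mutex_lock(&global_lock);	/* retake for the free-list insert */
}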
@ -248,7 +285,11 @@ static inline struct buf_head *_alloc_bf(struct buf_brick *brick)
if (!bf)
goto done;
#ifdef USE_VMALLOC
bf->bf_data = vmalloc(brick->backing_size);
#else
bf->bf_data = (void*)__get_free_pages(GFP_MARS, brick->backing_order);
#endif
if (unlikely(!bf->bf_data)) {
kfree(bf);
bf = NULL;
@ -256,6 +297,7 @@ static inline struct buf_head *_alloc_bf(struct buf_brick *brick)
spin_lock_init(&bf->bf_lock);
bf->bf_brick = brick;
atomic_inc(&brick->alloc_count);
done:
return bf;
@ -273,7 +315,6 @@ static void __pre_alloc_bf(struct buf_brick *brick, int max)
traced_lock(&brick->brick_lock, flags);
list_add(&bf->bf_lru_head, &brick->free_anchor);
brick->alloc_count++;
traced_unlock(&brick->brick_lock, flags);
}
@ -292,6 +333,7 @@ static int buf_ref_get(struct buf_output *output, struct mars_ref_object *mref)
struct buf_brick *brick = output->brick;
struct buf_mars_ref_aspect *mref_a;
struct buf_head *bf;
struct buf_head *new = NULL;
loff_t base_pos;
int base_offset;
int max_len;
@ -305,9 +347,9 @@ static int buf_ref_get(struct buf_output *output, struct mars_ref_object *mref)
}
#ifdef PRE_ALLOC
if (unlikely(brick->alloc_count < brick->max_count)) {
if (unlikely(atomic_read(&brick->alloc_count) < brick->max_count)) {
// grab all memory in one go => avoid memory fragmentation
__pre_alloc_bf(brick, brick->max_count + PRE_ALLOC - brick->alloc_count);
__pre_alloc_bf(brick, brick->max_count + PRE_ALLOC - atomic_read(&brick->alloc_count));
}
#endif
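The PRE_ALLOC path tops the pool up to max_count + PRE_ALLOC in one burst while memory is still unfragmented, instead of allocating order-N chunks on demand under load — which is what the "grab all memory in one go" comment is after. A compact userspace model of that policy:

#include <stdlib.h>

#define PRE_ALLOC  8
#define CHUNK_SIZE (32 * 4096)	/* models PAGE_SIZE << backing_order */

static void *pool[4096];
static int alloc_count;

/* Fill the pool up to max_count + PRE_ALLOC in one go. */
static void pre_alloc(int max_count)
{
	int target = max_count + PRE_ALLOC;

	while (alloc_count < target) {
		void *p = malloc(CHUNK_SIZE);	/* stands in for __get_free_pages() */
		if (!p)
			break;			/* degrade gracefully under pressure */
		pool[alloc_count++] = p;
	}
}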
/* Grab reference.
@ -329,24 +371,39 @@ static int buf_ref_get(struct buf_output *output, struct mars_ref_object *mref)
if (mref->ref_len > max_len)
mref->ref_len = max_len;
traced_lock(&brick->brick_lock, flags);
again:
bf = hash_find(brick, ((unsigned int)base_pos) >> brick->backing_order);
bf = hash_find_insert(brick, ((unsigned int)base_pos) >> brick->backing_order, new);
if (bf) {
list_del_init(&bf->bf_lru_head);
CHECK_ATOMIC(&bf->bf_count, 0);
atomic_inc(&bf->bf_count);
if (!list_empty(&bf->bf_lru_head)) {
traced_lock(&brick->brick_lock, flags);
list_del_init(&bf->bf_lru_head);
traced_unlock(&brick->brick_lock, flags);
}
if (new) {
MARS_DBG("race detected: alias elem appeared in the meantime\n");
traced_lock(&brick->brick_lock, flags);
traced_unlock(&brick->brick_lock, flags);
list_add(&new->bf_lru_head, &brick->free_anchor);
traced_unlock(&brick->brick_lock, flags);
new = NULL;
atomic_inc(&brick->nr_collisions);
}
atomic_inc(&brick->hit_count);
} else if (new) {
MARS_DBG("new elem added\n");
bf = new;
new = NULL;
atomic_inc(&brick->miss_count);
} else {
MARS_DBG("buf_get() hash nothing found\n");
traced_lock(&brick->brick_lock, flags);
if (list_empty(&brick->free_anchor)) {
__lru_free_one(brick);
__lru_free_one(brick, &flags);
if (unlikely(list_empty(&brick->free_anchor))) {
MARS_INF("alloc new buf_head %d\n", brick->alloc_count);
MARS_INF("alloc new buf_head %d\n", atomic_read(&brick->alloc_count));
traced_unlock(&brick->brick_lock, flags);
@ -358,8 +415,8 @@ again:
traced_lock(&brick->brick_lock, flags);
list_add(&bf->bf_lru_head, &brick->free_anchor);
brick->alloc_count++;
traced_unlock(&brick->brick_lock, flags);
/* while the lock was released, somebody might have
* raced against us at the same base_pos...
*/
@ -367,27 +424,26 @@ again:
}
}
bf = container_of(brick->free_anchor.next, struct buf_head, bf_lru_head);
list_del_init(&bf->bf_lru_head);
bf->bf_pos = base_pos;
bf->bf_base_index = ((unsigned int)base_pos) >> brick->backing_order;
bf->bf_flags = 0;
atomic_set(&bf->bf_count, 1);
bf->bf_bio_status = 0;
atomic_set(&bf->bf_bio_count, 0);
//INIT_LIST_HEAD(&bf->bf_mref_anchor);
//INIT_LIST_HEAD(&bf->bf_lru_head);
INIT_LIST_HEAD(&bf->bf_hash_head);
INIT_LIST_HEAD(&bf->bf_io_pending_anchor);
INIT_LIST_HEAD(&bf->bf_postpone_anchor);
hash_insert(brick, bf);
brick->hashed_count++;
new = container_of(brick->free_anchor.next, struct buf_head, bf_lru_head);
list_del_init(&new->bf_lru_head);
traced_unlock(&brick->brick_lock, flags);
atomic_inc(&brick->miss_count);
new->bf_pos = base_pos;
new->bf_base_index = ((unsigned int)base_pos) >> brick->backing_order;
new->bf_flags = 0;
atomic_set(&new->bf_count, 1);
new->bf_bio_status = 0;
atomic_set(&new->bf_bio_count, 0);
//INIT_LIST_HEAD(&new->bf_mref_anchor);
//INIT_LIST_HEAD(&new->bf_lru_head);
INIT_LIST_HEAD(&new->bf_hash_head);
INIT_LIST_HEAD(&new->bf_io_pending_anchor);
INIT_LIST_HEAD(&new->bf_postpone_anchor);
/* Check for races against us...
*/
goto again;
}
mref_a->rfa_bf = bf;
@ -462,7 +518,7 @@ static void __bf_put(struct buf_head *bf)
} // else no harm can happen
// lru freeing (this is completely independent from bf)
__lru_free(brick);
__lru_free(brick, &flags);
__prune_cache(brick, brick->max_count * 2, &flags);
traced_unlock(&brick->brick_lock, flags);
@ -848,11 +904,11 @@ static void buf_ref_io(struct buf_output *output, struct mars_ref_object *mref,
#if 1
if (jiffies - brick->last_jiffies >= 30 * HZ) {
int hit = atomic_read(&brick->hit_count);
int miss = atomic_read(&brick->miss_count);
int perc = hit * 100 * 100 / (hit + miss);
unsigned long hit = atomic_read(&brick->hit_count);
unsigned long miss = atomic_read(&brick->miss_count);
unsigned long perc = hit * 100 * 100 / (hit + miss);
brick->last_jiffies = jiffies;
MARS_INF("STATISTICS: hashed=%d alloc=%d io_pending=%d hit=%d (%d.%02d%%) miss=%d io=%d\n", brick->hashed_count, brick->alloc_count, atomic_read(&brick->nr_io_pending), hit, perc / 100, perc % 100, miss, atomic_read(&brick->io_count));
MARS_INF("STATISTICS: hashed=%d alloc=%d io_pending=%d hit=%lu (%lu.%02lu%%) miss=%lu collisions=%d io=%d\n", atomic_read(&brick->hashed_count), atomic_read(&brick->alloc_count), atomic_read(&brick->nr_io_pending), hit, perc / 100, perc % 100, miss, atomic_read(&brick->nr_collisions), atomic_read(&brick->io_count));
}
#endif
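The statistics switch to unsigned long and report the hit ratio with two implied decimals: scaling by 100 * 100 before the integer division yields e.g. 9987, printed as 99.87%. (The kernel code divides unconditionally; the sketch below adds the hit + miss == 0 guard.) A worked example:

#include <stdio.h>

int main(void)
{
	unsigned long hit = 4711, miss = 6;
	unsigned long total = hit + miss;
	/* 4711 * 10000 / 4717 = 9987 -> printed as "99.87%" */
	unsigned long perc = total ? hit * 100 * 100 / total : 0;

	printf("hit=%lu (%lu.%02lu%%) miss=%lu\n",
	       hit, perc / 100, perc % 100, miss);
	return 0;
}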
@ -983,15 +1039,17 @@ static int buf_brick_construct(struct buf_brick *brick)
brick->backing_order = 5; // TODO: make this configurable
brick->backing_size = PAGE_SIZE << brick->backing_order;
brick->max_count = 32; // TODO: make this configurable
brick->hashed_count = 0;
brick->alloc_count = 0;
atomic_set(&brick->alloc_count, 0);
atomic_set(&brick->hashed_count, 0);
atomic_set(&brick->nr_io_pending, 0);
atomic_set(&brick->nr_collisions, 0);
spin_lock_init(&brick->brick_lock);
//rwlock_init(&brick->brick_lock);
INIT_LIST_HEAD(&brick->free_anchor);
INIT_LIST_HEAD(&brick->lru_anchor);
for (i = 0; i < MARS_BUF_HASH_MAX; i++) {
INIT_LIST_HEAD(&brick->cache_anchors[i]);
spin_lock_init(&brick->cache_anchors[i].hash_lock);
INIT_LIST_HEAD(&brick->cache_anchors[i].hash_anchor);
}
return 0;
}
@ -1008,7 +1066,7 @@ static int buf_brick_destruct(struct buf_brick *brick)
traced_lock(&brick->brick_lock, flags);
brick->max_count = 0;
__lru_free(brick);
__lru_free(brick, &flags);
__prune_cache(brick, 0, &flags);
traced_unlock(&brick->brick_lock, flags);

View File

@ -15,6 +15,11 @@ struct buf_mars_ref_aspect {
struct generic_callback cb;
};
struct cache_anchor {
spinlock_t hash_lock;
struct list_head hash_anchor;
};
struct buf_brick {
MARS_BRICK(buf);
/* brick parameters */
@ -24,15 +29,16 @@ struct buf_brick {
/* internals */
spinlock_t brick_lock;
int alloc_count;
int hashed_count;
atomic_t alloc_count;
atomic_t hashed_count;
atomic_t nr_io_pending;
atomic_t nr_collisions;
struct generic_object_layout mref_object_layout;
// lists for caching
struct list_head free_anchor; // members are not hashed
struct list_head lru_anchor; // members are hashed and not in use
struct list_head cache_anchors[MARS_BUF_HASH_MAX]; // hash table
struct cache_anchor cache_anchors[MARS_BUF_HASH_MAX]; // hash table
// for creation of bios
struct mars_info base_info;

View File

@ -26,6 +26,7 @@ static void check_buf_endio(struct generic_callback *cb)
struct mars_ref_object *mref = mref_a->object;
struct check_output *output = mref_a->output;
struct check_input *input = output->brick->inputs[0];
struct generic_callback *prev_cb;
unsigned long flags;
if (!mref_a) {
@ -34,8 +35,8 @@ static void check_buf_endio(struct generic_callback *cb)
return;
}
if (mref_a->call_count-- < 0) {
mref_a->call_count = 0;
if (atomic_dec_and_test(&mref_a->callback_count)) {
atomic_set(&mref_a->callback_count, 1);
MARS_ERR("instance %d/%s: too many callbacks on %p\n", output->instance_nr, input->connect->type->type_name, mref);
}
@ -48,18 +49,21 @@ static void check_buf_endio(struct generic_callback *cb)
list_del_init(&mref_a->mref_head);
traced_unlock(&output->check_lock, flags);
#else
(void)flags;
#endif
mref_a->last_jiffies = jiffies;
cb = cb->cb_prev;
if (!cb) {
prev_cb = cb->cb_prev;
if (!prev_cb) {
MARS_FAT("cannot get chain callback\n");
return;
}
cb->cb_fn(cb);
prev_cb->cb_fn(prev_cb);
}
#ifdef CHECK_LOCK
static void dump_mem(void *data, int len)
{
int i;
@ -91,7 +95,6 @@ static int check_watchdog(void *data)
msleep_interruptible(5000);
#ifdef CHECK_LOCK
traced_lock(&output->check_lock, flags);
now = jiffies;
@ -130,10 +133,10 @@ static int check_watchdog(void *data)
}
traced_unlock(&output->check_lock, flags);
#endif
}
return 0;
}
#endif
////////////////// own brick / input / output operations //////////////////
@ -168,10 +171,11 @@ static void check_ref_io(struct check_output *output, struct mars_ref_object *mr
return;
}
if (mref_a->call_count++ > 1) {
mref_a->call_count = 1;
if (atomic_dec_and_test(&mref_a->call_count)) {
atomic_set(&mref_a->call_count, 1);
MARS_ERR("instance %d/%s: multiple parallel calls on %p\n", output->instance_nr, input->connect->type->type_name, mref);
}
atomic_set(&mref_a->callback_count, 2);
#ifdef CHECK_LOCK
traced_lock(&output->check_lock, flags);
@ -181,6 +185,8 @@ static void check_ref_io(struct check_output *output, struct mars_ref_object *mr
}
list_add_tail(&mref_a->mref_head, &output->mref_anchor);
traced_unlock(&output->check_lock, flags);
#else
(void)flags;
#endif
if (mref->ref_cb != cb) {
@ -194,6 +200,8 @@ static void check_ref_io(struct check_output *output, struct mars_ref_object *mr
mref_a->last_jiffies = jiffies;
GENERIC_INPUT_CALL(input, mars_ref_io, mref, rw);
atomic_inc(&mref_a->call_count);
}
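The plain call_count int becomes an atomic entry/exit budget: it starts at 2, check_ref_io() decrements on entry and increments again on exit, so the counter only reaches zero when a second caller enters while the first is still inside — exactly what atomic_dec_and_test() flags. A self-contained C11 sketch of the pattern (illustrative names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int call_count = 2;	/* budget: one caller may be inside */

static void guarded_call(void)
{
	/* atomic_fetch_sub() returns the old value; old == 1 means the
	 * new value is 0, which is what atomic_dec_and_test() reports. */
	if (atomic_fetch_sub(&call_count, 1) == 1) {
		atomic_store(&call_count, 1);	/* re-arm after reporting */
		fprintf(stderr, "multiple parallel calls detected\n");
	}

	/* ... forward the request ... */

	atomic_fetch_add(&call_count, 1);	/* leaving: restore the budget */
}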
//////////////// object / aspect constructors / destructors ///////////////
@ -205,7 +213,8 @@ static int check_mars_ref_aspect_init_fn(struct generic_aspect *_ini, void *_ini
INIT_LIST_HEAD(&ini->mref_head);
#endif
ini->last_jiffies = jiffies;
ini->call_count = 0;
atomic_set(&ini->call_count, 2);
atomic_set(&ini->callback_count, 1);
return 0;
}
@ -230,19 +239,18 @@ static int check_brick_construct(struct check_brick *brick)
static int check_output_construct(struct check_output *output)
{
static int count = 0;
#ifdef CHECK_LOCK
struct task_struct *watchdog;
output->instance_nr = ++count;
#ifdef CHECK_LOCK
spin_lock_init(&output->check_lock);
INIT_LIST_HEAD(&output->mio_anchor);
INIT_LIST_HEAD(&output->mref_anchor);
#endif
watchdog = kthread_create(check_watchdog, output, "check_watchdog%d", output->instance_nr);
if (!IS_ERR(watchdog)) {
output->watchdog = watchdog;
wake_up_process(watchdog);
}
#endif
output->instance_nr = ++count;
return 0;
}

View File

@ -2,7 +2,7 @@
#ifndef MARS_CHECK_H
#define MARS_CHECK_H
//#define CHECK_LOCK
#define CHECK_LOCK
struct check_mars_ref_aspect {
GENERIC_ASPECT(mars_ref);
@ -12,7 +12,8 @@ struct check_mars_ref_aspect {
struct generic_callback cb;
struct check_output *output;
unsigned long last_jiffies;
int call_count;
atomic_t call_count;
atomic_t callback_count;
};
struct check_brick {
@ -26,10 +27,9 @@ struct check_input {
struct check_output {
MARS_OUTPUT(check);
int instance_nr;
struct task_struct *watchdog;
#ifdef CHECK_LOCK
struct task_struct *watchdog;
spinlock_t check_lock;
struct list_head mio_anchor;
struct list_head mref_anchor;
#endif
};

View File

@ -120,11 +120,16 @@ void make_test_instance(void)
buf_brick = brick(&buf_brick_type);
_buf_brick = (void*)buf_brick;
_buf_brick->backing_order = 0;
_buf_brick->backing_order = 4;
_buf_brick->backing_order = 7;
//_buf_brick->backing_order = 0;
_buf_brick->backing_order = 2;
//_buf_brick->backing_order = 4;
//_buf_brick->backing_order = 7;
_buf_brick->backing_size = PAGE_SIZE << _buf_brick->backing_order;
#if 0
_buf_brick->max_count = MEM >> _buf_brick->backing_order;
#else
_buf_brick->max_count = 32768 / 2;
#endif
connect(last, buf_brick->outputs[0]);

View File

@ -46,7 +46,8 @@ static struct trans_logger_mars_ref_aspect *hash_find(struct hash_anchor *table,
/* Caution: there may be duplicates in the list, some of them
* overlapping in many different ways.
* Always find both the _newest_ and _lowest_ overlapping element!
* Always find both the _newest_ and _lowest_ overlapping element.
* The lists are always sorted according to age.
*/
for (tmp = start->hash_anchor.next; tmp != &start->hash_anchor; tmp = tmp->next) {
#if 1
@ -60,19 +61,17 @@ static struct trans_logger_mars_ref_aspect *hash_find(struct hash_anchor *table,
#endif
test_a = container_of(tmp, struct trans_logger_mars_ref_aspect, hash_head);
test = test_a->object;
// are the regions overlapping?
if (pos < test->ref_pos + test->ref_len && pos + len > test->ref_pos) {
if (test->ref_pos >= pos) {
// always prefer the lowest distance, even if elder
if (test->ref_pos < min_pos || min_pos < 0) {
min_pos = test->ref_pos;
res = test_a;
}
} else {
// always take the newest one, distance does not matter
if (min_pos < 0) {
min_pos = test->ref_pos;
res = test_a;
}
if (
// always take the newest one
min_pos < 0 ||
// prefer the lowest positive distance
(test->ref_pos < min_pos && test->ref_pos >= pos)
) {
min_pos = test->ref_pos;
res = test_a;
}
}
}
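The rewritten selection collapses the old two-branch logic into one test. Since the hash lists are sorted newest-first, the first overlap seen is the newest; after that, a later (older) element only wins if it starts at or after pos and closer to it. Restated over plain integers, with a worked trace:

/* Scanning overlaps newest-first: keep the entry whose start is
 * lowest among those >= pos; if none qualifies, keep the newest. */
static int better_candidate(long min_pos, long test_pos, long pos)
{
	return min_pos < 0 ||				/* first overlap: the newest */
	       (test_pos < min_pos && test_pos >= pos);	/* lower start, not before pos */
}

/* Example for pos = 100, overlaps scanned newest-first at 120, 90, 105:
 *   120: min_pos < 0             -> candidate, min_pos = 120
 *    90: 90 >= 100 fails         -> rejected (starts before pos)
 *   105: 105 < 120 && 105 >= 100 -> candidate, min_pos = 105
 */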
@ -228,6 +227,8 @@ static const struct trans_logger_input_type trans_logger_input_type = {
static const struct trans_logger_input_type *trans_logger_input_types[] = {
&trans_logger_input_type,
&trans_logger_input_type,
&trans_logger_input_type,
};
static const struct trans_logger_output_type trans_logger_output_type = {
@ -248,7 +249,7 @@ static const struct trans_logger_output_type *trans_logger_output_types[] = {
const struct trans_logger_brick_type trans_logger_brick_type = {
.type_name = "trans_logger_brick",
.brick_size = sizeof(struct trans_logger_brick),
.max_inputs = 1,
.max_inputs = 3,
.max_outputs = 1,
.master_ops = &trans_logger_brick_ops,
.default_input_types = trans_logger_input_types,