improve detection of memleaks

Thomas Schoebel-Theuer 2012-02-02 16:25:43 +01:00 committed by Thomas Schoebel-Theuer
parent 287bb506b9
commit 91f262b72b
19 changed files with 27 additions and 58 deletions
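The change, in outline: the per-call struct generic_object_layout parameters are dropped from all alloc wrappers, one layout is embedded into every brick instead, and generic_alloc() counts allocations there so that mars_free_brick() can report any mrefs still allocated at teardown. A minimal userspace sketch of the counting pattern (simplified stand-in types, not the actual MARS code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for generic_object_layout and a brick. */
struct object_layout {
	int size_hint;                /* size of the last allocation, for dumps */
	atomic_int alloc_count;       /* objects currently live                 */
	atomic_int total_alloc_count; /* lifetime number of allocations         */
};

struct brick {
	const char *path;
	struct object_layout mref_object_layout; /* embedded, as MARS_BRICK() now does */
};

static void *obj_alloc(struct brick *b, int size)
{
	void *obj = calloc(1, size);
	if (!obj)
		return NULL;
	b->mref_object_layout.size_hint = size;
	atomic_fetch_add(&b->mref_object_layout.alloc_count, 1);
	atomic_fetch_add(&b->mref_object_layout.total_alloc_count, 1);
	return obj;
}

static void obj_free(struct brick *b, void *obj)
{
	atomic_fetch_sub(&b->mref_object_layout.alloc_count, 1);
	free(obj);
}

/* At brick teardown, any nonzero alloc_count is a leak. */
static void check_leaks(struct brick *b)
{
	int count = atomic_load(&b->mref_object_layout.alloc_count);
	if (count > 0)
		fprintf(stderr, "MEMLEAK: brick '%s' has %d mrefs allocated (total = %d)\n",
			b->path, count,
			atomic_load(&b->mref_object_layout.total_alloc_count));
}

The diffs below apply exactly this pattern to the MARS types.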

brick.c

@@ -364,21 +364,6 @@ int generic_brick_exit_recursively(struct generic_brick *brick, bool destroy_inp
 }
 EXPORT_SYMBOL_GPL(generic_brick_exit_recursively);
 ////////////////////////////////////////////////////////////////////////
-// object_layout init stuff
-void init_generic_object_layout(struct generic_object_layout *lay, const struct generic_object_type *type)
-{
-}
-EXPORT_SYMBOL_GPL(init_generic_object_layout);
-void exit_generic_object_layout(struct generic_object_layout *lay)
-{
-}
-EXPORT_SYMBOL_GPL(exit_generic_object_layout);
-////////////////////////////////////////////////////////////////////////
 // default implementations
@@ -407,6 +392,7 @@ struct generic_object *generic_alloc(struct generic_brick *brick, struct generic
 goto err;
+atomic_inc(&object_layout->alloc_count);
+atomic_inc(&object_layout->total_alloc_count);
 object = data;
 object->object_type = object_type;

brick.h

@@ -89,18 +89,15 @@ struct generic_object_type {
 #define GENERIC_OBJECT_LAYOUT(OBJTYPE) \
 int size_hint; \
+atomic_t alloc_count; \
+atomic_t free_count; \
+atomic_t total_alloc_count; \
 struct generic_object_layout {
 GENERIC_OBJECT_LAYOUT(generic);
 };
-extern void init_generic_object_layout(struct generic_object_layout *lay, const struct generic_object_type *type);
-extern void exit_generic_object_layout(struct generic_object_layout *lay);
 #define GENERIC_OBJECT(OBJTYPE) \
 const struct generic_object_type *object_type; \
-struct OBJTYPE##_object_layout *object_layout; \
+struct generic_object_layout *object_layout; \
 struct OBJTYPE##_aspect **aspects; \
 int aspect_nr_max; \
 int free_offset; \
@@ -530,9 +527,9 @@ extern struct generic_aspect *generic_get_aspect(struct generic_brick *brick, st
 #define DECLARE_ASPECT_FUNCTIONS(BRITYPE,OBJTYPE) \
 \
-INLINE struct OBJTYPE##_object *BRITYPE##_alloc_##OBJTYPE(struct BRITYPE##_brick *brick, struct generic_object_layout *object_layout) \
+INLINE struct OBJTYPE##_object *BRITYPE##_alloc_##OBJTYPE(struct BRITYPE##_brick *brick) \
 { \
-return (void*)generic_alloc((struct generic_brick*)brick, object_layout, &OBJTYPE##_type); \
+return (void*)generic_alloc((struct generic_brick*)brick, &brick->OBJTYPE##_object_layout, &OBJTYPE##_type); \
 } \
 \
 INLINE void BRITYPE##_free_##OBJTYPE(struct OBJTYPE##_object *object) \
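With the layout embedded in the brick, every allocator generated by DECLARE_ASPECT_FUNCTIONS() loses its layout parameter. As an illustration, DECLARE_ASPECT_FUNCTIONS(buf,mref) now expands roughly to the following (derived from the macro body above; not a literal preprocessor dump):

/* Illustrative expansion for BRITYPE=buf, OBJTYPE=mref. */
INLINE struct mref_object *buf_alloc_mref(struct buf_brick *brick)
{
	return (void*)generic_alloc((struct generic_brick*)brick,
				    &brick->mref_object_layout,
				    &mref_type);
}

Accordingly, every call site below shrinks from buf_alloc_mref(brick, &brick->mref_object_layout) to buf_alloc_mref(brick).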

(file name not shown)

@@ -22,7 +22,6 @@ void exit_logst(struct log_status *logst)
 MARS_DBG("waiting for IO terminating...");
 msleep(500);
 }
-exit_generic_object_layout(&logst->ref_object_layout);
 }
 EXPORT_SYMBOL_GPL(exit_logst);
@@ -189,7 +188,7 @@ void *log_reserve(struct log_status *logst, struct log_header *lh)
 sema_init(&cb_info->mutex, 1);
 atomic_set(&cb_info->refcount, 2);
-mref = mars_alloc_mref(logst->brick, &logst->ref_object_layout);
+mref = mars_alloc_mref(logst->brick);
 if (unlikely(!mref)) {
 MARS_ERR("no mref\n");
 goto err;
@@ -488,7 +487,7 @@ restart:
 logst->offset = 0;
 }
-mref = mars_alloc_mref(logst->brick, &logst->ref_object_layout);
+mref = mars_alloc_mref(logst->brick);
 if (unlikely(!mref)) {
 MARS_ERR("no mref\n");
 goto done;

(file name not shown)

@@ -100,7 +100,6 @@ struct log_status {
 // internal
 struct mars_input *input;
 struct mars_brick *brick;
-struct generic_object_layout ref_object_layout;
 struct mars_info info;
 int offset;
 int validflag_offset;

mars.h

@@ -74,14 +74,6 @@
 extern const struct generic_object_type mref_type;
 struct mref_aspect {
 GENERIC_ASPECT(mref);
 };
-struct mref_object_layout {
-GENERIC_OBJECT_LAYOUT(mref);
-};
 #ifdef MARS_TRACING
 extern unsigned long long start_trace_clock;
@@ -144,6 +136,7 @@ struct mars_info {
 #define MARS_BRICK(BRITYPE) \
 GENERIC_BRICK(BRITYPE); \
+struct generic_object_layout mref_object_layout; \
 struct list_head global_brick_link; \
 struct list_head dent_brick_link; \
 const char *brick_path; \
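Since MARS_BRICK() is part of every brick definition, each brick type now carries its own mref layout and thus its own allocation counters. Roughly, for a buf brick (only the members visible in the macro lines above are shown; everything else elided):

/* Illustrative: members MARS_BRICK(buf) now contributes to struct buf_brick. */
struct buf_brick {
	/* ...members from GENERIC_BRICK(buf)... */
	struct generic_object_layout mref_object_layout; /* new: per-brick alloc counters */
	struct list_head global_brick_link;
	struct list_head dent_brick_link;
	const char *brick_path;
	/* ... */
};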
@@ -217,8 +210,6 @@ DECLARE_BRICK_FUNCTIONS(BRITYPE); \
 \
 _MARS_TYPES(BRITYPE) \
 \
-struct BRITYPE##_object_layout; \
-\
 DECLARE_ASPECT_FUNCTIONS(BRITYPE,mref); \
 extern int init_mars_##BRITYPE(void); \
 extern void exit_mars_##BRITYPE(void);

(file name not shown)

@@ -621,7 +621,7 @@ static int _buf_make_io(struct buf_brick *brick, struct buf_head *bf, void *star
 struct buf_mref_aspect *mref_a;
 int len;
-mref = buf_alloc_mref(brick, &brick->mref_object_layout);
+mref = buf_alloc_mref(brick);
 if (unlikely(!mref))
 break;

(file name not shown)

@@ -41,7 +41,6 @@ struct buf_brick {
 atomic_t hashed_count;
 atomic_t nr_io_pending;
 atomic_t nr_collisions;
-struct generic_object_layout mref_object_layout;
 struct mars_info base_info;
 bool got_info;

(file name not shown)

@@ -152,7 +152,7 @@ static int check_watchdog(void *data)
 mref_a->last_jiffies = now + 600 * HZ;
 MARS_INF("================================\n");
 CHECK_ERR(output, "mref %p callback is missing for more than %d seconds.\n", mref, timeout);
-object_layout = (void*)mref->object_layout;
+object_layout = mref->object_layout;
 dump_mem(mref, object_layout->size_hint);
 MARS_INF("================================\n");
 }

(file name not shown)

@@ -196,7 +196,7 @@ int _make_mref(struct copy_brick *brick, int index, int queue, void *data, loff_
 if (brick->clash || !tmp_pos)
 goto done;
-mref = copy_alloc_mref(brick, &brick->mref_object_layout);
+mref = copy_alloc_mref(brick);
 status = -ENOMEM;
 if (unlikely(!mref))
 goto done;

(file name not shown)

@@ -63,7 +63,6 @@ struct copy_brick {
 wait_queue_head_t event;
 struct semaphore mutex;
 struct task_struct *thread;
-struct generic_object_layout mref_object_layout;
 struct copy_state st[MAX_COPY_PARA];
 };

(file name not shown)

@@ -380,7 +380,7 @@ static int if_make_request(struct request_queue *q, struct bio *bio)
 if (!mref) {
 int prefetch_len;
 error = -ENOMEM;
-mref = if_alloc_mref(brick, &input->mref_object_layout);
+mref = if_alloc_mref(brick);
 if (unlikely(!mref)) {
 up(&input->kick_sem);
 goto err;

(file name not shown)

@@ -61,7 +61,6 @@ struct if_input {
 atomic_t total_mref_write_count;
 spinlock_t req_lock;
 struct semaphore kick_sem;
-struct generic_object_layout mref_object_layout;
 struct mars_info info;
 spinlock_t hash_lock[IF_HASH_MAX];
 struct list_head hash_table[IF_HASH_MAX];

(file name not shown)

@@ -145,7 +145,7 @@ int server_io(struct server_brick *brick, struct mars_socket *sock)
 if (!brick->cb_running || !mars_socket_is_alive(sock))
 goto done;
-mref = server_alloc_mref(brick, &brick->mref_object_layout);
+mref = server_alloc_mref(brick);
 status = -ENOMEM;
 if (!mref)
 goto done;

(file name not shown)

@@ -26,7 +26,6 @@ struct server_brick {
 struct task_struct *cb_thread;
 wait_queue_head_t startup_event;
 wait_queue_head_t cb_event;
-struct generic_object_layout mref_object_layout;
 spinlock_t cb_lock;
 struct list_head cb_read_list;
 struct list_head cb_write_list;

(file name not shown)

@@ -1040,7 +1040,7 @@ struct writeback_info *make_writeback(struct trans_logger_brick *brick, loff_t p
 int this_len;
 int status;
-sub_mref = trans_logger_alloc_mref(brick, &read_input->sub_layout);
+sub_mref = trans_logger_alloc_mref(brick);
 if (unlikely(!sub_mref)) {
 MARS_FAT("cannot alloc sub_mref\n");
 goto err;
@ -1106,7 +1106,7 @@ struct writeback_info *make_writeback(struct trans_logger_brick *brick, loff_t p
}
data = orig_mref_a->shadow_data + diff;
sub_mref = trans_logger_alloc_mref(brick, &write_input->sub_layout);
sub_mref = trans_logger_alloc_mref(brick);
if (unlikely(!sub_mref)) {
MARS_FAT("cannot alloc sub_mref\n");
goto err;
@@ -2257,7 +2257,7 @@ int apply_data(struct trans_logger_brick *brick, loff_t pos, void *buf, int len)
 struct trans_logger_mref_aspect *mref_a;
 status = -ENOMEM;
-mref = trans_logger_alloc_mref(brick, &input->sub_layout);
+mref = trans_logger_alloc_mref(brick);
 if (unlikely(!mref)) {
 MARS_ERR("no memory\n");
 goto done;
@@ -2497,13 +2497,14 @@ char *trans_logger_statistics(struct trans_logger_brick *brick, int verbose)
 if (!res)
 return NULL;
-snprintf(res, 1023, "mode replay=%d continuous=%d replay_code=%d log_reads=%d | replay_start_pos = %lld replay_end_pos = %lld | new_input_nr = %d log_input_nr = %d (old = %d) replay_min_pos1 = %lld replay_max_pos1 = %lld replay_min_pos2 = %lld replay_max_pos2 = %lld | total replay=%d callbacks=%d reads=%d writes=%d flushes=%d (%d%%) wb_clusters=%d writebacks=%d (%d%%) shortcut=%d (%d%%) mshadow=%d sshadow=%d rounds=%d restarts=%d delays=%d phase1=%d phase2=%d phase3=%d phase4=%d | current shadow_mem_used=%ld/%lld replay=%d mshadow=%d/%d sshadow=%d hash_count=%d pos_count=%d balance=%d/%d/%d/%d fly=%d phase1=%d+%d phase2=%d+%d phase3=%d+%d phase4=%d+%d\n",
+snprintf(res, 1023, "mode replay=%d continuous=%d replay_code=%d log_reads=%d | replay_start_pos = %lld replay_end_pos = %lld | new_input_nr = %d log_input_nr = %d (old = %d) replay_min_pos1 = %lld replay_max_pos1 = %lld replay_min_pos2 = %lld replay_max_pos2 = %lld | total replay=%d callbacks=%d reads=%d writes=%d flushes=%d (%d%%) wb_clusters=%d writebacks=%d (%d%%) shortcut=%d (%d%%) mshadow=%d sshadow=%d rounds=%d restarts=%d delays=%d phase1=%d phase2=%d phase3=%d phase4=%d | current #mrefs = %d shadow_mem_used=%ld/%lld replay=%d mshadow=%d/%d sshadow=%d hash_count=%d pos_count=%d balance=%d/%d/%d/%d fly=%d phase1=%d+%d phase2=%d+%d phase3=%d+%d phase4=%d+%d\n",
 brick->do_replay, brick->do_continuous_replay, brick->replay_code, brick->log_reads,
 brick->replay_start_pos, brick->replay_end_pos,
 brick->new_input_nr, brick->log_input_nr, brick->old_input_nr,
 brick->inputs[TL_INPUT_LOG1]->replay_min_pos, brick->inputs[TL_INPUT_LOG1]->replay_max_pos,
 brick->inputs[TL_INPUT_LOG2]->replay_min_pos, brick->inputs[TL_INPUT_LOG2]->replay_max_pos,
 atomic_read(&brick->total_replay_count), atomic_read(&brick->total_cb_count), atomic_read(&brick->total_read_count), atomic_read(&brick->total_write_count), atomic_read(&brick->total_flush_count), atomic_read(&brick->total_write_count) ? atomic_read(&brick->total_flush_count) * 100 / atomic_read(&brick->total_write_count) : 0, atomic_read(&brick->total_writeback_cluster_count), atomic_read(&brick->total_writeback_count), atomic_read(&brick->total_writeback_cluster_count) ? atomic_read(&brick->total_writeback_count) * 100 / atomic_read(&brick->total_writeback_cluster_count) : 0, atomic_read(&brick->total_shortcut_count), atomic_read(&brick->total_writeback_count) ? atomic_read(&brick->total_shortcut_count) * 100 / atomic_read(&brick->total_writeback_count) : 0, atomic_read(&brick->total_mshadow_count), atomic_read(&brick->total_sshadow_count), atomic_read(&brick->total_round_count), atomic_read(&brick->total_restart_count), atomic_read(&brick->total_delay_count), atomic_read(&brick->q_phase1.q_total), atomic_read(&brick->q_phase2.q_total), atomic_read(&brick->q_phase3.q_total), atomic_read(&brick->q_phase4.q_total),
+atomic_read(&brick->mref_object_layout.alloc_count),
 atomic64_read(&brick->shadow_mem_used), brick_global_memlimit, atomic_read(&brick->replay_count), atomic_read(&brick->mshadow_count), brick->shadow_mem_limit, atomic_read(&brick->sshadow_count), atomic_read(&brick->hash_count), atomic_read(&brick->pos_count), atomic_read(&brick->sub_balance_count), atomic_read(&brick->inner_balance_count), atomic_read(&brick->outer_balance_count), atomic_read(&brick->wb_balance_count), atomic_read(&brick->fly_count), atomic_read(&brick->q_phase1.q_queued), atomic_read(&brick->q_phase1.q_flying), atomic_read(&brick->q_phase2.q_queued), atomic_read(&brick->q_phase2.q_flying), atomic_read(&brick->q_phase3.q_queued), atomic_read(&brick->q_phase3.q_flying), atomic_read(&brick->q_phase4.q_queued), atomic_read(&brick->q_phase4.q_flying));
 return res;
 }

(file name not shown)

@@ -191,7 +191,6 @@ struct trans_logger_input {
 bool is_operating;
 // private
-struct generic_object_layout sub_layout;
 struct log_status logst;
 spinlock_t pos_lock;
 struct list_head pos_list;

(file name not shown)

@@ -130,7 +130,7 @@ static int usebuf_ref_get(struct usebuf_output *output, struct mref_object *mref
 sub_mref_a = mref_a->sub_mref_a;
 if (!sub_mref_a) {
-sub_mref = usebuf_alloc_mref(output->brick, &output->mref_object_layout);
+sub_mref = usebuf_alloc_mref(output->brick);
 if (unlikely(!sub_mref)) {
 MARS_FAT("cannot get sub_mref\n");
 return -ENOMEM;

(file name not shown)

@@ -21,7 +21,6 @@ struct usebuf_input {
 struct usebuf_output {
 MARS_OUTPUT(usebuf);
-struct generic_object_layout mref_object_layout;
 };
 MARS_TYPES(usebuf);

(file name not shown)

@@ -864,6 +864,7 @@ int mars_free_brick(struct mars_brick *brick)
 {
 struct mars_global *global;
 int i;
+int count;
 int status;
 if (!brick) {
@ -873,7 +874,7 @@ int mars_free_brick(struct mars_brick *brick)
}
if (!brick->power.force_off || !brick->power.led_off) {
MARS_DBG("brick '%s' is not freeable\n", brick->brick_path);
MARS_WRN("brick '%s' is not freeable\n", brick->brick_path);
status = -ETXTBSY;
goto done;
}
@@ -882,12 +883,17 @@ int mars_free_brick(struct mars_brick *brick)
 for (i = 0; i < brick->type->max_outputs; i++) {
 struct mars_output *output = brick->outputs[i];
 if (output && output->nr_connected > 0) {
-MARS_DBG("brick '%s' not freeable, output %i is used\n", brick->brick_path, i);
+MARS_WRN("brick '%s' not freeable, output %i is used\n", brick->brick_path, i);
 status = -EEXIST;
 goto done;
 }
 }
+count = atomic_read(&brick->mref_object_layout.alloc_count);
+if (count > 0) {
+MARS_ERR("MEMLEAK: brick '%s' has %d mrefs allocated (total = %d)\n", brick->brick_path, count, atomic_read(&brick->mref_object_layout.total_alloc_count));
+}
 MARS_DBG("===> freeing brick name = '%s' path = '%s'\n", brick->brick_name, brick->brick_path);
 global = brick->global;
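The check above only stays meaningful if the free path decrements alloc_count for every allocation. The body of generic_free() is not part of this diff; assuming it mirrors the counting in generic_alloc(), its bookkeeping would look roughly like this (a sketch, not the actual MARS code):

/* Sketch only: assumes generic_free mirrors the counting in generic_alloc. */
void generic_free(struct generic_object *object)
{
	struct generic_object_layout *object_layout = object->object_layout;

	atomic_dec(&object_layout->alloc_count); /* balances generic_alloc() */
	atomic_inc(&object_layout->free_count);
	/* ...release aspects and the object memory itself... */
}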
@@ -906,18 +912,14 @@ int mars_free_brick(struct mars_brick *brick)
 }
 }
-#ifndef MEMLEAK // TODO: check whether crash remains possible
 MARS_DBG("deallocate name = '%s' path = '%s'\n", SAFE_STR(brick->brick_name), SAFE_STR(brick->brick_path));
 brick_string_free(brick->brick_name);
 brick_string_free(brick->brick_path);
-#endif
 status = generic_brick_exit_full((void*)brick);
 if (status >= 0) {
-#ifndef MEMLEAK // TODO: check whether crash remains possible
 brick_mem_free(brick);
-#endif
 mars_trigger();
 } else {
 MARS_ERR("error freeing brick, status = %d\n", status);