mars/mars_buf.c

// (c) 2010 Thomas Schoebel-Theuer / 1&1 Internet AG
// Buf brick
//#define BRICK_DEBUGGING
//#define MARS_DEBUGGING
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include "mars.h"
//#define USE_VMALLOC
//#define FAKE_IO
///////////////////////// own type definitions ////////////////////////
#include "mars_buf.h"
#define PRE_ALLOC 8
///////////////////////// own helper functions ////////////////////////
static inline int buf_hash_fn(loff_t base_index)
{
// simple and stupid
loff_t tmp;
tmp = base_index ^ (base_index / MARS_BUF_HASH_MAX);
tmp += tmp / 13;
tmp ^= tmp / (MARS_BUF_HASH_MAX * MARS_BUF_HASH_MAX);
return tmp % MARS_BUF_HASH_MAX;
}
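/* Look up the buffer covering base_index in its hash bucket.
 * On a hit, grab a reference and take the buffer off the LRU list.
 * On a miss, insert @new into the bucket (when given) and return NULL;
 * the caller either retries or goes on using @new.
 */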
static struct buf_head *hash_find_insert(struct buf_brick *brick, loff_t base_index, struct buf_head *new)
{
int hash = buf_hash_fn(base_index);
spinlock_t *lock = &brick->cache_anchors[hash].hash_lock;
struct list_head *start = &brick->cache_anchors[hash].hash_anchor;
struct list_head *tmp;
struct buf_head *res;
int count = 0;
unsigned long flags;
traced_lock(lock, flags);
for (tmp = start->next; tmp != start; tmp = tmp->next) {
#if 1
{
static int max = 0;
if (++count > max) {
max = count;
if (!(max % 10)) {
MARS_INF("hash maxlen=%d hash=%d base_index=%llu\n", max, hash, base_index);
}
}
}
#endif
res = container_of(tmp, struct buf_head, bf_hash_head);
if (res->bf_base_index == base_index) { // found
int old_bf_count = atomic_read(&res->bf_count);
CHECK_ATOMIC(&res->bf_count, 0);
atomic_inc(&res->bf_count);
traced_unlock(lock, flags);
if (true || old_bf_count <= 0) {
traced_lock(&brick->brick_lock, flags);
if (!list_empty(&res->bf_lru_head)) {
list_del_init(&res->bf_lru_head);
atomic_dec(&brick->lru_count);
}
traced_unlock(&brick->brick_lock, flags);
}
return res;
}
}
if (new) {
atomic_inc(&brick->hashed_count);
CHECK_HEAD_EMPTY(&new->bf_hash_head);
list_add(&new->bf_hash_head, start);
}
traced_unlock(lock, flags);
return NULL;
}
static inline void free_bf(struct buf_brick *brick, struct buf_head *bf)
{
atomic_dec(&brick->alloc_count);
MARS_INF("really freeing bf=%p\n", bf);
CHECK_HEAD_EMPTY(&bf->bf_lru_head);
CHECK_HEAD_EMPTY(&bf->bf_hash_head);
CHECK_HEAD_EMPTY(&bf->bf_io_pending_anchor);
CHECK_HEAD_EMPTY(&bf->bf_postpone_anchor);
#ifdef USE_VMALLOC
vfree(bf->bf_data);
#else
free_pages((unsigned long)bf->bf_data, brick->backing_order);
#endif
kfree(bf);
}
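/* Buffer memory is managed in two stages:
 * - brick->lru_anchor holds hashed buffers whose bf_count dropped to zero;
 *   __lru_free_one() may unhash them and recycle their memory.
 * - brick->free_anchor holds allocated but unhashed buffers, ready for
 *   immediate reuse; __prune_cache() finally frees them when alloc_count
 *   exceeds the given limit.
 */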
/* brick->brick_lock must be held
*/
static inline void __prune_cache(struct buf_brick *brick, int max_count, unsigned long *flags)
{
#if 0
return;
#endif
while (atomic_read(&brick->alloc_count) >= max_count) {
struct buf_head *bf;
if (list_empty(&brick->free_anchor))
break;
bf = container_of(brick->free_anchor.next, struct buf_head, bf_lru_head);
list_del_init(&bf->bf_lru_head);
traced_unlock(&brick->brick_lock, *flags);
free_bf(brick, bf);
traced_lock(&brick->brick_lock, *flags);
}
}
static inline bool __remove_from_hash(struct buf_brick *brick, struct buf_head *bf, bool force)
{
int hash;
spinlock_t *lock;
unsigned long flags;
bool ok = false;
hash = buf_hash_fn(bf->bf_base_index);
lock = &brick->cache_anchors[hash].hash_lock;
traced_lock(lock, flags);
/* Attention! In rare cases, the hash lock can race against the
* brick lock upon hash_find_insert().
* Be careful!
*/
if (likely(force || !atomic_read(&bf->bf_count))) {
list_del_init(&bf->bf_hash_head);
atomic_dec(&brick->hashed_count);
ok = true;
}
traced_unlock(lock, flags);
return ok;
}
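/* Take one entry from the tail of the LRU list, unhash it and move it
 * to the free list. The brick lock is dropped while the hash lock is
 * taken, so a racing hash_find_insert() may grab the buffer in between;
 * in that case __remove_from_hash() refuses and the buffer stays hashed.
 */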
static inline void __lru_free_one(struct buf_brick *brick, unsigned long *flags)
{
struct buf_head *bf;
bool ok;
if (list_empty(&brick->lru_anchor))
return;
bf = container_of(brick->lru_anchor.prev, struct buf_head, bf_lru_head);
list_del_init(&bf->bf_lru_head);
atomic_dec(&brick->lru_count);
/* Attention! In rare cases, the hash lock can race against the
* brick lock upon hash_find_insert().
* Be careful!
*/
if (unlikely(atomic_read(&bf->bf_count) > 0))
return;
traced_unlock(&brick->brick_lock, *flags);
ok = __remove_from_hash(brick, bf, false);
traced_lock(&brick->brick_lock, *flags);
if (likely(ok)) {
list_add(&bf->bf_lru_head, &brick->free_anchor);
}
}
static inline void __lru_free(struct buf_brick *brick, unsigned long *flags)
{
while (atomic_read(&brick->hashed_count) >= brick->max_count) {
if (list_empty(&brick->lru_anchor))
break;
__lru_free_one(brick, flags);
}
}
static inline int get_info(struct buf_brick *brick)
{
struct buf_input *input = brick->inputs[0];
int status = GENERIC_INPUT_CALL(input, mars_get_info, &brick->base_info);
return status;
}
/* Convert from arbitrary/odd kernel address/length to struct page,
* create bio from it, round up/down to full sectors.
* return the length (may be smaller or even larger than requested)
*/
static int make_bio(struct buf_brick *brick, struct bio **_bio, void *data, loff_t pos, int len)
{
unsigned long long sector;
int sector_offset;
int data_offset;
int page_offset;
int page_len;
int bvec_count;
int ilen = len;
int status;
int i;
struct page *page;
struct bio *bio = NULL;
struct block_device *bdev;
status = -EINVAL;
CHECK_PTR(brick, out);
bdev = brick->bdev;
if (unlikely(!bdev)) {
struct request_queue *q;
status = get_info(brick);
if (status < 0)
goto out;
status = -EINVAL;
CHECK_PTR(brick->base_info.backing_file, out);
CHECK_PTR(brick->base_info.backing_file->f_mapping, out);
CHECK_PTR(brick->base_info.backing_file->f_mapping->host, out);
CHECK_PTR(brick->base_info.backing_file->f_mapping->host->i_sb, out);
bdev = brick->base_info.backing_file->f_mapping->host->i_sb->s_bdev;
if (!bdev && S_ISBLK(brick->base_info.backing_file->f_mapping->host->i_mode)) {
bdev = brick->base_info.backing_file->f_mapping->host->i_bdev;
}
CHECK_PTR(bdev, out);
brick->bdev = bdev;
q = bdev_get_queue(bdev);
CHECK_PTR(q, out);
brick->bvec_max = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
}
if (unlikely(ilen <= 0)) {
MARS_ERR("bad bio len %d\n", ilen);
status = -EINVAL;
goto out;
}
sector = pos >> 9; // TODO: make dynamic
sector_offset = pos & ((1 << 9) - 1); // TODO: make dynamic
data_offset = ((unsigned long)data) & ((1 << 9) - 1); // TODO: make dynamic
if (unlikely(sector_offset != data_offset)) {
MARS_ERR("bad alignment: offset %d != %d\n", sector_offset, data_offset);
}
// round down to start of first sector
data -= sector_offset;
ilen += sector_offset;
pos -= sector_offset;
// round up to full sector
ilen = (((ilen - 1) >> 9) + 1) << 9; // TODO: make dynamic
// map onto pages. TODO: allow higher-order pages (performance!)
page_offset = pos & (PAGE_SIZE - 1);
page_len = ilen + page_offset;
bvec_count = (page_len - 1) / PAGE_SIZE + 1;
if (bvec_count > brick->bvec_max)
bvec_count = brick->bvec_max;
bio = bio_alloc(GFP_MARS, bvec_count);
status = -ENOMEM;
if (!bio)
goto out;
status = 0;
for (i = 0; i < bvec_count && ilen > 0; i++) {
int myrest = PAGE_SIZE - page_offset;
int mylen = ilen;
if (mylen > myrest)
mylen = myrest;
page = virt_to_page(data);
if (!page)
goto out;
bio->bi_io_vec[i].bv_page = page;
bio->bi_io_vec[i].bv_len = mylen;
bio->bi_io_vec[i].bv_offset = page_offset;
data += mylen;
ilen -= mylen;
status += mylen;
page_offset = 0;
//MARS_INF("page_offset=%d mylen=%d (new len=%d, new status=%d)\n", page_offset, mylen, ilen, status);
}
if (unlikely(ilen != 0)) {
bio_put(bio);
bio = NULL;
MARS_ERR("computation of bvec_count %d was wrong, diff=%d\n", bvec_count, ilen);
status = -EIO;
goto out;
}
bio->bi_vcnt = i;
bio->bi_idx = 0;
bio->bi_size = status;
bio->bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_private = NULL; // must be filled in later
bio->bi_end_io = NULL; // must be filled in later
bio->bi_rw = 0; // must be filled in later
// ignore rounding on return
if (status > len)
status = len;
out:
*_bio = bio;
if (status < 0)
MARS_ERR("error %d\n", status);
return status;
}
static inline struct buf_head *_alloc_bf(struct buf_brick *brick)
{
struct buf_head *bf = kzalloc(sizeof(struct buf_head), GFP_MARS);
if (!bf)
goto done;
#ifdef USE_VMALLOC
bf->bf_data = vmalloc(brick->backing_size);
#else
bf->bf_data = (void*)__get_free_pages(GFP_MARS, brick->backing_order);
#endif
if (unlikely(!bf->bf_data)) {
kfree(bf);
bf = NULL;
}
spin_lock_init(&bf->bf_lock);
bf->bf_brick = brick;
atomic_inc(&brick->alloc_count);
done:
return bf;
}
static void __pre_alloc_bf(struct buf_brick *brick, int max)
{
while (max-- > 0) {
struct buf_head *bf = _alloc_bf(brick);
unsigned long flags;
if (unlikely(!bf))
break;
traced_lock(&brick->brick_lock, flags);
list_add(&bf->bf_lru_head, &brick->free_anchor);
traced_unlock(&brick->brick_lock, flags);
}
}
////////////////// own brick / input / output operations //////////////////
static int buf_get_info(struct buf_output *output, struct mars_info *info)
{
struct buf_input *input = output->brick->inputs[0];
return GENERIC_INPUT_CALL(input, mars_get_info, info);
}
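/* Get a buffer reference for mref->ref_pos.
 * The position is rounded down to the backing block boundary and ref_len
 * is clipped at the end of that block. An existing buf_head is looked up
 * via the hash; otherwise one is taken from the free list (or freshly
 * allocated) and inserted, retrying via the again: label when racing.
 */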
static int buf_ref_get(struct buf_output *output, struct mars_ref_object *mref)
{
struct buf_brick *brick = output->brick;
struct buf_mars_ref_aspect *mref_a;
struct buf_head *bf;
struct buf_head *new = NULL;
loff_t base_pos;
int base_offset;
int max_len;
unsigned long flags;
int status = -EILSEQ;
might_sleep();
if (unlikely(mref->orig_bio)) {
MARS_ERR("illegal: mref has a bio assigend\n");
}
#ifdef PRE_ALLOC
if (unlikely(atomic_read(&brick->alloc_count) < brick->max_count)) {
// grab all memory in one go => avoid memory fragmentation
__pre_alloc_bf(brick, brick->max_count + PRE_ALLOC - atomic_read(&brick->alloc_count));
}
#endif
/* Grab reference.
*/
_CHECK_ATOMIC(&mref->ref_count, !=, 0);
atomic_inc(&mref->ref_count);
mref_a = buf_mars_ref_get_aspect(output, mref);
if (unlikely(!mref_a))
goto done;
base_pos = mref->ref_pos & ~(loff_t)(brick->backing_size - 1);
base_offset = (mref->ref_pos - base_pos);
if (unlikely(base_offset < 0 || base_offset >= brick->backing_size)) {
MARS_ERR("bad base_offset %d\n", base_offset);
}
max_len = brick->backing_size - base_offset;
if (mref->ref_len > max_len)
mref->ref_len = max_len;
again:
bf = hash_find_insert(brick, base_pos >> brick->backing_order, new);
if (bf) {
#if 1
loff_t end_pos = bf->bf_pos + brick->backing_size;
if (mref->ref_pos < bf->bf_pos || mref->ref_pos >= end_pos) {
MARS_ERR("hash value corruption. %lld not in (%lld ... %lld)\n", mref->ref_pos, bf->bf_pos, end_pos);
}
#endif
atomic_inc(&brick->hit_count);
if (unlikely(new)) {
atomic_inc(&brick->nr_collisions);
MARS_DBG("race detected: alias elem appeared in the meantime\n");
traced_lock(&brick->brick_lock, flags);
list_del(&new->bf_lru_head);
list_add(&new->bf_lru_head, &brick->free_anchor);
traced_unlock(&brick->brick_lock, flags);
new = NULL;
}
} else if (new) {
atomic_inc(&brick->miss_count);
MARS_DBG("new elem added\n");
bf = new;
new = NULL;
} else {
MARS_DBG("buf_get() hash nothing found\n");
traced_lock(&brick->brick_lock, flags);
if (list_empty(&brick->free_anchor)) {
__lru_free_one(brick, &flags);
if (unlikely(list_empty(&brick->free_anchor))) {
MARS_INF("alloc new buf_head %d\n", atomic_read(&brick->alloc_count));
traced_unlock(&brick->brick_lock, flags);
status = -ENOMEM;
bf = _alloc_bf(brick);
if (!bf)
goto done;
traced_lock(&brick->brick_lock, flags);
list_add(&bf->bf_lru_head, &brick->free_anchor);
traced_unlock(&brick->brick_lock, flags);
/* while the lock was released, somebody might have
* raced against us at the same base_pos...
*/
goto again;
}
}
new = container_of(brick->free_anchor.next, struct buf_head, bf_lru_head);
list_del_init(&new->bf_lru_head);
traced_unlock(&brick->brick_lock, flags);
new->bf_pos = base_pos;
new->bf_base_index = base_pos >> brick->backing_order;
new->bf_flags = 0;
/* Important optimization: treat whole buffers as uptodate
* upon first write.
*/
if (mref->ref_may_write != READ &&
((!base_offset && mref->ref_len == brick->backing_size) ||
(mref->ref_pos >= brick->base_info.current_size))) {
new->bf_flags |= MARS_REF_UPTODATE;
}
atomic_set(&new->bf_count, 1);
new->bf_bio_status = 0;
atomic_set(&new->bf_bio_count, 0);
//INIT_LIST_HEAD(&new->bf_mref_anchor);
//INIT_LIST_HEAD(&new->bf_lru_head);
INIT_LIST_HEAD(&new->bf_hash_head);
INIT_LIST_HEAD(&new->bf_io_pending_anchor);
INIT_LIST_HEAD(&new->bf_postpone_anchor);
/* Check for races against us...
*/
goto again;
}
mref_a->rfa_bf = bf;
MARS_DBG("bf=%p initial bf_count=%d\n", bf, atomic_read(&bf->bf_count));
mref->ref_flags = bf->bf_flags;
mref->ref_data = bf->bf_data + base_offset;
CHECK_ATOMIC(&mref->ref_count, 1);
return mref->ref_len;
done:
return status;
}
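/* Drop one reference on bf. When the last reference goes away, the
 * (still hashed) buffer is put onto the LRU list and the LRU / cache
 * pruning is run while the brick lock is held.
 */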
static void __bf_put(struct buf_head *bf)
{
struct buf_brick *brick;
unsigned long flags;
brick = bf->bf_brick;
traced_lock(&brick->brick_lock, flags);
CHECK_ATOMIC(&bf->bf_count, 1);
if (!atomic_dec_and_test(&bf->bf_count)) {
traced_unlock(&brick->brick_lock, flags);
return;
}
MARS_DBG("ZERO_COUNT\n");
if (unlikely(!list_empty(&bf->bf_io_pending_anchor))) {
MARS_ERR("bf_io_pending_anchor is not empty!\n");
}
if (unlikely(!list_empty(&bf->bf_postpone_anchor))) {
MARS_ERR("bf_postpone_anchor is not empty!\n");
}
CHECK_HEAD_EMPTY(&bf->bf_lru_head);
atomic_inc(&brick->lru_count);
if (likely(bf->bf_flags & MARS_REF_UPTODATE)) {
list_add(&bf->bf_lru_head, &brick->lru_anchor);
} else {
list_add_tail(&bf->bf_lru_head, &brick->lru_anchor);
}
// lru freeing (this is completely independent from bf)
__lru_free(brick, &flags);
__prune_cache(brick, brick->max_count * 2, &flags);
traced_unlock(&brick->brick_lock, flags);
}
static void _buf_ref_put(struct buf_mars_ref_aspect *mref_a)
{
struct mars_ref_object *mref = mref_a->object;
struct buf_head *bf;
CHECK_ATOMIC(&mref->ref_count, 1);
if (!atomic_dec_and_test(&mref->ref_count))
return;
bf = mref_a->rfa_bf;
if (bf) {
MARS_DBG("buf_ref_put() mref=%p mref_a=%p bf=%p\n", mref, mref_a, bf);
__bf_put(bf);
}
buf_free_mars_ref(mref);
}
static void buf_ref_put(struct buf_output *output, struct mars_ref_object *mref)
{
struct buf_mars_ref_aspect *mref_a;
mref_a = buf_mars_ref_get_aspect(output, mref);
if (unlikely(!mref_a)) {
MARS_FAT("cannot get aspect\n");
return;
}
_buf_ref_put(mref_a);
}
static void _buf_endio(struct generic_callback *cb)
{
struct buf_mars_ref_aspect *mref_a = cb->cb_private;
struct mars_ref_object *mref = mref_a->object;
int error = cb->cb_error;
struct bio *bio = mref->orig_bio;
MARS_DBG("_buf_endio() mref=%p bio=%p\n", mref, bio);
if (bio) {
if (error < 0) {
MARS_ERR("_buf_endio() error=%d bi_size=%d\n", error, bio->bi_size);
}
if (error > 0)
error = 0;
bio_endio(bio, error);
bio_put(bio);
} else {
//...
}
}
static void _buf_bio_callback(struct bio *bio, int code);
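/* Split the region [start_pos, start_pos + start_len) of bf into
 * sub-requests, build a bio for each of them via make_bio() and submit
 * everything to the underlying input (or complete it immediately with
 * an error). Returns 0 on success or a negative error code.
 */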
static int _buf_make_bios(struct buf_brick *brick, struct buf_head *bf, void *start_data, loff_t start_pos, int start_len, int rw)
{
struct buf_input *input;
LIST_HEAD(tmp);
int status = -EINVAL;
int iters = 0;
#if 1
loff_t bf_end = bf->bf_pos + brick->backing_size;
loff_t end_pos;
if (start_pos < bf->bf_pos || start_pos >= bf_end) {
MARS_ERR("bad start_pos %llu (%llu ... %llu)\n", start_pos, bf->bf_pos, bf_end);
goto done;
}
end_pos = start_pos + start_len;
if (end_pos <= bf->bf_pos || end_pos > bf_end) {
MARS_ERR("bad end_pos %llu (%llu ... %llu)\n", end_pos, bf->bf_pos, bf_end);
goto done;
}
if (!start_data) {
MARS_ERR("bad start_data\n");
goto done;
}
if (start_len <= 0) {
MARS_ERR("bad start_len %d\n", start_len);
goto done;
}
#endif
status = -ENOMEM;
while (start_len > 0) {
struct mars_ref_object *mref;
struct buf_mars_ref_aspect *mref_a;
struct bio *bio = NULL;
int len;
mref = buf_alloc_mars_ref(brick->outputs[0], &brick->mref_object_layout);
if (unlikely(!mref))
break;
mref_a = buf_mars_ref_get_aspect(brick->outputs[0], mref);
if (unlikely(!mref_a)) {
buf_free_mars_ref(mref);
break;
}
list_add(&mref_a->tmp_head, &tmp);
mref_a->rfa_bf = bf;
mref_a->cb.cb_fn = _buf_endio;
mref_a->cb.cb_private = mref_a;
mref_a->cb.cb_error = 0;
mref_a->cb.cb_prev = NULL;
len = make_bio(brick, &bio, start_data, start_pos, start_len);
if (unlikely(len < 0)) {
status = len;
break;
}
if (unlikely(len == 0 || !bio)) {
status = -EIO;
//buf_free_mars_ref(mref);
break;
}
bio->bi_private = mref_a;
bio->bi_end_io = _buf_bio_callback;
bio->bi_rw = rw;
mref->ref_cb = &mref_a->cb;
mars_ref_attach_bio(mref, bio);
start_data += len;
start_pos += len;
start_len -= len;
iters++;
}
if (likely(!start_len))
status = 0;
#if 1
else {
MARS_ERR("start_len %d != 0 (error %d)\n", start_len, status);
}
if (iters != 1) {
MARS_INF("start_pos=%lld start_len=%d iters=%d, status=%d\n", start_pos, start_len, iters, status);
}
iters = 0;
#endif
input = brick->inputs[0];
while (!list_empty(&tmp)) {
struct mars_ref_object *mref;
struct buf_mars_ref_aspect *mref_a;
struct generic_callback *cb;
mref_a = container_of(tmp.next, struct buf_mars_ref_aspect, tmp_head);
mref = mref_a->object;
list_del_init(&mref_a->tmp_head);
iters++;
cb = mref->ref_cb;
if (status < 0) { // clean up
MARS_ERR("reporting error %d\n", status);
cb->cb_error = status;
cb->cb_fn(cb);
#if 0
if (mref->orig_bio)
bio_put(mref->orig_bio);
#endif
buf_free_mars_ref(mref);
continue;
}
/* Remember the number of bios we are submitting.
*/
CHECK_ATOMIC(&bf->bf_bio_count, 0);
atomic_inc(&bf->bf_bio_count);
MARS_DBG("starting buf IO mref=%p bio=%p bf=%p bf_count=%d bf_bio_count=%d\n", mref, mref->orig_bio, bf, atomic_read(&bf->bf_count), atomic_read(&bf->bf_bio_count));
#ifndef FAKE_IO
GENERIC_INPUT_CALL(input, mars_ref_io, mref, rw);
#else
// fake IO for testing
cb->cb_error = status;
cb->cb_fn(cb);
#if 0
if (mref->orig_bio)
bio_put(mref->orig_bio);
#endif
buf_free_mars_ref(mref);
#endif
}
#if 1
if (iters != 1) {
MARS_INF("start_pos=%lld start_len=%d iters=%d, status=%d\n", start_pos, start_len, iters, status);
}
iters = 0;
#endif
done:
return status;
}
/* This is called from the bio layer.
*/
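/* Only the last completion (bf_bio_count reaching zero) does the real
 * work: update bf_flags, move postponed writes to the pending list and
 * restart them, and invoke the callbacks of all waiting requests.
 */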
static void _buf_bio_callback(struct bio *bio, int code)
{
struct buf_mars_ref_aspect *mref_a;
struct buf_head *bf;
struct buf_brick *brick;
void *start_data = NULL;
loff_t start_pos = 0;
int start_len = 0;
int old_flags;
unsigned long flags;
LIST_HEAD(tmp);
#if 1
int count = 0;
#endif
mref_a = bio->bi_private;
bf = mref_a->rfa_bf;
MARS_DBG("_buf_bio_callback() mref=%p bio=%p bf=%p bf_count=%d bf_bio_count=%d code=%d\n", mref_a->object, bio, bf, atomic_read(&bf->bf_count), atomic_read(&bf->bf_bio_count), code);
if (unlikely(code < 0)) {
MARS_ERR("BIO ERROR %d (old=%d)\n", code, bf->bf_bio_status);
// this can race, but we don't worry about the exact error code
bf->bf_bio_status = code;
}
CHECK_ATOMIC(&bf->bf_bio_count, 1);
if (!atomic_dec_and_test(&bf->bf_bio_count))
return;
MARS_DBG("_buf_bio_callback() ZERO_COUNT mref=%p bio=%p bf=%p code=%d\n", mref_a->object, bio, bf, code);
brick = bf->bf_brick;
// get an extra reference, to avoid freeing bf underneath during callbacks
CHECK_ATOMIC(&bf->bf_count, 1);
atomic_inc(&bf->bf_count);
traced_lock(&bf->bf_lock, flags);
// update flags. this must be done before the callbacks.
old_flags = bf->bf_flags;
if (!bf->bf_bio_status && (old_flags & MARS_REF_READING)) {
bf->bf_flags |= MARS_REF_UPTODATE;
}
// clear the flags, callbacks must not see them. may be re-enabled later.
bf->bf_flags &= ~(MARS_REF_READING | MARS_REF_WRITING);
/* Remember current version of pending list.
* This is necessary because later the callbacks might
* change it underneath.
*/
if (!list_empty(&bf->bf_io_pending_anchor)) {
struct list_head *next = bf->bf_io_pending_anchor.next;
list_del_init(&bf->bf_io_pending_anchor);
list_add_tail(&tmp, next);
}
/* Move pending jobs to work.
* This is in essence an automatic restart mechanism.
* do this before the callbacks, because they may start
* new IOs. If not done in the right order, this could violate
* IO ordering semantics.
*/
while (!list_empty(&bf->bf_postpone_anchor)) {
struct buf_mars_ref_aspect *mref_a = container_of(bf->bf_postpone_anchor.next, struct buf_mars_ref_aspect, rfa_pending_head);
struct mars_ref_object *mref = mref_a->object;
if (mref_a->rfa_bf != bf) {
MARS_ERR("bad pointers %p != %p\n", mref_a->rfa_bf, bf);
}
#if 1
if (!(++count % 1000)) {
MARS_ERR("endless loop 1\n");
}
#endif
list_del_init(&mref_a->rfa_pending_head);
list_add_tail(&mref_a->rfa_pending_head, &bf->bf_io_pending_anchor);
// re-enable flags
bf->bf_flags |= MARS_REF_WRITING;
bf->bf_bio_status = 0;
if (!start_len) {
// first time: only flush the affected area
start_data = mref->ref_data;
start_pos = mref->ref_pos;
start_len = mref->ref_len;
} else if (start_data != mref->ref_data ||
start_pos != mref->ref_pos ||
start_len != mref->ref_len) {
// another time: flush larger parts
loff_t start_diff = mref->ref_pos - start_pos;
loff_t end_diff;
if (start_diff < 0) {
start_data += start_diff;
start_pos += start_diff;
start_len -= start_diff;
}
end_diff = (mref->ref_pos + mref->ref_len) - (start_pos + start_len);
if (end_diff > 0) {
start_len += end_diff;
}
}
}
traced_unlock(&bf->bf_lock, flags);
/* Signal success by calling all callbacks.
* Thanks to the tmp list, we can do this outside the spinlock.
*/
count = 0;
while (!list_empty(&tmp)) {
struct buf_mars_ref_aspect *mref_a = container_of(tmp.next, struct buf_mars_ref_aspect, rfa_pending_head);
struct mars_ref_object *mref = mref_a->object;
struct generic_callback *cb = mref->ref_cb;
if (mref_a->rfa_bf != bf) {
MARS_ERR("bad pointers %p != %p\n", mref_a->rfa_bf, bf);
}
#if 1
if (!(++count % 1000)) {
MARS_ERR("endless loop 2\n");
}
#endif
CHECK_ATOMIC(&mref->ref_count, 1);
/* It should be safe to do this without locking, because
* tmp is on the stack, so there is no concurrency.
*/
list_del_init(&mref_a->rfa_pending_head);
// update infos for callbacks, they may inspect it.
mref->ref_flags = bf->bf_flags;
cb->cb_error = bf->bf_bio_status;
atomic_dec(&brick->nr_io_pending);
cb->cb_fn(cb);
_buf_ref_put(mref_a);
}
if (start_len) {
MARS_DBG("ATTENTION %d\n", start_len);
_buf_make_bios(brick, bf, start_data, start_pos, start_len, WRITE);
}
// drop the extra reference from above
__bf_put(bf);
}
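/* Start IO on a previously obtained buffer reference.
 * Reads always fetch the whole backing buffer; writes on a buffer that
 * is currently being written back are postponed and restarted later by
 * _buf_bio_callback(). Callbacks either fire immediately (buffer already
 * uptodate, or an error) or are deferred until the IO completes.
 */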
static void buf_ref_io(struct buf_output *output, struct mars_ref_object *mref, int rw)
{
struct buf_brick *brick = output->brick;
struct buf_mars_ref_aspect *mref_a;
struct generic_callback *cb;
struct buf_head *bf;
void *start_data = NULL;
loff_t start_pos = 0;
int start_len = 0;
int status = -EINVAL;
bool delay = false;
unsigned long flags;
if (unlikely(!mref)) {
MARS_FAT("internal problem: forgotten to supply mref\n");
goto fatal;
}
mref_a = buf_mars_ref_get_aspect(output, mref);
if (unlikely(!mref_a)) {
MARS_ERR("internal problem: mref aspect does not work\n");
goto fatal;
}
/* Grab an extra reference.
* This will be released later in _buf_bio_callback() after
* calling the callbacks.
*/
CHECK_ATOMIC(&mref->ref_count, 1);
atomic_inc(&mref->ref_count);
bf = mref_a->rfa_bf;
if (unlikely(!bf)) {
MARS_ERR("internal problem: forgotten bf\n");
goto callback;
}
CHECK_ATOMIC(&bf->bf_count, 1);
if (rw != READ) {
loff_t end;
if (unlikely(mref->ref_may_write == READ)) {
MARS_ERR("sorry, forgotten to set ref_may_write\n");
goto callback;
}
if (unlikely(!(bf->bf_flags & MARS_REF_UPTODATE))) {
MARS_ERR("sorry, writing is only allowed on UPTODATE buffers\n");
goto callback;
}
end = mref->ref_pos + mref->ref_len;
//FIXME: race condition :(
if (end > brick->base_info.current_size) {
brick->base_info.current_size = end;
}
}
mref->ref_rw = rw;
#if 1
if (jiffies - brick->last_jiffies >= 30 * HZ) {
unsigned long hit = atomic_read(&brick->hit_count);
unsigned long miss = atomic_read(&brick->miss_count);
unsigned long perc = hit * 100 * 100 / (hit + miss);
brick->last_jiffies = jiffies;
MARS_INF("STATISTICS: hashed=%d lru=%d alloc=%d io_pending=%d hit=%lu (%lu.%02lu%%) miss=%lu collisions=%d io=%d\n", atomic_read(&brick->hashed_count), atomic_read(&brick->lru_count), atomic_read(&brick->alloc_count), atomic_read(&brick->nr_io_pending), hit, perc / 100, perc % 100, miss, atomic_read(&brick->nr_collisions), atomic_read(&brick->io_count));
}
#endif
traced_lock(&bf->bf_lock, flags);
if (!list_empty(&mref_a->rfa_pending_head)) {
MARS_ERR("trying to start IO on an already started mref\n");
goto already_done;
}
if (rw) { // WRITE
if (bf->bf_flags & MARS_REF_READING) {
MARS_ERR("bad bf_flags %d\n", bf->bf_flags);
}
if (!(bf->bf_flags & MARS_REF_WRITING)) {
// by definition, a writeout buffer is always uptodate
bf->bf_flags |= (MARS_REF_WRITING | MARS_REF_UPTODATE);
bf->bf_bio_status = 0;
#if 1
start_data = mref->ref_data;
start_pos = mref->ref_pos;
start_len = mref->ref_len;
#else // only for testing: write the full buffer
start_data = (void*)((unsigned long)mref->ref_data & ~(unsigned long)(brick->backing_size - 1));
start_pos = mref->ref_pos & ~(loff_t)(brick->backing_size - 1);
start_len = brick->backing_size;
#endif
list_add(&mref_a->rfa_pending_head, &bf->bf_io_pending_anchor);
delay = true;
} else {
list_add(&mref_a->rfa_pending_head, &bf->bf_postpone_anchor);
delay = true;
MARS_DBG("postponing %lld %d\n", mref->ref_pos, mref->ref_len);
}
} else { // READ
if (bf->bf_flags & (MARS_REF_UPTODATE | MARS_REF_WRITING)) {
goto already_done;
}
if (!(bf->bf_flags & MARS_REF_READING)) {
bf->bf_flags |= MARS_REF_READING;
bf->bf_bio_status = 0;
// always read the whole buffer.
start_data = (void*)((unsigned long)mref->ref_data & ~(unsigned long)(brick->backing_size - 1));
start_pos = mref->ref_pos & ~(loff_t)(brick->backing_size - 1);
start_len = brick->backing_size;
}
list_add(&mref_a->rfa_pending_head, &bf->bf_io_pending_anchor);
delay = true;
}
mref->ref_flags = bf->bf_flags;
mref->ref_cb->cb_error = bf->bf_bio_status;
if (likely(delay)) {
atomic_inc(&brick->nr_io_pending);
atomic_inc(&brick->io_count);
}
traced_unlock(&bf->bf_lock, flags);
if (!start_len) {
// nothing to start, IO is already started.
goto no_callback;
}
status = _buf_make_bios(brick, bf, start_data, start_pos, start_len, rw);
if (likely(status >= 0)) {
/* No immediate callback, this time.
* Callbacks will be called later from _buf_bio_callback().
*/
goto no_callback;
}
MARS_ERR("error %d during buf_ref_io()\n", status);
buf_ref_put(output, mref);
goto callback;
already_done:
mref->ref_flags = bf->bf_flags;
status = bf->bf_bio_status;
traced_unlock(&bf->bf_lock, flags);
callback:
cb = mref->ref_cb;
cb->cb_error = status;
cb->cb_fn(cb);
no_callback:
if (!delay) {
buf_ref_put(output, mref);
} // else the ref_put() will be later carried out upon IO completion.
fatal: // no chance to call callback: may produce hanging tasks :(
;
}
//////////////// object / aspect constructors / destructors ///////////////
static int buf_mars_ref_aspect_init_fn(struct generic_aspect *_ini, void *_init_data)
{
struct buf_mars_ref_aspect *ini = (void*)_ini;
ini->rfa_bf = NULL;
INIT_LIST_HEAD(&ini->rfa_pending_head);
INIT_LIST_HEAD(&ini->tmp_head);
return 0;
}
static void buf_mars_ref_aspect_exit_fn(struct generic_aspect *_ini, void *_init_data)
{
struct buf_mars_ref_aspect *ini = (void*)_ini;
(void)ini;
#if 1
CHECK_HEAD_EMPTY(&ini->rfa_pending_head);
CHECK_HEAD_EMPTY(&ini->tmp_head);
#endif
}
MARS_MAKE_STATICS(buf);
////////////////////// brick constructors / destructors ////////////////////
static int buf_brick_construct(struct buf_brick *brick)
{
int i;
brick->backing_order = 5; // TODO: make this configurable
brick->backing_size = PAGE_SIZE << brick->backing_order;
brick->max_count = 32; // TODO: make this configurable
atomic_set(&brick->alloc_count, 0);
atomic_set(&brick->hashed_count, 0);
atomic_set(&brick->lru_count, 0);
atomic_set(&brick->nr_io_pending, 0);
atomic_set(&brick->nr_collisions, 0);
spin_lock_init(&brick->brick_lock);
//rwlock_init(&brick->brick_lock);
INIT_LIST_HEAD(&brick->free_anchor);
INIT_LIST_HEAD(&brick->lru_anchor);
for (i = 0; i < MARS_BUF_HASH_MAX; i++) {
spin_lock_init(&brick->cache_anchors[i].hash_lock);
INIT_LIST_HEAD(&brick->cache_anchors[i].hash_anchor);
}
return 0;
}
static int buf_output_construct(struct buf_output *output)
{
return 0;
}
static int buf_brick_destruct(struct buf_brick *brick)
{
int i;
unsigned long flags;
traced_lock(&brick->brick_lock, flags);
brick->max_count = 0;
__lru_free(brick, &flags);
__prune_cache(brick, 0, &flags);
traced_unlock(&brick->brick_lock, flags);
CHECK_HEAD_EMPTY(&brick->free_anchor);
CHECK_HEAD_EMPTY(&brick->lru_anchor);
for (i = 0; i < MARS_BUF_HASH_MAX; i++) {
CHECK_HEAD_EMPTY(&brick->cache_anchors[i].hash_anchor);
}
return 0;
}
///////////////////////// static structs ////////////////////////
static struct buf_brick_ops buf_brick_ops = {
};
static struct buf_output_ops buf_output_ops = {
.make_object_layout = buf_make_object_layout,
.mars_get_info = buf_get_info,
.mars_ref_get = buf_ref_get,
.mars_ref_put = buf_ref_put,
.mars_ref_io = buf_ref_io,
};
const struct buf_input_type buf_input_type = {
.type_name = "buf_input",
.input_size = sizeof(struct buf_input),
};
static const struct buf_input_type *buf_input_types[] = {
&buf_input_type,
};
const struct buf_output_type buf_output_type = {
.type_name = "buf_output",
.output_size = sizeof(struct buf_output),
.master_ops = &buf_output_ops,
.output_construct = &buf_output_construct,
.aspect_types = buf_aspect_types,
.layout_code = {
[BRICK_OBJ_MARS_REF] = LAYOUT_ALL,
}
};
static const struct buf_output_type *buf_output_types[] = {
&buf_output_type,
};
const struct buf_brick_type buf_brick_type = {
.type_name = "buf_brick",
.brick_size = sizeof(struct buf_brick),
.max_inputs = 1,
.max_outputs = 1,
.master_ops = &buf_brick_ops,
.default_input_types = buf_input_types,
.default_output_types = buf_output_types,
.brick_construct = &buf_brick_construct,
.brick_destruct = &buf_brick_destruct,
};
EXPORT_SYMBOL_GPL(buf_brick_type);
////////////////// module init stuff /////////////////////////
static int __init init_buf(void)
{
printk(MARS_INFO "init_buf()\n");
return buf_register_brick_type();
}
static void __exit exit_buf(void)
{
printk(MARS_INFO "exit_buf()\n");
buf_unregister_brick_type();
}
MODULE_DESCRIPTION("MARS buf brick");
MODULE_AUTHOR("Thomas Schoebel-Theuer <tst@1und1.de>");
MODULE_LICENSE("GPL");
module_init(init_buf);
module_exit(exit_buf);