/*
* MARS Long Distance Replication Software
*
* This file is part of MARS project: http://schoebel.github.io/mars/
*
* Copyright (C) 2010-2014 Thomas Schoebel-Theuer
* Copyright (C) 2011-2014 1&1 Internet AG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/bitops.h>
//#define BRICK_DEBUGGING
//#define MARS_DEBUGGING
//#define IO_DEBUGGING
#include "lib_log.h"
#include "brick_wait.h"
__u32 enabled_log_compressions = 0;
__u32 used_log_compression = 0;
atomic_t global_mref_flying = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(global_mref_flying);
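/* Tear down a log_status: wait until all in-flight log IO has
 * completed, then put any remaining read/write mrefs.
 */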
void exit_logst(struct log_status *logst)
{
int count = 0;
log_flush(logst);
while (atomic_read(&logst->mref_flying) > 0) {
if (!count++)
MARS_DBG("waiting for IO terminating...");
brick_msleep(500);
}
if (logst->read_mref) {
MARS_DBG("putting read_mref\n");
GENERIC_INPUT_CALL_VOID(logst->input, mref_put, logst->read_mref);
logst->read_mref = NULL;
}
if (logst->log_mref) {
MARS_DBG("putting log_mref\n");
GENERIC_INPUT_CALL_VOID(logst->input, mref_put, logst->log_mref);
logst->log_mref = NULL;
}
}
EXPORT_SYMBOL_GPL(exit_logst);
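/* (Re-)initialize a log_status for the given input and the logfile
 * window [start_pos, end_pos). Any previous state is torn down first.
 */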
void init_logst(struct log_status *logst, struct mars_input *input, loff_t start_pos, loff_t end_pos)
{
exit_logst(logst);
memset(logst, 0, sizeof(struct log_status));
logst->input = input;
logst->brick = input->brick;
logst->start_pos = start_pos;
logst->log_pos = start_pos;
logst->end_pos = end_pos;
init_waitqueue_head(&logst->event);
}
EXPORT_SYMBOL_GPL(init_logst);
#define MARS_LOG_CB_MAX 32
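/* Collects up to MARS_LOG_CB_MAX completion callbacks which are
 * batched into a single log IO and invoked from log_write_endio().
 */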
struct log_cb_info {
struct mref_object *mref;
struct log_status *logst;
struct semaphore mutex;
atomic_t refcount;
int nr_cb;
void (*endios[MARS_LOG_CB_MAX])(void *private, int error);
void *privates[MARS_LOG_CB_MAX];
};
static
void put_log_cb_info(struct log_cb_info *cb_info)
{
if (atomic_dec_and_test(&cb_info->refcount)) {
brick_mem_free(cb_info);
}
}
static
void _do_callbacks(struct log_cb_info *cb_info, int error)
{
int i;
down(&cb_info->mutex);
for (i = 0; i < cb_info->nr_cb; i++) {
void (*end_fn)(void *private, int error);
end_fn = cb_info->endios[i];
cb_info->endios[i] = NULL;
if (end_fn) {
end_fn(cb_info->privates[i], error);
}
}
up(&cb_info->mutex);
}
static
void log_write_endio(struct generic_callback *cb)
{
struct log_cb_info *cb_info = cb->cb_private;
struct log_status *logst;
LAST_CALLBACK(cb);
CHECK_PTR(cb_info, err);
if (cb_info->mref) {
mars_trace(cb_info->mref, "log_endio");
mars_log_trace(cb_info->mref);
}
logst = cb_info->logst;
CHECK_PTR(logst, done);
MARS_IO("nr_cb = %d\n", cb_info->nr_cb);
_do_callbacks(cb_info, cb->cb_error);
done:
put_log_cb_info(cb_info);
atomic_dec(&logst->mref_flying);
atomic_dec(&global_mref_flying);
if (logst->signal_event && logst->signal_flag)
brick_wake(logst->signal_event, *(logst->signal_flag));
return;
err:
MARS_FAT("internal pointer corruption\n");
}
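/* Try to compress the current log payload in place. A positive result
 * is the compressed length, with *padded_len set to the padded on-disk
 * length; a result <= 0 means the record is written uncompressed.
 */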
static
int log_compress(struct log_status *logst, int len,
int *padded_len,
__u32 *result_flags)
{
struct mref_object *mref = logst->log_mref;
void *inplace_buf;
void *compr_buf;
int compr_len;
int res;
if (unlikely(!mref || !mref->ref_data || len <= 0))
return 0;
compr_len = len + compress_overhead;
compr_buf = brick_mem_alloc(compr_len);
inplace_buf = mref->ref_data + logst->payload_offset;
res = mars_compress(inplace_buf, len,
compr_buf, compr_len,
enabled_log_compressions,
result_flags);
/* pad the resulting length */
if (res > 0) {
int pad_len = ((res + (_LOG_PAD - 1)) / _LOG_PAD) * _LOG_PAD;
/* Did compression pay off? */
if (pad_len < len) {
*padded_len = pad_len;
used_log_compression = *result_flags;
memcpy(inplace_buf, compr_buf, res);
} else {
*padded_len = len;
res = 0;
}
}
brick_mem_free(compr_buf);
return res;
}
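/* Submit the currently accumulated log records as one write IO.
 * The buffer is zero-padded up to the next alignment border so that
 * no stale kernel memory leaks to disk.
 */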
void log_flush(struct log_status *logst)
{
struct mref_object *mref = logst->log_mref;
struct log_cb_info *cb_info;
int align_size;
int gap;
if (!mref || !logst->count)
return;
gap = 0;
align_size = (logst->align_size / PAGE_SIZE) * PAGE_SIZE;
if (align_size > 0) {
// round up to next alignment border
int align_offset = logst->offset & (align_size-1);
if (align_offset > 0) {
int restlen = mref->ref_len - logst->offset;
gap = align_size - align_offset;
if (unlikely(gap > restlen)) {
gap = restlen;
}
}
}
if (gap > 0) {
// don't leak information from kernelspace
memset(mref->ref_data + logst->offset, 0, gap);
logst->offset += gap;
}
mref->ref_len = logst->offset;
memcpy(&logst->log_pos_stamp, &logst->tmp_pos_stamp, sizeof(logst->log_pos_stamp));
cb_info = logst->private;
logst->private = NULL;
SETUP_CALLBACK(mref, log_write_endio, cb_info);
cb_info->logst = logst;
mref->ref_flags |= MREF_WRITE | MREF_MAY_WRITE;
mars_trace(mref, "log_flush");
atomic_inc(&logst->mref_flying);
atomic_inc(&global_mref_flying);
GENERIC_INPUT_CALL_VOID(logst->input, mref_io, mref);
GENERIC_INPUT_CALL_VOID(logst->input, mref_put, mref);
logst->log_pos += logst->offset;
logst->offset = 0;
logst->count = 0;
logst->log_mref = NULL;
put_log_cb_info(cb_info);
}
EXPORT_SYMBOL_GPL(log_flush);
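/* Begin a new transaction log record: write the record header into the
 * current log mref (flushing / allocating a new one when necessary) and
 * return a pointer to a payload area of lh->l_len bytes. The caller
 * fills in the payload and then calls log_finalize().
 *
 * Typical write-side usage (sketch; my_endio / my_private are
 * caller-supplied):
 *
 *	data = log_reserve(logst, &lh);
 *	if (data) {
 *		memcpy(data, payload, lh.l_len);
 *		ok = log_finalize(logst, lh.l_len, my_endio, my_private);
 *	}
 *	log_flush(logst);
 */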
void *log_reserve(struct log_status *logst, struct log_header *lh)
{
struct log_cb_info *cb_info = logst->private;
struct mref_object *mref;
void *data;
short total_len = lh->l_len + OVERHEAD;
int offset;
int status;
if (unlikely(lh->l_len <= 0 || lh->l_len > logst->max_size)) {
MARS_ERR("trying to write %d bytes, max allowed = %d\n", lh->l_len, logst->max_size);
goto err;
}
MARS_IO("reserving %d bytes at %lld\n", lh->l_len, logst->log_pos);
mref = logst->log_mref;
if ((mref && total_len > mref->ref_len - logst->offset)
|| !cb_info || cb_info->nr_cb >= MARS_LOG_CB_MAX) {
log_flush(logst);
}
mref = logst->log_mref;
if (!mref) {
if (unlikely(logst->private)) {
MARS_ERR("oops\n");
brick_mem_free(logst->private);
}
logst->private = brick_zmem_alloc(sizeof(struct log_cb_info));
if (unlikely(!logst->private)) {
MARS_ERR("no memory\n");
goto err;
}
cb_info = logst->private;
sema_init(&cb_info->mutex, 1);
atomic_set(&cb_info->refcount, 2);
mref = mars_alloc_mref(logst->brick);
if (unlikely(!mref)) {
MARS_ERR("no mref\n");
goto err;
}
cb_info->mref = mref;
mref->ref_pos = logst->log_pos;
mref->ref_len = logst->chunk_size ? logst->chunk_size : total_len;
mref->ref_flags = MREF_MAY_WRITE;
mref->ref_prio = logst->io_prio;
for (;;) {
status = GENERIC_INPUT_CALL(logst->input, mref_get, mref);
if (likely(status >= 0)) {
break;
}
if (status != -ENOMEM && status != -EAGAIN) {
MARS_ERR("mref_get() failed, status = %d\n", status);
goto err_free;
}
brick_msleep(100);
}
mars_trace(mref, "log_start");
if (unlikely(mref->ref_len < total_len)) {
MARS_ERR("ref_len = %d total_len = %d\n", mref->ref_len, total_len);
goto put;
}
logst->offset = 0;
logst->log_mref = mref;
}
offset = logst->offset;
data = mref->ref_data;
DATA_PUT(data, offset, START_MAGIC);
DATA_PUT(data, offset, (char)FORMAT_VERSION);
logst->validflag_offset = offset;
DATA_PUT(data, offset, (char)0); // valid_flag
logst->totallen_offset = offset;
DATA_PUT(data, offset, total_len); // start of next header
DATA_PUT(data, offset, lh->l_stamp.tv_sec);
DATA_PUT(data, offset, lh->l_stamp.tv_nsec);
DATA_PUT(data, offset, lh->l_pos);
logst->reallen_offset = offset;
DATA_PUT(data, offset, lh->l_len);
logst->decompresslen_offset = offset;
DATA_PUT(data, offset, (short)0); /* placeholder */
DATA_PUT(data, offset, (int)0); // spare
DATA_PUT(data, offset, lh->l_code);
DATA_PUT(data, offset, (short)0); // spare
// remember the last timestamp
memcpy(&logst->tmp_pos_stamp, &lh->l_stamp, sizeof(logst->tmp_pos_stamp));
logst->payload_offset = offset;
logst->payload_len = lh->l_len;
return data + offset;
put:
GENERIC_INPUT_CALL_VOID(logst->input, mref_put, mref);
logst->log_mref = NULL;
return NULL;
err_free:
mars_free_mref(mref);
if (logst->private) {
// TODO: if callbacks are already registered, call them here with some error code
brick_mem_free(logst->private);
}
err:
return NULL;
}
EXPORT_SYMBOL_GPL(log_reserve);
/* Unfortunately, the old logfile format had only 32 bit
* (4 byte) checksums.
* By converting the unused l_written to a checksum, we can
* now use 16 bytes.
* TODO: new backwards-compatible logfile format with even bigger
* checksums (32 bytes).
*/
static
void fold_crc(void *src, void *dst)
{
int i;
memset(dst, 0, LOG_CHKSUM_SIZE);
for (i = 0; i < MARS_DIGEST_SIZE; i += LOG_CHKSUM_SIZE) {
*(__u64*)dst ^= *(__u64*)(src + i);
*(__u64*)(dst + sizeof(__u64)) ^= *(__u64*)(src + i + sizeof(__u64));
}
}
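/* Complete a record started with log_reserve(): compute the digest over
 * the original payload, optionally compress it, fix up the length
 * fields, append the trailer and finally set the valid flag.
 * The endio callback is queued and invoked from log_write_endio()
 * once the flushed write IO has completed.
 */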
bool log_finalize(struct log_status *logst, int len, void (*endio)(void *private, int error), void *private)
{
struct mref_object *mref = logst->log_mref;
struct log_cb_info *cb_info = logst->private;
void *data;
int offset;
int restlen;
int decompr_len;
int padded_len;
int nr_cb;
unsigned char crc[LOG_CHKSUM_SIZE] = {};
__u32 old_crc = 0;
__u32 check_flags;
__u16 crc_flags;
bool ok = false;
CHECK_PTR(mref, err);
if (unlikely(len > logst->payload_len)) {
MARS_ERR("trying to write more than reserved (%d > %d)\n", len, logst->payload_len);
goto err;
}
restlen = mref->ref_len - logst->offset;
if (unlikely(len + END_OVERHEAD > restlen)) {
MARS_ERR("trying to write more than available (%d > %d)\n", len, (int)(restlen - END_OVERHEAD));
goto err;
}
if (unlikely(!cb_info || cb_info->nr_cb >= MARS_LOG_CB_MAX)) {
MARS_ERR("too many endio() calls\n");
goto err;
}
data = mref->ref_data;
check_flags = 0;
/* Run the CRC on the _original_ data, before compression */
{
unsigned char checksum[MARS_DIGEST_SIZE];
check_flags |=
mars_digest(usable_digest_mask & ~disabled_log_digests,
&used_log_digest,
checksum,
data + logst->payload_offset, len);
if (check_flags & MREF_CHKSUM_MD5_OLD)
old_crc = *(__u32*)checksum;
else
fold_crc(checksum, crc);
}
/*
* Important: if somebody else is later unable to decompress the data,
* this will automatically show up as a CRC mismatch.
*/
decompr_len = 0;
padded_len = len;
if (logst->do_compress) {
__u32 new_check_flags = check_flags;
int new_len = log_compress(logst, len,
&padded_len,
&new_check_flags);
/* When compression did not pay off, treat as uncompressed */
if (new_len > 0) {
check_flags = new_check_flags;
/* exchange the lengths */
decompr_len = len;
len = new_len;
}
}
/*
* We have only 16 flag bits in the traditional
* logfile format, which has been in production for
* years. To remain compatible, we strip off
* non-checksum related bits.
*/
crc_flags = check_flags >> _MREF_CHKSUM_MD5_OLD;
/* Correct the length in the header.
*/
offset = logst->decompresslen_offset;
DATA_PUT(data, offset, (short)decompr_len);
offset = logst->reallen_offset;
DATA_PUT(data, offset, (short)len);
offset = logst->totallen_offset;
DATA_PUT(data, offset, (short)(padded_len + OVERHEAD));
/* Write the trailer.
*/
offset = logst->payload_offset + padded_len;
DATA_PUT(data, offset, END_MAGIC);
DATA_PUT(data, offset, old_crc);
DATA_PUT(data, offset, (char)1); // valid_flag copy
DATA_PUT(data, offset, (char)0); // spare
DATA_PUT(data, offset, crc_flags);
DATA_PUT(data, offset, logst->seq_nr + 1);
memcpy(data + offset, crc, LOG_CHKSUM_SIZE);
offset += LOG_CHKSUM_SIZE;
if (unlikely(offset > mref->ref_len)) {
MARS_FAT("length calculation was wrong: %d > %d\n", offset, mref->ref_len);
goto err;
}
logst->offset = offset;
/* This must come last. In case of incomplete
* or even overlapping disk transfers, this indicates
* the completeness / integrity of the payload at
* the time of starting the transfer.
*/
offset = logst->validflag_offset;
barrier();
DATA_PUT(data, offset, (char)1);
nr_cb = cb_info->nr_cb++;
cb_info->endios[nr_cb] = endio;
cb_info->privates[nr_cb] = private;
// report success
logst->seq_nr++;
logst->count++;
ok = true;
err:
return ok;
}
EXPORT_SYMBOL_GPL(log_finalize);
static
void log_read_endio(struct generic_callback *cb)
{
struct log_status *logst = cb->cb_private;
LAST_CALLBACK(cb);
CHECK_PTR(logst, err);
logst->error_code = cb->cb_error;
logst->got = true;
wake_up_interruptible(&logst->event);
return;
err:
MARS_FAT("internal pointer corruption\n");
}
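/* Read the next log record at the current position. Returns a positive
 * byte count on success, 0 at EOF, or a negative error code.
 * *payload points either into the read buffer or into *dealloc, which
 * the caller must release via brick_mem_free() when it is set.
 */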
int log_read(struct log_status *logst,
bool sloppy,
struct log_header *lh,
void **payload, int *payload_len,
void **dealloc)
{
struct mref_object *mref;
int old_offset;
int status;
*dealloc = NULL;
restart:
status = 0;
mref = logst->read_mref;
if (!mref || logst->do_free) {
loff_t this_len;
if (mref) {
GENERIC_INPUT_CALL_VOID(logst->input, mref_put, mref);
logst->read_mref = NULL;
logst->log_pos += logst->offset;
logst->offset = 0;
}
this_len = logst->end_pos - logst->log_pos;
if (this_len > logst->chunk_size) {
this_len = logst->chunk_size;
} else if (unlikely(this_len <= 0)) {
MARS_ERR("tried bad IO len %lld, start_pos = %lld log_pos = %lld end_pos = %lld\n", this_len, logst->start_pos, logst->log_pos, logst->end_pos);
status = -EOVERFLOW;
goto done;
}
mref = mars_alloc_mref(logst->brick);
if (unlikely(!mref)) {
MARS_ERR("no mref\n");
goto done;
}
mref->ref_pos = logst->log_pos;
mref->ref_len = this_len;
mref->ref_prio = logst->io_prio;
status = GENERIC_INPUT_CALL(logst->input, mref_get, mref);
if (unlikely(status < 0)) {
if (status != -ENODATA) {
MARS_ERR("mref_get() failed, status = %d\n", status);
}
goto done_free;
}
if (unlikely(mref->ref_len <= OVERHEAD)) { // EOF
status = 0;
goto done_put;
}
SETUP_CALLBACK(mref, log_read_endio, logst);
logst->offset = 0;
logst->got = false;
logst->do_free = false;
GENERIC_INPUT_CALL_VOID(logst->input, mref_io, mref);
wait_event_interruptible_timeout(logst->event, logst->got, 60 * HZ);
status = -ETIME;
if (!logst->got)
2011-04-08 09:52:46 +00:00
goto done_put;
status = logst->error_code;
if (status < 0)
goto done_put;
if (mref->ref_len < this_len) {
/* Short read. May happen when logfiles are
* truncated underneath.
*/
logst->end_pos = logst->log_pos + mref->ref_len;
}
logst->read_mref = mref;
}
status = log_scan(mref->ref_data + logst->offset,
mref->ref_len - logst->offset,
mref->ref_pos,
logst->offset,
sloppy,
lh,
payload,
payload_len,
dealloc,
&logst->seq_nr);
if (unlikely(status == 0)) {
MARS_ERR("bad logfile scan\n");
status = -EINVAL;
}
if (unlikely(status < 0)) {
goto done_put;
}
// memoize success
logst->offset += status;
if (logst->offset + (logst->max_size + OVERHEAD) * 2 >= mref->ref_len) {
logst->do_free = true;
}
done:
if (status == -ENODATA) {
status = 0; // indicates EOF
}
return status;
done_put:
old_offset = logst->offset;
if (mref) {
GENERIC_INPUT_CALL_VOID(logst->input, mref_put, mref);
logst->read_mref = NULL;
logst->log_pos += logst->offset;
logst->offset = 0;
}
if (status == -EAGAIN && old_offset > 0) {
if (*dealloc) {
brick_mem_free(*dealloc);
}
goto restart;
}
goto done;
done_free:
if (mref) {
mars_free_mref(mref);
}
logst->read_mref = NULL;
goto done;
}
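/* Verify the record checksum against the stored crc / l_crc_old.
 * On mismatch, first retry with all usable digests enabled at once;
 * when that still fails, iterate over the individual digest bits as a
 * last resort to recover the data.
 */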
static
int _check_crc(struct log_header *lh,
void *crc,
void *crc_buf,
int crc_len,
__u32 check_flags)
{
__u32 invalid_check_flags;
bool is_invalid = false;
bool did_simple_retry = false;
int did_iterative_retry = 0;
int bit;
int res;
retry:
res = -EBADMSG;
if (check_flags & (MREF_CHKSUM_ANY - MREF_CHKSUM_MD5_OLD)) {
unsigned char checksum[MARS_DIGEST_SIZE];
unsigned char check_crc[LOG_CHKSUM_SIZE];
mars_digest(check_flags,
&used_log_digest,
checksum,
crc_buf, crc_len);
fold_crc(checksum, check_crc);
if (!memcmp(crc, check_crc, LOG_CHKSUM_SIZE))
res = 0;
} else if (lh->l_crc_old) {
unsigned char checksum[MARS_DIGEST_SIZE];
__u32 old_crc;
mars_digest(check_flags,
&used_log_digest,
checksum,
crc_buf, crc_len);
old_crc = *(int*)checksum;
if (old_crc == lh->l_crc_old)
res = 0;
} else if (!did_iterative_retry) {
invalid_check_flags = check_flags;
is_invalid = true;
}
/* simple retry method */
if (unlikely(res && !did_simple_retry)) {
did_simple_retry = true;
MARS_WRN("RETRY crc check flags=0x%x crc_len=%d\n",
check_flags, crc_len);
check_flags |= usable_digest_mask;
cond_resched();
goto retry;
}
/* when simple retry failed: try the painful iterative method */
if (unlikely(res)) {
/* We are desperate. We want to recover as much data
* as possible. Performance no longer matters.
* Try to _wildly_ _guess_ what check_flags might be usable.
*/
if (!did_iterative_retry) {
bit = find_first_bit((void *)&usable_digest_mask,
sizeof(usable_digest_mask));
} else {
int next_bit =
find_next_bit((void *)&usable_digest_mask,
sizeof(usable_digest_mask),
bit + 1);
if (next_bit <= bit)
goto failed;
bit = next_bit;
}
if (bit >= 32)
goto failed;
check_flags = (1 << bit);
if (!(check_flags & usable_digest_mask))
goto failed;
did_iterative_retry++;
cond_resched();
goto retry;
failed:
MARS_WRN("ITERATIVE RETRY %d failed\n",
did_iterative_retry);
}
if (is_invalid) {
MARS_ERR("Found invalid crc flags=0x%x retried=%d+%d\n",
invalid_check_flags,
did_simple_retry, did_iterative_retry);
}
return res;
}
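/* Scan a buffer for the next valid log record. On success, returns the
 * offset behind the record and fills in lh, *payload and *payload_len
 * (decompressing into *dealloc when necessary). Returns -EAGAIN when
 * more data is needed and -EBADMSG on corruption.
 */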
int log_scan(void *buf,
int len,
loff_t file_pos,
int file_offset,
bool sloppy,
struct log_header *lh,
void **payload, int *payload_len,
void **dealloc,
unsigned int *seq_nr)
{
bool dirty = false;
int offset;
int i;
*payload = NULL;
*payload_len = 0;
for (i = 0; i < len && i <= len - OVERHEAD; i += sizeof(long)) {
unsigned char crc[LOG_CHKSUM_SIZE];
long long start_magic;
char format_version;
char valid_flag;
short total_len;
long long end_magic;
char valid_copy;
__u32 check_flags;
int restlen = 0;
int crc_len;
int decompr_len;
int found_offset;
int crc_status;
void *new_buf = NULL;
void *crc_buf;
offset = i;
if (unlikely(i > 0 && !sloppy)) {
MARS_ERR(SCAN_TXT "detected a hole / bad data\n", SCAN_PAR);
return -EBADMSG;
}
DATA_GET(buf, offset, start_magic);
if (unlikely(start_magic != START_MAGIC)) {
if (start_magic != 0)
dirty = true;
continue;
}
restlen = len - i;
if (unlikely(restlen < START_OVERHEAD)) {
MARS_WRN(SCAN_TXT "magic found, but restlen is too small\n", SCAN_PAR);
return -EAGAIN;
}
DATA_GET(buf, offset, format_version);
if (unlikely(format_version != FORMAT_VERSION)) {
MARS_ERR(SCAN_TXT "found unknown data format %d\n", SCAN_PAR, (int)format_version);
return -EBADMSG;
}
DATA_GET(buf, offset, valid_flag);
if (unlikely(!valid_flag)) {
MARS_WRN(SCAN_TXT "data is explicitly marked invalid (was there a short write?)\n", SCAN_PAR);
continue;
}
DATA_GET(buf, offset, total_len);
if (unlikely(total_len > restlen)) {
MARS_WRN(SCAN_TXT "total_len = %d but available data restlen = %d. Was the logfile truncated?\n", SCAN_PAR, total_len, restlen);
return -EAGAIN;
}
memset(lh, 0, sizeof(struct log_header));
DATA_GET(buf, offset, lh->l_stamp.tv_sec);
DATA_GET(buf, offset, lh->l_stamp.tv_nsec);
DATA_GET(buf, offset, lh->l_pos);
DATA_GET(buf, offset, lh->l_len);
DATA_GET(buf, offset, lh->l_decompress_len);
offset += 4; // skip spare
DATA_GET(buf, offset, lh->l_code);
offset += 2; // skip spare
found_offset = offset;
offset += total_len - OVERHEAD;
restlen = len - offset;
if (unlikely(restlen < END_OVERHEAD)) {
MARS_WRN(SCAN_TXT "restlen %d is too small\n", SCAN_PAR, restlen);
return -EAGAIN;
}
DATA_GET(buf, offset, end_magic);
if (unlikely(end_magic != END_MAGIC)) {
MARS_WRN(SCAN_TXT "bad end_magic 0x%llx, is the logfile truncated?\n", SCAN_PAR, end_magic);
return -EBADMSG;
}
DATA_GET(buf, offset, lh->l_crc_old);
DATA_GET(buf, offset, valid_copy);
if (unlikely(valid_copy != 1)) {
MARS_WRN(SCAN_TXT "found data marked as uncompleted / invalid, len = %d, valid_flag = %d\n", SCAN_PAR, lh->l_len, (int)valid_copy);
return -EBADMSG;
}
// skip spares
offset += 1;
DATA_GET(buf, offset, lh->l_crc_flags);
DATA_GET(buf, offset, lh->l_seq_nr);
memcpy(crc, buf + offset, LOG_CHKSUM_SIZE);
offset += LOG_CHKSUM_SIZE;
if (unlikely(lh->l_seq_nr > *seq_nr + 1 && lh->l_seq_nr && *seq_nr)) {
MARS_ERR(SCAN_TXT "record sequence number %u mismatch, expected was %u\n", SCAN_PAR, lh->l_seq_nr, *seq_nr + 1);
return -EBADMSG;
} else if (unlikely(lh->l_seq_nr != *seq_nr + 1 && lh->l_seq_nr && *seq_nr)) {
MARS_WRN(SCAN_TXT "record sequence number %u mismatch, expected was %u\n", SCAN_PAR, lh->l_seq_nr, *seq_nr + 1);
}
*seq_nr = lh->l_seq_nr;
/*
* We have only 16 flag bits in the traditional
* logfile format, which has been in production for
* years. To remain compatible, we strip off
* non-checksum related bits.
*/
check_flags =
(((__u32)lh->l_crc_flags) << _MREF_CHKSUM_MD5_OLD) &
(available_digest_mask | available_compression_mask);
/* compatibility with old logfiles during upgrade */
if (!check_flags)
check_flags = MREF_CHKSUM_MD5_OLD;
decompr_len = lh->l_decompress_len;
crc_len = lh->l_len;
if (decompr_len > 0 &&
unlikely(decompr_len > MARS_MAX_COMPR_SIZE ||
decompr_len <= crc_len ||
(decompr_len % 512) != 0)) {
MARS_ERR(SCAN_TXT "implausible decompr_len: %d ~~ %d\n",
SCAN_PAR, decompr_len, crc_len);
return -EBADMSG;
}
if (unlikely(crc_len > MARS_MAX_COMPR_SIZE)) {
MARS_ERR(SCAN_TXT "implausible crc_len: %d > %ld\n",
SCAN_PAR, crc_len, MARS_MAX_COMPR_SIZE);
return -EBADMSG;
}
crc_buf = buf + found_offset;
if ((check_flags & MREF_COMPRESS_ANY) &&
decompr_len > 0) {
new_buf =
mars_decompress(crc_buf, crc_len,
NULL, decompr_len,
check_flags);
if (likely(new_buf)) {
*dealloc = new_buf;
crc_buf = new_buf;
crc_len = decompr_len;
} else {
MARS_ERR(SCAN_TXT "decompression 0x%x failure len=%d/%d\n",
SCAN_PAR, check_flags,
crc_len, decompr_len);
return -EBADMSG;
}
}
crc_status =
_check_crc(lh,
crc,
crc_buf,
crc_len,
check_flags);
if (crc_status) {
MARS_ERR(SCAN_TXT
"data checksumming mismatch, flags=0x%x len=%d/%d err=%d\n",
SCAN_PAR,
check_flags,
lh->l_len, crc_len,
crc_status);
return -EBADMSG;
}
// last check
if (unlikely(total_len != offset - i)) {
MARS_ERR(SCAN_TXT "internal size mismatch: %d != %d\n", SCAN_PAR, total_len, offset - i);
return -EBADMSG;
}
// Success...
*payload = crc_buf;
*payload_len = crc_len;
// don't cry when nullbytes have been skipped
if (i > 0 && dirty) {
MARS_WRN(SCAN_TXT "skipped %d dirty bytes to find valid data\n", SCAN_PAR, i);
}
return offset;
}
MARS_ERR("could not find any useful data within len=%d bytes\n", len);
return -EAGAIN;
}
////////////////// module init stuff /////////////////////////
int __init init_log_format(void)
{
MARS_INF("init_log_format()\n");
return 0;
}
void exit_log_format(void)
{
MARS_INF("exit_log_format()\n");
}