/*
 * chunkserver-journal.c
 *
 *  Created on: Jun 16, 2016
 *      Author: zhangzm
 */
#define USE_LOCAL_LOG_LEVEL
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <limits.h>
#include <st.h>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <linux/falloc.h>
#include <signal.h>
#include "../utcache.h"
#include "../protocol.h"
#include "../st_pio.h"
#include "../ursax-list.h"
#include "../ursax.h"
#include "../thread-pool.h"
#include "../heartbeat.h"
#include "../backend-fs.h"
#include "../crc32.h"
#include "../ioengine.h"
#include "chunkserver-journal-entry.h"
#include "chunkserver-journal.h"

#define REALLOC_SIZE_LINE (64UL*1024*1024)
#define REALLOC_SIZE_STEP (1024UL*1024*1024)
#define META_MAGIC 0x5656565656565656UL
#define JOURANL_FILE_NAME "big-journal"
#define JOURNAL_META_FILE_NAME "big-journal-meta"
#define FILE_HEAD_SIZE SIZE_4K

#define STATE_INIT (0)
#define STATE_OK (1)


#define USE_MMAP_META

extern struct thread_pool* th_pool; // in operations.c
bool big_journal_delayio = false;
bool big_journal_full_flush = false;
bool big_journal_fallocate = true;

bool data_have_align_meta_head = true;

bool enable_auto_rotate = false;

const int jmeta_size = sizeof(struct JMeta);
extern int io_extra_flag;


uint32_t journal_meta_block_size = JOURNAL_META_SIZE_DEFAULT;
uint32_t journal_cluster_nb = JOURNAL_CLUSTER_DEFAULT_NB;

void set_journal_size(uint64_t journal_file_size, uint32_t journal_meta_nb)
{
	/* Configure the module-wide journal geometry.  A meta count of 0
	 * selects the default.  The cluster count is derived from how many
	 * full data areas (meta_nb 4K pages each) fit in the file. */
	uint32_t meta_nb = journal_meta_nb ? journal_meta_nb : JOURNAL_META_NB_DEFAULT;
	journal_meta_block_size = meta_nb * sizeof(JMeta);
	journal_cluster_nb = journal_file_size / (meta_nb * SIZE_4K);
}

/*
 * Stub: journal rotation size is not configurable yet (see
 * enable_auto_rotate above, which also defaults to false).  Kept so the
 * public interface is stable for callers.
 */
void set_journal_rotate_size(uint64_t size)
{
	(void)size; /* silence -Wunused-parameter until rotation is implemented */
}

int read_all_meta(struct ChunkServerJournal *j);
int read_head(struct ChunkServerJournal *j);

struct ChunkServerJournal *_g_big_journal = NULL;
bool current_journal_broken = false;
bool broken_handled = false;
bool write_back_by_hand = false;
bool pause_journal_fetch = false;
bool big_journal_null_return = false;
bool stop_use_journal = false;

void* broken_gc_thread(void* arg);
struct ChunkServerJournal *get_big_journal(char use_journal_hint)
{
	/* Hand out the global journal instance, honoring the caller's hint
	 * and the module-level pause/null flags. */
	if (use_journal_hint == USE_JOURNAL_DONT) {
		return NULL;
	}

	/* First caller to observe a broken journal triggers rotation and
	 * spawns the delayed garbage-collection thread, exactly once. */
	if (current_journal_broken && !broken_handled) {
		LOG_WARN("ssd journal broken");
		broken_handled = true;
		rotate_chunkserver_journal(_g_big_journal);
		st_thread_create(broken_gc_thread, _g_big_journal, 0, 0);
	}

	/* Block while journal hand-out is paused (e.g. during drain). */
	while (pause_journal_fetch) {
		st_usleep(100 * 1000);
	}

	return big_journal_null_return ? NULL : _g_big_journal;
}

void set_write_back_and_wait_over()
{
	/* Request one manual write-back pass and spin until the write-back
	 * thread clears the flag, signalling the pass has completed. */
	write_back_by_hand = true;
	while (write_back_by_hand) {
		st_usleep(100 * 1000);
	}
}

/* Publish `journal` as the process-wide big journal instance. */
void set_big_journal(struct ChunkServerJournal *journal)
{
	_g_big_journal = journal;
}

#pragma pack(push, 1)
/*
 * On-disk header of the big-journal file, occupying the first
 * FILE_HEAD_SIZE (4K) bytes.  The named fields are overlaid on a
 * fixed-size padding buffer so the struct is always exactly one header
 * block.  With USE_MMAP_META this struct is mmap'ed MAP_SHARED, so field
 * updates land directly in the page cache.
 */
struct JFileHead
{
	union{
		struct {
			uint64_t magic; // META_MAGIC
			uint64_t journal_cluster_nb; // number of (data + meta) clusters in the file
			uint64_t file_size; // (journal_meta_size + journal_data_size) * journal_cluster_nb + FILE_HEAD_SIZE
			int journal_meta_size; // bytes of JMeta slots per cluster, e.g. 8k
			int max_meta_nb_per_cluster; // journal_meta_size / sizeof(JMeta) = 256
			int journal_data_size; // 256 * 4k = 1024k, max request size is 1M
			// per block size is: journal_data_size + journal_meta_size = 1M + 8k
			uint64_t write_back_cluster_index; // oldest cluster whose write-back is not done
			uint32_t write_back_meta_index; // meta slot in that cluster, not done
			uint32_t write_mini_size; // minimum device write granularity (o_direct_size)
			uint64_t current_using_cluster_index; // cluster currently receiving new entries
			uint32_t meta_index_in_cluster; // next free meta slot; can be used
			uint32_t data_index_in_cluster; // next free data byte within current cluster
		};
		char padding[FILE_HEAD_SIZE];
	};
};
// File layout after the header block:
// |---------|------------------------|-------|------------------------|-------|
// <  head  > <         data         > < meta> <         data         > < meta>
#pragma pack(pop)

/*
 * In-memory bookkeeping for one journal cluster: how many entries were
 * handed out, how many finished their data write, and whether the
 * cluster's meta block still has unflushed updates.
 */
typedef struct clusterRecordInfo
{
	uint32_t meta_nb; // entries allocated in this cluster
	uint32_t done_nb; // entries whose data write completed
	bool dirty_meta; // true while the meta block needs flushing
} clusterRecordInfo;

#define RECORDS (64)

/*
 * Runtime state of the big journal: the open file, the mmap'ed header,
 * per-cluster meta mappings, the IO request queue consumed by io_th, and
 * the write-back machinery.  One instance is published via
 * _g_big_journal / get_big_journal().
 */
typedef struct ChunkServerJournal
{
	int state; // STATE_INIT until replay finishes, then STATE_OK
	int journal_fd;
	char ckroot[PATH_MAX];
	char journal_file_path[PATH_MAX];
	bool is_journal_file_symbol_link;

	// Header block; both members alias the same mapping/buffer.
	union{
		struct JFileHead *file_head;
		void *file_head_addr;
	};
	JMeta **journal_meta_clusters; // per-cluster meta block (mmap'ed or read in)
	uint32_t io_Record[RECORDS]; // per-second write counts, ring indexed by time(0) % RECORDS
	uint32_t recordpoint; // ring slot last written by record_write

	clusterRecordInfo *clusters_meta_info; // one entry per cluster
	uint64_t journal_meta_count; // entries recorded since start/replay

	uint64_t write_count;

	// Incoming IO requests, drained by io_th in batches.
	UrsaXList io_request_list;
	int io_request_counter;
	int inflight_io_counter;
	int get_version_request_counter;
	bool stop; // ask io_th to exit
	bool pause_io;
	bool paused_io; // io_th acknowledges a pause here
	bool io_thread_in_sleep; // io_th is blocked on io_thread_cond
	st_thread_t io_thread;
	st_cond_t io_thread_cond;
	struct ChunkServerJournalEntry * cache_entry; // range cache: chunk -> journal offsets

	st_mutex_t write_chunk_lock;
	write_back_jouranl_cb_t wb_cb; // writes journal data back into chunk files
	st_thread_t wb_thread;
	uint64_t wb_offset;
	bool stop_write_back;

	// Latency trackers for the three disk paths.
	Delayer disk_write_meta;
	Delayer disk_write;
	Delayer disk_read;
} ChunkServerJournal;

char *wb_buf;

void stop_using_journal()
{
	if(big_journal_null_return){
		return;
	}

	struct ChunkServerJournal *j = get_big_journal(USE_JOURNAL_DEFAULT);
	set_write_back_and_wait_over();

	pause_journal_fetch = true;

	while(j->io_request_counter){
		st_usleep(100*1000);
	}

	set_write_back_and_wait_over();

	big_journal_null_return = true;
	pause_journal_fetch = false;
}

void check_and_start_use_journal(struct ChunkServerJournal *j, char *ckroot,
		write_back_jouranl_cb_t wbj_cb)
{
	/* Block until a journal file appears under ckroot, then build a
	 * fresh ChunkServerJournal and publish it as the global journal.
	 * The incoming `j` is only a scratch slot; the new instance
	 * replaces it. */
	while(!check_journal(ckroot)){
		LOG_INFO("wait for new journal exist");
		st_sleep(600);
	}
	current_journal_broken = false;
	j = init_chunkserver_journal(ckroot, wbj_cb);
	if(j == NULL){
		LOG_ERROR("error init chunkserver journal");
		return;
	}
	_g_big_journal = j;
	broken_handled = false;
	async_start_use_journal();
}

/*
 * GC thread spawned when the journal is detected broken: after a one-hour
 * grace period, unpublish and tear down the broken journal.  The code
 * after the first `return NULL` is intentionally unreachable -- it is the
 * re-open path, disabled per the todo below.
 */
void* broken_gc_thread(void* arg)
{
	struct ChunkServerJournal *j = (struct ChunkServerJournal *)arg;
	// Copy what the re-open path would need before `j` is freed.
	char ckroot[PATH_MAX];
	strcpy(ckroot, j->ckroot);
	write_back_jouranl_cb_t wbj_cb = j->wb_cb;
	st_sleep(3600);
	_g_big_journal = NULL;
	fini_chunkserver_journal(j);
	return NULL; // todo if want to open journal again, delete this line
	check_and_start_use_journal(j, ckroot, wbj_cb);
	return NULL;
}

void* check_control_thread(void* arg)
{
	/* Control loop: honor stop_use_journal requests until the journal is
	 * reported broken, at which point this thread exits. */
	for(;;){
		st_usleep(100*1000);
		if(current_journal_broken){
			return NULL;
		}
		if(stop_use_journal){
			stop_using_journal();
			stop_use_journal = false;
		}
	}
}

/* Spawn the detached journal control thread (see check_control_thread). */
void start_journal_control_thread()
{
	st_thread_create(check_control_thread, NULL, 0, 0);
}

/* Sleep-poll until journal replay has finished and the journal accepts IO. */
static inline void wait_for_state_ok(struct ChunkServerJournal *j)
{
	while(j->state != STATE_OK){
		st_usleep(100*1000);
		LOG_INFO("j->state != STATE_OK, wait for some time, state is %d", j->state);
	}
}

void record_write(struct ChunkServerJournal* j)
{
	/* Count one write in the per-second ring buffer.  When the clock
	 * moves to a new slot, reset that slot and advance recordpoint. */
	uint32_t slot = time(0) % RECORDS;
	if(slot != j->recordpoint){
		j->recordpoint = slot;
		j->io_Record[slot] = 1;
		return;
	}
	j->io_Record[slot]++;
}

/*
 * Log the whole per-second write-count ring buffer plus the current slot.
 * Always returns 0.
 */
int log_records(struct ChunkServerJournal* j)
{
	// Each uint32 prints as up to 10 digits plus a space; the old
	// buf[512] could overflow with 64 large counters (up to 704 bytes).
	char buf[RECORDS * 12 + 1];
	int len = 0;
	for(int i = 0; i < RECORDS; i++){
		// snprintf bounds every append; %u matches the uint32_t counters.
		len += snprintf(buf + len, sizeof(buf) - len, "%u ", j->io_Record[i]);
	}
	uint32_t index = time(0);
	index %= RECORDS;
	LOG_INFO("records are : %s, now index is %d", buf, index);
	return 0;
}

/*
 * Return the write count recorded n seconds ago from the per-second ring
 * buffer.  NOTE(review): assumes 0 < n < RECORDS so `point` refers to
 * still-valid history -- confirm callers guarantee this.
 */
int get_lastest_n_record(struct ChunkServerJournal* j, int n)
{
	uint32_t current = time(0);
	uint32_t point = current - n;
	point %= RECORDS;
	current %= RECORDS;
	if(current == j->recordpoint){
		// nothing
	} else {
		// record_write has not run this second, so the current slot
		// still holds stale data from RECORDS seconds ago; clear it.
		LOG_DEBUG("set index %d to 0", current);
		j->io_Record[current] = 0;
	}

	return j->io_Record[point];
}

void begin_write_back_journal(struct ChunkServerJournal* j);
void end_write_back_journal(struct ChunkServerJournal* j);

/* File offset where a cluster begins: the 4K header block plus
 * cluster_index full (data + meta) clusters. */
static inline uint64_t cluster_offset(ChunkServerJournal* j, uint64_t cluster_index)
{
	uint64_t cluster_bytes = (uint64_t)j->file_head->journal_meta_size
	        + j->file_head->journal_data_size;
	return FILE_HEAD_SIZE + cluster_bytes * cluster_index;
}

/* File offset of one JMeta slot: meta slots live after the cluster's
 * data area. */
static inline uint64_t journal_meta_offset(ChunkServerJournal* j,
        uint64_t cluster_index, uint32_t meta_index)
{
	uint64_t meta_base = cluster_offset(j, cluster_index)
	        + j->file_head->journal_data_size;
	return meta_base + sizeof(JMeta) * meta_index;
}

/* In-memory (mmap'ed or buffered) address of one meta slot. */
static inline JMeta * journal_meta_addr(ChunkServerJournal* j,
        uint64_t cluster_index, uint32_t meta_index)
{
	JMeta *cluster = j->journal_meta_clusters[cluster_index];
	return cluster + meta_index;
}

/* File offset of a data byte: the data area starts at the cluster base. */
static inline uint64_t journal_data_offset(ChunkServerJournal* j,
        uint64_t cluster_index, uint32_t data_index)
{
	return cluster_offset(j, cluster_index) + data_index;
}

/*
 * Persist one cluster's meta block.  With USE_MMAP_META (the current
 * build) the block is a MAP_SHARED mapping, so this is a no-op and the
 * kernel writes dirty pages back on its own.  NOTE(review): no explicit
 * msync is issued on the mmap path -- confirm this is acceptable for
 * crash consistency.  Without mmap, the buffered copy is pwritten at the
 * cluster's on-disk meta offset.
 */
int flush_meta(ChunkServerJournal* j, uint64_t cluster_index)
{
#ifndef USE_MMAP_META
	int ret = ioengine->pwrite(j->journal_fd, j->journal_meta_clusters[cluster_index],
							j->file_head->journal_meta_size,
							journal_meta_offset(j, cluster_index, 0));
	if(ret != j->file_head->journal_meta_size){
		LOG_ERROR("error pwrite index %ld %m", cluster_index);
		return ret;
	}
#endif
	return 0;
}

int sync_meta_block(ChunkServerJournal* j, uint64_t cluster_index)
{
	/* Wait until every entry allocated in the cluster has finished its
	 * data write, then persist the cluster's meta block and mark it
	 * clean. */
	clusterRecordInfo *info = &j->clusters_meta_info[cluster_index];
	while(info->meta_nb != info->done_nb) {
		st_usleep(100);
	}
	LOG_DEBUG("start msync %ld", cluster_index);
	int ret = flush_meta(j, cluster_index);
	info->dirty_meta = false;
	LOG_DEBUG("end msync %ld", cluster_index);
	return ret;
}

/* Heap-allocated argument bundle for async_meta_block; the thread frees it. */
struct SyncMetaArgs
{
	ChunkServerJournal* j;
	uint64_t cluster_index;
};

/* Thread entry: flush one cluster's meta block, then release the args.
 * With NO_SYNC_META defined the flush is compiled out entirely. */
void* async_meta_block(void* arg)
{
	struct SyncMetaArgs *args = (struct SyncMetaArgs *)arg;
#ifdef NO_SYNC_META
	free(args);
	return NULL;
#endif
	sync_meta_block(args->j, args->cluster_index);
	free(args);
	return NULL;
}

/*
 * Allocate one meta slot and `data_size` bytes of data space in the
 * current cluster, advancing the header cursors.  When the current
 * cluster cannot fit the request, schedule an async flush of its meta
 * block and move to the next cluster.  Returns 0 on success, -1 when the
 * journal file is full.
 */
int get_meta_addr_and_data_offset(ChunkServerJournal* j, uint32_t data_size,
        JMeta** meta_addr, uint64_t* data_offset, uint32_t* cluster_index)
{
	while(1){
		int32_t meta_space = j->file_head->max_meta_nb_per_cluster
		        - j->file_head->meta_index_in_cluster;
		int32_t data_space = j->file_head->journal_data_size
		        - j->file_head->data_index_in_cluster;
		if(meta_space < 1 || data_space < (int32_t)data_size) {
			// Current cluster exhausted; fail if no cluster remains.
			if(j->file_head->current_using_cluster_index + 1
			>= j->file_head->journal_cluster_nb) {
				LOG_ERROR("no extra cluster can be used, "
				        "current using cluster index is %ld",
				        j->file_head->current_using_cluster_index);
				return -1;
			}

			// Flush the filled cluster's meta in the background.  On
			// allocation/thread failure the cluster simply stays dirty
			// and is flushed later by flush_free_all_meta.
			struct SyncMetaArgs *args = (struct SyncMetaArgs *)malloc(sizeof(*args));
			if(args != NULL){
				args->j = j;
				args->cluster_index = j->file_head->current_using_cluster_index;
				if(st_thread_create(async_meta_block, args, 0, 0) == NULL){
					LOG_ERROR("error create async meta flush thread %m");
					free(args); // previously leaked on thread-create failure
				}
			}else{
				LOG_ERROR("error alloc SyncMetaArgs %m"); // previously dereferenced NULL
			}

			j->file_head->current_using_cluster_index++;
			j->file_head->meta_index_in_cluster = 0;
			j->file_head->data_index_in_cluster = 0;
			LOG_DEBUG("current_using_cluster_index set to %ld ",
			        j->file_head->current_using_cluster_index);
			continue;
		}
		// Hand out the slot/space and bump the cursors.
		*meta_addr = journal_meta_addr(j, j->file_head->current_using_cluster_index,
		        j->file_head->meta_index_in_cluster);
		*data_offset = journal_data_offset(j, j->file_head->current_using_cluster_index,
		        j->file_head->data_index_in_cluster);
		*cluster_index = j->file_head->current_using_cluster_index;
		j->clusters_meta_info[j->file_head->current_using_cluster_index].meta_nb++;
		j->file_head->meta_index_in_cluster++;
		j->file_head->data_index_in_cluster += data_size;
		return 0;
	}
}

static inline uint32_t get_req_write_size(struct JMeta *jmeta, ChunkServerJournal *j)
{
	/* Direct chunk writes bypass the journal data area entirely. */
	if(jmeta->direct_write_chunk){
		return 0;
	}
	/* Otherwise round the payload up to the device write granularity. */
	return ROUND_UP(jmeta->length, j->file_head->write_mini_size);
}

/*
 * Pool-thread worker for one write request: write the data (either into
 * the journal file at req->real_file_offset, or directly into the chunk
 * via j->wb_cb when direct_write_chunk is set), update the range cache,
 * then commit the meta slot.  Statement order matters: the meta slot is
 * only filled in after the data write succeeds, and volumeid is written
 * last so a crash cannot leave a valid-looking meta without its data.
 */
static void* do_process_wio(void* req_)
{
	struct IORequest *req = (struct IORequest *)req_;
	struct JMeta *target_meta = (struct JMeta *)req->meta_addr;
	ChunkServerJournal *j = (ChunkServerJournal *)req->j;

	begin_proc(&j->disk_write);
	if(req->jmeta.direct_write_chunk){
		lock_chunk_entry(j->cache_entry, req->jmeta.volumeid, req->jmeta.index);
	}

	// NOTE(review): w_size ignores direct_write_chunk here (unlike
	// get_req_write_size), so the ret == w_size check below assumes
	// wb_cb returns the rounded-up size too -- confirm.
	int ret = 0, w_size = ROUND_UP(req->jmeta.length, j->file_head->write_mini_size);
	if(likely(w_size != 0)){
		LOG_DEBUG("ioengine.pwrite chunk %08x.%d version %ld real-file-offset %ld offset %d length %d w size %d checksum %08x",
				   req->jmeta.volumeid, req->jmeta.index, req->jmeta.version,
				   req->real_file_offset, req->jmeta.begin, req->jmeta.length,
				   w_size, crc32_n(req->buf, req->jmeta.length));
		ret = req->jmeta.direct_write_chunk ?
					j->wb_cb(req->jmeta.volumeid, req->jmeta.index, req->buf,
					        req->jmeta.begin, req->jmeta.length, INVALID_VERSION,
					        false, true):
					ioengine->pwrite(j->journal_fd, req->buf, w_size,
					        req->real_file_offset);
	}
	if(ret == w_size){
		LOG_DEBUG("start insert range 1");
		// Direct writes invalidate any cached journal range for this
		// region; journal writes record where the data now lives.
		if(req->jmeta.direct_write_chunk){
			clear_range(j->cache_entry, req->jmeta.volumeid, req->jmeta.index,
			        req->jmeta.begin, req->jmeta.length,
							req->jmeta.version, req->jmeta.time0);
		}else{
			insert_range(j->cache_entry, req->jmeta.volumeid, req->jmeta.index,
			        req->jmeta.begin, req->jmeta.length,
							req->real_file_offset, req->jmeta.version, req->jmeta.time);
		}

		req->req_ret = req->jmeta.length;
		// Commit the meta slot: copy everything with volumeid zeroed,
		// then set volumeid last, since a nonzero volumeid is what
		// marks the slot valid during replay.
		LOG_DEBUG("start insert range 2");
		uint32_t tmp_volumeid = req->jmeta.volumeid;
		req->jmeta.volumeid = 0;
		memcpy(target_meta, &req->jmeta, sizeof(req->jmeta));
		req->jmeta.volumeid = tmp_volumeid;
		target_meta->volumeid = tmp_volumeid;
		j->clusters_meta_info[req->cluster_index].done_nb++;
		j->clusters_meta_info[req->cluster_index].dirty_meta = true;
		j->write_count++;
		record_write(j);
		LOG_DEBUG("start insert range 3");
	}else{
		LOG_ERROR("chunk %08x.%d offset %d real offset %ld len %d ret %d %m",
		        req->jmeta.volumeid, req->jmeta.index,
					req->jmeta.begin, req->real_file_offset, req->jmeta.length, ret);
		req->req_ret = -1;
	}
	if(req->jmeta.direct_write_chunk){
		unlock_chunk_entry(j->cache_entry, req->jmeta.volumeid, req->jmeta.index);
	}
	end_proc(&j->disk_write);

	req->cb(req);
	return NULL;
}

/*
 * Pool-thread worker for one read request.  The requested range may be
 * partially in the journal and partially in the chunk file, so the loop
 * alternates: bytes before the next cached journal range are read from
 * the chunk (req->readfun), then the overlapping journal range is read
 * from the journal file, until the request is satisfied.
 */
static void* do_process_rio(void* req_)
{
	struct IORequest *req = (struct IORequest *)req_;
	ChunkServerJournal *j = (ChunkServerJournal *)req->j;
	begin_proc(&j->disk_read);
	uint32_t volumeid = req->jmeta.volumeid, index = req->jmeta.index;
	uint32_t offset = req->jmeta.begin, len = req->jmeta.length;
	char *buf = (char *)req->buf;
	while(len > 0){
		uint32_t rbegin, rend;
		uint64_t rpos;
		uint32_t chunk_read_len = 0;
		uint32_t end = offset + len;
		// Find the next cached journal range overlapping [offset, end).
		int ret = lookup_range(j->cache_entry, volumeid, index, offset, len,
		        &rbegin, &rend, &rpos);
		if(ret < 0){
			// Nothing in the journal: everything left comes from the chunk.
			// (rbegin/rend/rpos stay unset but are not used: len reaches 0.)
			chunk_read_len = len;
		}else{
			// Bytes before the journal range come from the chunk.
			chunk_read_len = MAX(rbegin, offset) - offset;
		}
		if(chunk_read_len){
			LOG_DEBUG("do_process_rio read chunk %08x.%d chunk read len %d offset %d",
			        volumeid, index, chunk_read_len, offset);
			ret = req->readfun(req->chunk, buf, chunk_read_len, offset, NULL);
			if(ret != (int)chunk_read_len){
				LOG_ERROR("align_pread_align_size req->chunk_fd_for_read %m len %d "
                          "offset %d buf %p", chunk_read_len, offset, buf);
				req->req_ret = -1;
				goto out;
			}
			buf += chunk_read_len;
			offset += chunk_read_len;
			len -= chunk_read_len;
			if(len == 0){
				break;
			}
		}

		// Remaining head of the request overlaps the cached journal
		// range: read it from the journal file.
		uint32_t journal_read_len = MIN(end, rend) - offset;
		uint64_t journal_file_offset = rpos + offset - rbegin;
		LOG_DEBUG(" chunk %08x.%d read from journal offset %ld len %d", volumeid, index, journal_file_offset, journal_read_len);
		ret = align_pread_align_size(j->journal_fd, buf, journal_read_len, journal_file_offset, NULL, j->file_head->write_mini_size);
		if(ret != (int)journal_read_len){
			LOG_ERROR("align_pread_align_size j->journal_fd (%m) len %d offset %d buf %p begin %d len %d, rbegin %d rend %d offset %d end %d",
					  journal_read_len, journal_file_offset, buf, req->jmeta.begin, req->jmeta.length, rbegin, rend, offset, end);
			req->req_ret = -1;
			goto out;
		}
		buf += journal_read_len;
		offset += journal_read_len;
		len -= journal_read_len;
	}
	req->req_ret = req->jmeta.length;
out:
	req->cb(req);
	end_proc(&j->disk_read);
	return NULL;
}

/*
 * Drain the journal's request queue: repeatedly splice j->io_request_list
 * into the caller-owned io_list, allocate journal space for each write,
 * and dispatch each request to a pool thread (do_process_wio /
 * do_process_rio).  On journal-full, the remaining requests stay in
 * io_list and the caller (io_th) invokes process_io again to retry.
 * Always waits for all dispatched workers before returning.
 */
static int process_io(UrsaXList *io_list, ChunkServerJournal *j)
{
	struct workers_tracker tracker;
	workers_tracker_init(&tracker);
	int wio_nb = 0, rio_nb = 0;
	while(true){
		j->inflight_io_counter = j->io_request_counter;
		j->io_request_counter = 0;
		// Splice newly queued requests onto the local list.
		CAT_LIST(io_list, &j->io_request_list);
		if(IS_EMPTY_LIST(io_list)){
			break;
		}
		// get all request
		NODE_FOR_EACH(io_list, p){
			struct IORequest *req = container_of(p, struct IORequest, list);
			LOG_DEBUG("io_list p %p io_list %p, chunk %08x.%d io_type %d offset %d len %d",
					p, io_list, req->jmeta.volumeid, req->jmeta.index, req->io_type, req->jmeta.begin, req->jmeta.length);
			if(req->io_type == IO_WRITE){
				wio_nb++;
				uint64_t data_offset = 0;
				JMeta* meta_addr = NULL;
				uint32_t cluster_index = 0;
				uint32_t align_len = get_req_write_size(&req->jmeta, j);
				int ret = get_meta_addr_and_data_offset(j, align_len, &meta_addr, &data_offset, &cluster_index);
				if(ret < 0){
					// Journal is full: leave the unprocessed requests in
					// io_list; the caller loops and retries.
					LOG_INFO("get_meta_addr_and_data_offset failed, return -1 here and will redo next");
					st_usleep(100*1000);
					goto out;
				}
				req->meta_addr = meta_addr;
				req->real_file_offset = data_offset;
				req->cluster_index = cluster_index;

				j->journal_meta_count++;

				set_big_journal_size(data_offset);

				DELETE_NODE(p);
				pool_thread_create(th_pool, do_process_wio, req, &tracker);
			} else if (req->io_type == IO_READ) {
				rio_nb++;
				DELETE_NODE(p);
				pool_thread_create(th_pool, do_process_rio, req, &tracker);
			}
			// Yield so pool threads can start while we keep dispatching.
			st_sleep(0);
		}
	}
out:
	workers_tracker_wait(&tracker, -1);
	j->inflight_io_counter = 0;
	LOG_DEBUG("processed %d wio and %d rio", wio_nb, rio_nb);
	return 0;
}

/* Park the IO loop while an external actor holds pause_io; paused_io
 * acknowledges that the loop has actually stopped. */
static inline void check_pause_io(ChunkServerJournal *j)
{
	j->paused_io = true;
	while(j->pause_io){
		LOG_INFO("j->pause_io is set true, sleep 0.5 second");
		st_usleep(500*1000);
	}
	j->paused_io = false;
}

/*
 * Dedicated IO dispatcher thread.  Sleeps on io_thread_cond while the
 * queue is empty (io_thread_in_sleep lets stop_io_thread know to
 * interrupt it), and otherwise drains requests through process_io until
 * the local list empties, honoring pause requests between rounds.
 */
static void* io_th(void* data)
{
	LOG_INFO("start");
	ChunkServerJournal *j = (ChunkServerJournal *)data;

	int ret = 0;
	while(!j->stop){
		if(j->io_request_counter == 0){
			// Mark ourselves idle/asleep before blocking; order matters
			// so stop_io_thread can see io_thread_in_sleep and interrupt.
			j->paused_io = true;
			j->io_thread_in_sleep = true;
			ret = st_cond_wait(j->io_thread_cond);
			if(ret < 0){
				// Interrupted (e.g. by stop_io_thread); re-check j->stop.
				LOG_INFO("st_cond_wait ret %d errno %d", ret, errno);
				continue;
			}
			j->io_thread_in_sleep = false;
			check_pause_io(j);
		}
		// io_list carries over requests process_io could not place
		// (journal full); loop until it drains completely.
		UrsaXList io_list;
		INIT_URSAX_LIST(io_list);
		while(true){
			process_io(&io_list, j);
			if(IS_EMPTY_LIST(&io_list)){
				break;
			}
			check_pause_io(j);
		}
	}
	LOG_INFO("stop");
	return NULL;
}

/*
 * Replay one cluster's meta slots [start, end) into the range cache,
 * restoring the header cursors to the replayed position.  Slots in
 * [0, start) were already written back and are only scanned to advance
 * the data offset.  Returns 0 normally, -1 when the cluster is entirely
 * empty (slot 0 unused), which stops the replay loop.
 */
int play_back_one_cluster(struct ChunkServerJournal *j, uint64_t cluster_index, uint32_t start, uint32_t end)
{
	uint64_t data_start_index = journal_data_offset(j, cluster_index, 0);
	uint32_t data_index = 0;
	JMeta *jmetas = journal_meta_addr(j, cluster_index, 0);
	// Skip slots already written back, accumulating their data sizes so
	// data_index lands at the first live entry.
	for(uint32_t i = 0; i < start; i++){
		if(!jmetas[i].volumeid){
			LOG_ERROR("play back error, cluster %ld index %d volumeid is 0", cluster_index, i);
			return 0;
		}
		uint32_t data_size = get_req_write_size(&jmetas[i], j);
		LOG_DEBUG("jump over(%ld) %08x.%d version %ld offset(%ld) len(%d) datasize(%d)", cluster_index,
					jmetas[i].volumeid, jmetas[i].index, jmetas[i].version, jmetas[i].begin, jmetas[i].length, data_size);
		data_index += data_size;
	}

	if(!jmetas[0].volumeid){
		// Fixed: the %ld conversion previously had no matching argument
		// (undefined behavior in the varargs call).
		LOG_INFO("cluster %ld index 0 is empty", cluster_index);
		return -1;
	}

	// Replay live slots until the first unused one (volumeid == 0).
	uint32_t meta_index = 0;
	for(meta_index = start; meta_index < end; meta_index++){
		if(!jmetas[meta_index].volumeid){
			break;
		}
		j->clusters_meta_info[cluster_index].meta_nb++;
		j->clusters_meta_info[cluster_index].done_nb++;
		uint32_t data_size = get_req_write_size(&jmetas[meta_index], j);

		if(jmetas[meta_index].direct_write_chunk){
			clear_range(j->cache_entry, jmetas[meta_index].volumeid, jmetas[meta_index].index,
							jmetas[meta_index].begin, jmetas[meta_index].length,
							jmetas[meta_index].version, jmetas[meta_index].time0);
		}else{
			insert_range(j->cache_entry, jmetas[meta_index].volumeid, jmetas[meta_index].index,
							jmetas[meta_index].begin, jmetas[meta_index].length, data_start_index+data_index,
							jmetas[meta_index].version, jmetas[meta_index].time);
		}
		LOG_DEBUG("play back(%ld) %08x.%d version %ld offset(%ld) len(%d) datasize(%d)",
					cluster_index, jmetas[meta_index].volumeid, jmetas[meta_index].index,
					jmetas[meta_index].version, jmetas[meta_index].begin, jmetas[meta_index].length, data_size);

		data_index += data_size;
		set_big_journal_size(data_index + data_start_index);
	}

	// Resume appends right after the last replayed entry.
	j->file_head->current_using_cluster_index = cluster_index;
	j->file_head->meta_index_in_cluster = meta_index;
	j->file_head->data_index_in_cluster = data_index;
	return 0;
}

/* Acquire an exclusive advisory lock on the journal file.  Uses
 * non-blocking flock plus sleep so other st threads keep running while
 * a previous owner still holds the lock. */
static void wait_lock_journal_file(struct ChunkServerJournal *j)
{
	for(;;){
		int ret = flock(j->journal_fd, LOCK_EX|LOCK_NB);
		LOG_DEBUG("flock ret %d", ret);
		if(ret >= 0){
			return;
		}
		st_usleep(100*1000);
	}
}

/*
 * Replay thread: lock the journal file, load the header and all meta
 * blocks, replay every cluster from the write-back cursor forward into
 * the range cache, then mark the journal usable (STATE_OK) and start the
 * write-back and control threads.
 */
static void* _play_back_journals(void* _j)
{
	struct ChunkServerJournal *j = (struct ChunkServerJournal *)_j;
	LOG_INFO("start _play_back_journals");
	wait_lock_journal_file(j);
	// NOTE(review): return values of read_head/read_all_meta are not
	// checked; a failed load proceeds to replay anyway -- confirm.
	read_head(j);
	read_all_meta(j);
	LOG_INFO("play back journal write back cluster index is %ld j->file_head->journal_cluster_nb %ld",
				j->file_head->write_back_cluster_index, j->file_head->journal_cluster_nb);
	for(uint64_t cluster_index = j->file_head->write_back_cluster_index; cluster_index < j->file_head->journal_cluster_nb; cluster_index++){
		uint32_t meta_start = 0, meta_end = j->file_head->max_meta_nb_per_cluster;
		// Only the first (partially written-back) cluster starts midway.
		if(cluster_index == j->file_head->write_back_cluster_index){
			meta_start = j->file_head->write_back_meta_index;
		}
		int ret = play_back_one_cluster(j, cluster_index, meta_start, meta_end);
		if(ret < 0){
			// An empty cluster marks the end of recorded entries.
			break;
		}
	}
	LOG_INFO("init_chunkserver_journal run success");
	j->state = STATE_OK;
	begin_write_back_journal(j);
	start_journal_control_thread();
	return 0;
}

int play_back_journals(struct ChunkServerJournal*j)
{
	st_thread_create(_play_back_journals, j, 0, 0);
	return 0;
}

/*
 * Stub kept for interface compatibility: iterating journal meta blocks
 * through a callback is not implemented for the big journal.  Always
 * returns 0.
 */
int journal_meta_blocks_do(struct ChunkServerJournal* j, journals_cb_t jcb, void* data)
{
	(void)j;
	(void)jcb;
	(void)data;
	return 0;
}

/* Number of journal entries recorded since start/replay. */
uint64_t journal_meta_block_nb(void* _j)
{
	struct ChunkServerJournal* journal = (struct ChunkServerJournal*)_j;
	return journal->journal_meta_count;
}

/* Ask the IO thread to exit, interrupting it if it is blocked on the
 * condition variable, then join it. */
void stop_io_thread(struct ChunkServerJournal* j)
{
	j->stop = true;
	if(j->io_thread_in_sleep){
		st_thread_interrupt(j->io_thread);
	}
	st_thread_join(j->io_thread, NULL);
}


#define BIG_JOURNAL_DIR "big-journal"
static inline void check_mkdir(char* ckroot)
{
	char path[PATH_MAX];
	sprintf(path, "%s/" BIG_JOURNAL_DIR, ckroot);
	if(0 != access(path, F_OK)){
		LOG_INFO("mkdir %s", path);
		mkdir(path, 0755);
	}
}

/* Build the journal file path under ckroot into `path`.  The caller must
 * provide a PATH_MAX-sized buffer (the size is not passed in). */
static inline void get_journal_path(char* ckroot, char* path)
{
	sprintf(path, "%s/" BIG_JOURNAL_DIR "/%s", ckroot, JOURANL_FILE_NAME);
}

/* Rotate the journal file on disk (rename-away via file_rotate) so a new
 * journal can be created at the canonical path. */
void rotate_chunkserver_journal(struct ChunkServerJournal* j)
{
	file_rotate(j->journal_file_path);
}

/* True when the big-journal file (or symlink) exists under ckroot. */
bool check_journal(char* ckroot)
{
	char path[PATH_MAX];
	get_journal_path(ckroot, path);
	if(access(path, F_OK) != 0){
		LOG_INFO("can't access big journal");
		return false;
	}
	LOG_INFO("big-journal file exist !!!");
	return true;
}

/* Install `realpath` as the journal by symlinking it into the journal
 * directory.  Fails with -1 when a journal already exists; otherwise
 * returns symlink(2)'s result. */
int create_journal_link(char *ckroot, char* realpath)
{
	char journal_path[PATH_MAX];
	if(check_journal(ckroot)){
		return -1;
	}
	check_mkdir(ckroot);
	get_journal_path(ckroot, journal_path);
	LOG_INFO("symlink %s %s", realpath, journal_path);
	return symlink(realpath, journal_path);
}

/* Create a fresh journal file at the canonical path under ckroot.  Fails
 * with -1 when one already exists. */
int create_journal(char *ckroot)
{
	char journal_path[PATH_MAX];
	if(check_journal(ckroot)){
		return -1;
	}
	check_mkdir(ckroot);
	get_journal_path(ckroot, journal_path);
	return create_journal_with_path(journal_path);
}

extern uint32_t o_direct_size;

/*
 * Create a new journal file at journal_path: preallocate the full extent
 * and write an initialized JFileHead at offset 0.  Returns a negative
 * value on failure; on success the return value is the (positive) count
 * of header bytes written, preserving the historical contract.
 */
int create_journal_with_path(char *journal_path)
{
	int ret = access(journal_path, F_OK);
	if(ret == 0){
		LOG_ERROR("error file exist %s", journal_path);
		return -1;
	}
	// Geometry from module configuration (see set_journal_size).
	uint32_t meta_size = journal_meta_block_size;
	uint64_t block_nb = journal_cluster_nb;

	uint32_t data_size = meta_size / sizeof(JMeta) * SIZE_4K;
	uint64_t file_size_real = block_nb * (meta_size + data_size) + FILE_HEAD_SIZE;
	uint32_t max_meta_nb_per_cluster = meta_size / sizeof(JMeta);

	int journal_fd = open(journal_path, O_RDWR | O_CREAT , 0600);
	if(journal_fd < 0){
		LOG_ERROR("error open journal fd %s, ret %d %m", journal_path, journal_fd);
		return journal_fd;
	}
	struct JFileHead *file_head = (struct JFileHead *)zalloc(sizeof(*file_head));
	if(file_head == NULL){
		// Previously unchecked: the field writes below would crash.
		LOG_ERROR("error alloc file head %m");
		close(journal_fd);
		return -1;
	}
	file_head->magic = META_MAGIC;
	file_head->file_size = file_size_real;
	file_head->journal_cluster_nb = block_nb;
	file_head->journal_meta_size = meta_size;
	file_head->journal_data_size = data_size;
	file_head->max_meta_nb_per_cluster = max_meta_nb_per_cluster;
	file_head->write_mini_size = o_direct_size;
	LOG_INFO("write_mini_size IS %d", o_direct_size);

	// Reserve the full journal extent up front.
	ret = fallocate(journal_fd, 0, 0, file_size_real);
	if(ret < 0){
		LOG_ERROR("error fallocate ret %d %m", ret);
		goto out;
	}
	ret = write(journal_fd, file_head, sizeof(*file_head));
	if(ret != sizeof(*file_head)){
		LOG_ERROR("error write file head, %m");
		ret = -1;
		goto out;
	}

out:
	free(file_head);
	close(journal_fd);
	return ret;
}

int read_all_meta(struct ChunkServerJournal *j)
{
	j->journal_meta_clusters = (JMeta **)zalloc(sizeof(void*) * j->file_head->journal_cluster_nb);
	LOG_DEBUG("journal_data_size %d journal_meta_size %d journal_data_size %d journal_cluster_nb %d",
			j->file_head->journal_data_size, j->file_head->journal_meta_size, j->file_head->journal_data_size, j->file_head->journal_cluster_nb);
	bool meta_read_nil = false;
	uint64_t write_back_index = j->file_head->write_back_cluster_index;
	for(uint64_t i = 0; i < j->file_head->journal_cluster_nb; i++){
		uint64_t offset = journal_meta_offset(j, i, 0);
#ifdef USE_MMAP_META
		j->journal_meta_clusters[i] = (JMeta *)mmap(NULL, j->file_head->journal_meta_size, PROT_WRITE|PROT_READ,
										MAP_SHARED, j->journal_fd, offset);
#else
		j->journal_meta_clusters[i] = ursax_4k_align_alloc(j->file_head->journal_meta_size);
#endif
		if(meta_read_nil && i < write_back_index){
			memset(j->journal_meta_clusters[i], 0, j->file_head->journal_meta_size);
			continue;
		}
#ifndef USE_MMAP_META
		int ret = pread(j->journal_fd, j->journal_meta_clusters[i], j->file_head->journal_meta_size, offset);
		if(ret != j->file_head->journal_meta_size){
			LOG_ERROR("error pread journal file %m");
			return ret;
		}
#endif
		if(j->journal_meta_clusters[i][0].volumeid == 0){
			LOG_INFO("cluster index %ld is 0", i);
			meta_read_nil = true;
		}

		LOG_DEBUG("read index %d addr %p, file offset %ld", i, j->journal_meta_clusters[i], offset);
	}
	return 0;
}

/* Flush every dirty meta block, then release all per-cluster meta
 * mappings/buffers and the cluster pointer table itself. */
int flush_free_all_meta(struct ChunkServerJournal *j)
{
	uint64_t nb = j->file_head->journal_cluster_nb;
	for(uint64_t idx = 0; idx < nb; idx++){
		if(j->clusters_meta_info[idx].dirty_meta){
			flush_meta(j, idx);
		}
#ifdef USE_MMAP_META
		munmap(j->journal_meta_clusters[idx], j->file_head->journal_meta_size);
#else
		free(j->journal_meta_clusters[idx]);
#endif
	}
	free(j->journal_meta_clusters);
	j->journal_meta_clusters = NULL;
	return 0;
}

/*
 * Load the on-disk header into j->file_head.  With USE_MMAP_META (the
 * current build) the header is already a live MAP_SHARED mapping set up
 * by new_read_head, so this is a no-op.
 */
int read_head(struct ChunkServerJournal *j)
{
#ifndef USE_MMAP_META
	int ret = pread(j->journal_fd, j->file_head, FILE_HEAD_SIZE, 0);
	if(ret != FILE_HEAD_SIZE){
		LOG_ERROR("error pread head %m");
		return ret;
	}
#endif
	return 0;
}

/*
 * Persist j->file_head to offset 0 of the journal file.  With
 * USE_MMAP_META (the current build) the header is a MAP_SHARED mapping,
 * so updates reach the page cache directly and this is a no-op.
 */
int write_head(struct ChunkServerJournal *j)
{
#ifndef USE_MMAP_META
	int ret = ioengine->pwrite(j->journal_fd, j->file_head, FILE_HEAD_SIZE, 0);
	if(ret != FILE_HEAD_SIZE){
		LOG_ERROR("error pwrite head %d %m", ret);
		return ret;
	}
#endif
	return 0;
}

int new_read_head(struct ChunkServerJournal *j)
{
#ifdef USE_MMAP_META
	j->file_head = (struct JFileHead *)mmap(NULL, FILE_HEAD_SIZE, PROT_WRITE|PROT_READ,
						MAP_SHARED, j->journal_fd, 0);
	return 0;
#else
	j->file_head = ursax_4k_align_alloc(FILE_HEAD_SIZE);
	return read_head(j);
#endif
}

/*
 * Tear down the header set up by new_read_head: unmap the MAP_SHARED
 * mapping (dirty pages are written back by the kernel), or on the
 * non-mmap path flush the buffer to disk and free it.  Always returns 0.
 */
int write_del_head(struct ChunkServerJournal *j)
{
#ifdef USE_MMAP_META
	munmap(j->file_head, FILE_HEAD_SIZE);
#else
	write_head(j);
	free(j->file_head);
#endif
	return 0;
}

/*
 * Create (or reopen) the big-journal file under ckroot and build a
 * ChunkServerJournal around it.  Replay of existing entries runs
 * asynchronously (play_back_journals); IO waits via wait_for_state_ok
 * until j->state becomes STATE_OK.  Returns NULL on failure.
 */
struct ChunkServerJournal* init_chunkserver_journal(char *ckroot, write_back_jouranl_cb_t wbj_cb)
{
//	register_local_log_level("s.chunkserverj");
	uint32_t meta_size = journal_meta_block_size;
	uint64_t block_nb = journal_cluster_nb;
	uint32_t data_size = meta_size / sizeof(JMeta) * SIZE_4K;
	// Fixed: block_nb is uint64_t, matched with %ld instead of %d.
	LOG_INFO("init_chunkserver_journal start meta_size is %d data size is %d block_nb is %ld", meta_size, data_size, block_nb);
	check_mkdir(ckroot);
	struct ChunkServerJournal* journal = (struct ChunkServerJournal*)ursax_4k_align_alloc(sizeof(*journal));
	journal->wb_cb = wbj_cb;
	strcpy(journal->ckroot, ckroot);
	journal->state = STATE_INIT;
	Delayer_init(&journal->disk_read, "big-journal disk read");
	Delayer_init(&journal->disk_write, "big-journal disk write");
	Delayer_init(&journal->disk_write_meta, "big-journal write meta");

	INIT_URSAX_LIST(journal->io_request_list);
	get_journal_path(ckroot, journal->journal_file_path);
	journal->journal_fd = open(journal->journal_file_path, O_RDWR | O_CREAT | io_extra_flag, 0600);
	// fd 0 is a valid descriptor; only a negative result means failure
	// (the old `<= 0` check misclassified fd 0).
	if(journal->journal_fd < 0){
		LOG_ERROR("error open journal file %s %m", journal->journal_file_path);
		free(journal);
		return NULL;
	}
	journal->is_journal_file_symbol_link = is_symbol_link(journal->journal_file_path);
	int ret = 0;
	uint64_t file_size_old = file_size(journal->journal_fd);
	uint64_t file_size_real = block_nb * (meta_size + data_size) + FILE_HEAD_SIZE;
	uint32_t max_meta_nb_per_cluster = meta_size / sizeof(JMeta);
	if(file_size_old == 0){
		// Fresh file: reserve the full journal extent up front.
		ret = fallocate(journal->journal_fd, 0, 0, file_size_real);
		if(ret < 0){
			LOG_ERROR("error fallocate ret %d %m", ret);
			goto error1;
		}
	}

	ret = new_read_head(journal);
	if(ret < 0){
		goto error1;
	}

	if(file_size_old == 0){
		// Initialize a brand-new header.
		journal->file_head->magic = META_MAGIC;
		journal->file_head->file_size = file_size_real;
		journal->file_head->journal_cluster_nb = block_nb;
		journal->file_head->journal_meta_size = meta_size;
		journal->file_head->journal_data_size = data_size;
		journal->file_head->max_meta_nb_per_cluster = max_meta_nb_per_cluster;
		journal->file_head->write_mini_size = o_direct_size;
		write_head(journal);
	} else {
		if(journal->file_head->magic != META_MAGIC){
			LOG_FATAL("jouranl file head magic is not right");
			goto error2;
		}
		if(journal->file_head->write_mini_size == 0){
			// Older headers may lack this field; default to O_DIRECT size.
			journal->file_head->write_mini_size = o_direct_size;
		}
	}
	LOG_INFO("write_mini_size is %d", journal->file_head->write_mini_size);

	journal->clusters_meta_info = (clusterRecordInfo *)zalloc(journal->file_head->journal_cluster_nb * sizeof(clusterRecordInfo));
	journal->io_thread_cond = st_cond_new();
	journal->write_chunk_lock = st_mutex_new();
	journal->io_thread = st_thread_create(io_th, journal, 1, 0);
	journal->cache_entry = new_chunk_journal_entry();
	ret = play_back_journals(journal);
	if(ret < 0){
		LOG_ERROR("error play back journals");
		goto error3;
	}
	return journal;
error3:
	del_chunk_journal_entry(journal->cache_entry);
	stop_io_thread(journal);
	st_cond_destroy(journal->io_thread_cond);
	st_mutex_destroy(journal->write_chunk_lock); // previously leaked on this path
	free(journal->clusters_meta_info); // previously leaked on this path
error2:
	write_del_head(journal);
error1:
	close(journal->journal_fd);
	free(journal);
	return NULL;
}

/*
 * Tear down a journal created by init_chunkserver_journal.  The order
 * matters: stop the write-back and IO threads before destroying the
 * primitives and buffers they use, flush/unmap meta before dropping the
 * header mapping, and close the fd last.  Safe to call with NULL.
 */
void fini_chunkserver_journal(struct ChunkServerJournal* j)
{
	if(!j){
		return;
	}
	end_write_back_journal(j);
	del_chunk_journal_entry(j->cache_entry);
	stop_io_thread(j);
	st_mutex_destroy(j->write_chunk_lock);
	st_cond_destroy(j->io_thread_cond);
	flush_free_all_meta(j);
	write_del_head(j);
	free(j->clusters_meta_info);
	close(j->journal_fd);
	free(j);
	LOG_INFO("fini_chunkserver_journal run success");
}

int reset_journal(struct ChunkServerJournal* j)
{
	LOG_INFO("reset journal cluster index is %ld in cluster index is %d",
				j->file_head->current_using_cluster_index, j->file_head->meta_index_in_cluster);
	del_chunk_journal_entry(j->cache_entry);
	j->file_head->write_back_cluster_index = 0;
	j->file_head->write_back_meta_index = 0;
	j->file_head->current_using_cluster_index = 0;
	j->file_head->meta_index_in_cluster = 0;
	j->file_head->data_index_in_cluster = 0;
	uint64_t file_size_real = j->file_head->file_size;
	char head_buf[FILE_HEAD_SIZE];
	int ret = 0;
	memcpy(head_buf, j->file_head_addr, FILE_HEAD_SIZE);
	ret = flush_free_all_meta(j);
	if(ret < 0){
		return ret;
	}
	ret = write_del_head(j);
	if(ret < 0){
		LOG_FATAL("error munmap ret %d %m", ret);
		return ret;
	}
	ret = ftruncate(j->journal_fd, 0);
	if(ret < 0){
		LOG_FATAL("error ftruncate %m");
		return ret;
	}
	close(j->journal_fd);
	j->journal_fd = open(j->journal_file_path, O_RDWR | O_CREAT | io_extra_flag, 0600);
	if(j->journal_fd < 0){
		LOG_FATAL("error open journal fd %m");
		return ret;
	}
	ret = flock(j->journal_fd, LOCK_EX|LOCK_NB);
	if(ret < 0){
		LOG_ERROR("error flock ret %d %m", ret);
	}

	ret = fallocate(j->journal_fd, 0, 0, file_size_real);
	if(ret < 0){
		LOG_FATAL("error fallocate %m");
		return ret;
	}

	ret = new_read_head(j);
	if(ret < 0){
		return -1;
	}
	memcpy(j->file_head_addr, head_buf, sizeof(head_buf));
	write_head(j);
	j->cache_entry = new_chunk_journal_entry();
	memset(j->clusters_meta_info, 0, j->file_head->journal_cluster_nb * sizeof(clusterRecordInfo));
	ret = read_all_meta(j);
	if(ret < 0){
		return -1;
	}
	set_big_journal_size(0);
	j->write_count = 0;
	return 0;
}

int commit_IO_request(struct ChunkServerJournal* j, struct IORequest *req)
{
	if(!j){
		return -1;
	}
	wait_for_state_ok(j);
	req->j = j;
	INSERT_BEFORE_NODE(&j->io_request_list, &req->list);
	j->io_request_counter++;
	st_cond_signal(j->io_thread_cond);
	return 0;
}

int get_chunk_version_range_from_journal(struct ChunkServerJournal* j, uint32_t volumeid, uint32_t index, uint64_t *start_version, uint64_t *end_version)
{
	if(!j){
		return -1;
	}
	if(current_journal_broken){
		return -1;
	}
	wait_for_state_ok(j);
	j->get_version_request_counter++;
	int ret = get_chunk_version_range(j->cache_entry, volumeid, index, start_version, end_version);
	j->get_version_request_counter--;
	return ret;
}

/*
 * Count outstanding users of the journal: pending version lookups plus
 * queued and in-flight IO requests.
 */
int get_journal_reference(struct ChunkServerJournal* j)
{
	int refs = j->get_version_request_counter;
	refs += j->io_request_counter;
	refs += j->inflight_io_counter;
	return refs;
}
// #define DEBUG_JOURNAL
/*
 * Replay (a sub-range of) one journaled write into the backing chunk store.
 * Reads write_back_len payload bytes from the journal file at
 * journal_offset + start_off, then hands them to the write-back callback at
 * chunk offset jm->begin + start_off.  write_back_len == 0 is used to push
 * only a version update.  inc_version tells the callback whether to advance
 * the chunk's version.  Returns 0 on success or deliberate skip, -1 on
 * read/write failure.
 */
static int write_back_jmeta_part(struct ChunkServerJournal* j, struct JMeta *jm, char* buf, uint64_t journal_offset, uint32_t start_off, uint32_t write_back_len, bool inc_version)
{
	int ret = align_pread_align_size(j->journal_fd, buf, write_back_len, journal_offset+start_off, NULL, j->file_head->write_mini_size);
	if(ret != (int)write_back_len){
		LOG_ERROR("read journal fd %d offset %ld len %d ret %d %m", j->journal_fd, journal_offset+start_off, write_back_len, ret);
		return -1;
	}
#ifdef DEBUG_JOURNAL
	st_usleep(3000);
#endif
	LOG_DEBUG("ioengine.pwrite chunk %08x.%d version %ld real-file-offset %ld begin %d length %d, start_off is %d checksum %08x",
			jm->volumeid, jm->index, jm->version, journal_offset+start_off, jm->begin, write_back_len, start_off, crc32_n(buf, write_back_len));
	ret = j->wb_cb(jm->volumeid, jm->index, buf, jm->begin+start_off, write_back_len, jm->version, inc_version, false);
	if(ret == ERROR_VERSION){
		// Version conflict: treated as success so write-back can move on;
		// the affected chunk is left for the operator (see log message).
		LOG_ERROR("j->wb_cb ret %d chunk %08x.%d ERROR_VERSION, skip this chunk, better rm it", ret, jm->volumeid, jm->index);
		return 0;
	}
	if(ret == 1){
		// NOTE(review): a callback return of 1 is treated as "nothing to
		// do" -- confirm against the wb_cb implementation.
		return 0;
	}
	if(ret != (int)write_back_len){
		LOG_ERROR("j->wb_cb ret is %d jm->length is %d offset %ld buf %p", ret, write_back_len, jm->begin+start_off, buf);
		return -1;
	}
	// Yield so foreground st-threads can run between replayed records.
	st_sleep(0);
	return 0;
}

/*
 * Replay an entire journal record: sub-range starts at offset 0, spans the
 * full recorded length, and bumps the chunk version.
 */
int write_back_jmeta_full(struct ChunkServerJournal* j, struct JMeta *jm, char* buf, uint64_t journal_offset)
{
	int rc = write_back_jmeta_part(j, jm, buf, journal_offset, 0, jm->length, true);
	return rc;
}

// Half-open interval overlap test: [a1,a2) and [b1,b2) intersect exactly
// when each interval starts before the other one ends.
static inline bool intersect(uint64_t a1, uint64_t a2, uint64_t b1, uint64_t b2)
{
	return (a1 < b2) && (b1 < a2);
}

/*
 * Replay one journal record, but only the byte ranges that still belong to
 * this record: lookup_multy() returns the cached segments for the chunk
 * range, and only segments whose logic_pos falls inside this record's
 * journal span are written back (newer journal writes supersede the rest).
 * Always ends with a zero-length write-back so the chunk version advances
 * exactly once per record.  Returns 0 on success, <0 on error.
 */
static int write_back_jmeta_not_covered(struct ChunkServerJournal* j, struct JMeta *jm, char* buf, uint64_t journal_offset)
{
	struct Segment si, *so = NULL;
	int count;
	uint64_t begin, end, len, off;
	int ret = 0;
	uint64_t journal_end;
	lock_chunk_entry(j->cache_entry, jm->volumeid, jm->index);
	if(jm->direct_write_chunk){
		// Payload already went straight to the chunk; only bump the version.
		LOG_DEBUG("journal %08x.%d offset %ld len %d is direct_write_chunk return here", jm->volumeid, jm->index, jm->begin, jm->length);
		goto write_version;
	}
#ifdef WRITE_BACK_FULL
	// BUG FIX: this debug path used to `return` while still holding the
	// chunk entry lock; route through `out` so the lock is released
	// (so is still NULL here, free(NULL) is a no-op).
	ret = write_back_jmeta_full(j, jm, buf, journal_offset);
	goto out;
#endif
	si.begin = jm->begin;
	si.end = jm->begin + jm->length;
	journal_end = journal_offset + jm->length;
	ret = lookup_multy(j->cache_entry, jm->volumeid, jm->index, jm->begin, jm->length, &so);
	if(ret <= 0){
		// Nothing in the cache references this range any more; just push
		// the version so the record can be retired.
		if(jm->length != 0){
			LOG_DEBUG("lookup_multy return ret %d, chunk %08x.%d begin %d len %d version %ld", ret, jm->volumeid, jm->index, jm->begin, jm->length, jm->version);
		}
		goto write_version;
	}
	count = ret;
	LOG_DEBUG("chunk %08x.%d range (%d, %d) %d journal", jm->volumeid, jm->index, jm->begin, jm->length, count);
	for(int i = 0; i < count; i++){
		LOG_DEBUG("journal_offset journal_end (%ld,%ld) logic_pos %ld", journal_offset, journal_end, so[i].logic_pos);
		if(intersect(journal_offset, journal_end, so[i].logic_pos, so[i].logic_pos+1)){
			// Clip the cached segment to this record's chunk range and
			// replay only the overlapping bytes (no version bump yet).
			begin = MAX(si.begin, so[i].begin);
			end = MIN(si.end, so[i].end);
			len = end - begin;
			off = begin - si.begin;
			ret = write_back_jmeta_part(j, jm, buf, journal_offset, off, len, false);
			if(ret < 0){
				goto out;
			}
		}
	}
write_version:
	// Zero-length write: advances the chunk version exactly once.
	ret = write_back_jmeta_part(j, jm, buf, journal_offset, 0, 0, true);
out:
	unlock_chunk_entry(j->cache_entry, jm->volumeid, jm->index);
	free(so);
	return ret;
}

/*
 * Replay strategy entry point; currently always the "not covered" variant,
 * which skips ranges superseded by newer journal writes.
 */
static int write_back_jmeta(struct ChunkServerJournal* j, struct JMeta *jm, char* buf, uint64_t journal_offset)
{
	int rc = write_back_jmeta_not_covered(j, jm, buf, journal_offset);
	return rc;
}

/*
 * Journal space usage as an integer percentage (truncated), measured in
 * whole clusters consumed.
 */
static inline int space_used_percent(struct ChunkServerJournal* j)
{
	uint64_t used = j->file_head->current_using_cluster_index;
	uint64_t total = j->file_head->journal_cluster_nb;
	return (int)(used * 100 / total);
}

/* One tier of the adaptive write-back throttle (see g_io_controls). */
struct SmartControlArgs{
	int percent;		// tier applies while journal usage % is below this bound
	int sleep_utime;	// microseconds to sleep between quota polls in this tier
	int last_second_io;	// write-back waits while recent record count >= this
	int io_floor;		// write-back waits while queued+inflight IO >= this
	int quota;		// records write-back may replay once allowed
	bool reset;		// reaching this tier requests a journal reset
};

/*
 * Throttle table, ordered by journal space usage.  get_control_code() picks
 * the first row whose `percent` exceeds current usage, so pressure on the
 * write-back thread grows as the journal fills.
 * Columns: percent, sleep_utime, last_second_io, io_floor, quota, reset.
 */
struct SmartControlArgs g_io_controls[] = {
		{30, 	1000*1000, 	50, 	1, 	1, 	false},
		{40, 	100*1000, 	50, 	2, 	2, 	false},
		{50, 	100*1000, 	50, 	3, 	4, 	false},
		{60, 	100*1000, 	800, 	4, 	8, 	true},
		{70, 	10*1000, 	1600, 	8, 	16, true},
		{80, 	10*1000, 	6400, 	16, 32, true},
		{100, 	10*1000, 	6400, 	32, 64, true},
};

/*
 * Select the throttle tier matching the current journal usage: first row
 * whose percent threshold is above usage, falling back to the last (most
 * aggressive) row when usage reaches 100%.
 */
struct SmartControlArgs* get_control_code(struct ChunkServerJournal* j)
{
	int used = space_used_percent(j);
	int count = LEN(g_io_controls);
	struct SmartControlArgs* chosen = &g_io_controls[count - 1];
	for(int i = 0; i < count; i++){
		if(used < g_io_controls[i].percent){
			chosen = &g_io_controls[i];
			break;
		}
	}
	return chosen;
}

/*
 * Decide how many journal records the write-back thread may replay now.
 * Returns -1 when write-back is stopping; when the journal file is a
 * symbolic link, always grants exactly 1.  Otherwise blocks (polling at the
 * current tier's interval) until foreground load drops below the tier's
 * thresholds, then returns that tier's quota.
 */
int get_write_back_quota(struct ChunkServerJournal* j)
{
	if(j->stop_write_back){
		return -1;
	}
	if(j->is_journal_file_symbol_link){
		return 1;
	}
	// NOTE(review): nothing ever sets current_quota > 0, so this branch is
	// currently dead -- presumably batch grants were intended; confirm
	// before relying on it.
	static int current_quota = 0;
	if(current_quota > 0){
		return current_quota--;
	}

	while(true){
		struct SmartControlArgs* c = get_control_code(j);
		// BUG FIX: log_records(j) was passed without a matching conversion
		// specifier, so its value was evaluated but never printed.
		LOG_DEBUG("j->io_request_counter + j->inflight_io_counter=%d, lastest_record=%d, log_records=%d", j->io_request_counter+j->inflight_io_counter, get_lastest_n_record(j, 1), log_records(j));
		if(j->io_request_counter + j->inflight_io_counter < c->io_floor && get_lastest_n_record(j, 1) < c->last_second_io){
			return c->quota;
		}
		if(j->stop_write_back){
			return -1;
		}
		LOG_DEBUG("st_usleep %d", c->sleep_utime);
		st_usleep(c->sleep_utime);
	}
	return 100; // unreachable (loop only exits via return); kept for compilers
}

bool check_reset_need(struct ChunkServerJournal* j)
{
	struct SmartControlArgs* c = get_control_code(j);
	return c->reset;
}

/*
 * Replay journal records [start_index, max_index) of one cluster.
 * Payload positions are not stored per record, so the data offset is
 * recomputed by summing the on-disk sizes of the records before
 * start_index.  A record with volumeid == 0 is treated as the first unused
 * slot and ends the scan early with success.
 * When `wait` is set, each record first obtains a quota from
 * get_write_back_quota(); a negative quota (shutdown) aborts the loop and
 * the negative value is returned.  Advances
 * file_head->write_back_meta_index after each replayed record so progress
 * survives a restart.  Returns 0 on success, <0 on error or shutdown.
 */
int write_back_cluster_range(struct ChunkServerJournal* j, uint64_t cluster_index, uint32_t start_index, uint32_t max_index, bool wait)
{
	LOG_DEBUG("cluster index is %ld/%ld start meta index is %d wait %d", cluster_index, j->file_head->journal_cluster_nb, start_index, wait);
	JMeta *metas = journal_meta_addr(j, cluster_index, 0);
	uint32_t datas_len_before = 0;
	// Sum the sizes of the records already written back to find where this
	// range's payload begins inside the cluster's data area.
	for(uint32_t i = 0; i < start_index; i++){
		datas_len_before += get_req_write_size(&metas[i], j);
	}
	uint64_t datas_start = journal_data_offset(j, cluster_index, datas_len_before);
	uint64_t datas_offset = datas_start;
	int ret = 0;
	for(uint32_t i = start_index; i < max_index; i++){
		if(wait){
			ret = get_write_back_quota(j);
			if(ret < 0){
				break;
			}
		}
		if(metas[i].volumeid == 0){
			// Empty meta slot: no more valid records in this cluster.
			LOG_DEBUG("meta index is %d data offset is %d meta size is %d, data size is %d",
						i, datas_offset - datas_start, j->file_head->max_meta_nb_per_cluster, j->file_head->journal_data_size);
			return 0;
		}
		ret = write_back_jmeta(j, &metas[i], wb_buf, datas_offset);
		if(ret < 0){
			break;
		}
		datas_offset += get_req_write_size(&metas[i], j);
		j->file_head->write_back_meta_index = i + 1;
	}
	return ret;
}

/*
 * Replay an entire cluster starting at start_index: the end bound is the
 * maximum number of meta slots a cluster can hold.
 */
int write_back_one_cluster(struct ChunkServerJournal* j, uint64_t cluster_index, uint32_t start_index, bool wait)
{
	uint32_t end_index = j->file_head->max_meta_nb_per_cluster;
	return write_back_cluster_range(j, cluster_index, start_index, end_index, wait);
}

/*
 * Replay the partially filled (current) cluster: the end bound is the next
 * free meta slot rather than the cluster's capacity.
 */
int write_back_last_cluster(struct ChunkServerJournal* j, uint64_t cluster_index, uint32_t start_meta_index, bool wait)
{
	uint32_t end_index = j->file_head->meta_index_in_cluster;
	return write_back_cluster_range(j, cluster_index, start_meta_index, end_index, wait);
}

/*
 * Write back every completed cluster from *cluster_index up to (but not
 * including) the cluster currently being filled, then replay whatever
 * finished records already sit in the current cluster.
 * Before replaying a cluster, waits until all of its in-flight journal
 * writes have landed (meta_nb == done_nb).
 * *cluster_index and *start_meta_index are advanced in place so the caller
 * can resume from where this pass stopped.  Returns 0 on success, <0 only
 * when a full cluster failed to write back (errors in the trailing partial
 * cluster are logged but not propagated).
 */
int write_back_all_and_current(struct ChunkServerJournal* j, uint64_t *cluster_index, uint32_t *start_meta_index, bool wait)
{
	LOG_DEBUG("cluster index is %d start meta index is %d wait %d", *cluster_index, *start_meta_index, wait);
	while(*cluster_index != j->file_head->current_using_cluster_index){
		// Wait for the cluster's outstanding journal writes to complete
		// before reading its records back.
		while(j->clusters_meta_info[*cluster_index].meta_nb != j->clusters_meta_info[*cluster_index].done_nb){
			LOG_DEBUG("index %d meta_nb != done_nb", *cluster_index);
			st_sleep(1);
		}
		int ret = write_back_one_cluster(j, *cluster_index, *start_meta_index, wait);
		if(ret < 0){
			LOG_ERROR("write_back_one_cluster return < 0");
			return ret;
		}
		// Persist progress in the file head, then move to the next cluster.
		j->file_head->write_back_meta_index = 0;
		j->file_head->write_back_cluster_index = ++*cluster_index;
		*start_meta_index = 0;
	}
	// write back last cluster
	if(*cluster_index == j->file_head->current_using_cluster_index){ // for unexcepted error
		// Count how many records of the current cluster are complete
		// (contiguous non-zero volumeids starting at *start_meta_index).
		uint32_t current_canbe_write_back = *start_meta_index;
		JMeta *metas = journal_meta_addr(j, *cluster_index, 0);
		for(int i = *start_meta_index; i < j->file_head->max_meta_nb_per_cluster; i++){
			if(metas[i].volumeid != 0){
				current_canbe_write_back++;
			}else{
				break;
			}
		}
		if(current_canbe_write_back && *start_meta_index != current_canbe_write_back){
			int ret = write_back_cluster_range(j, *cluster_index, *start_meta_index, current_canbe_write_back, wait);
			if(ret < 0){
				LOG_ERROR("write_back_cluster_range return < 0");
			}
			*start_meta_index = j->file_head->write_back_meta_index;
		}
	}

	return 0;
}

/*
 * Ask the io thread to pause foreground writes, then poll every 100ms
 * until it acknowledges via j->paused_io.  Always sleeps at least once.
 */
void pause_write(struct ChunkServerJournal* j)
{
	j->pause_io = true;
	for(;;){
		LOG_INFO("j->paused_io == false wait");
		st_usleep(100*1000);
		if(j->paused_io != false){
			break;
		}
	}
}

/* Allow the io thread to resume foreground writes; counterpart of pause_write(). */
void resume_write(struct ChunkServerJournal* j)
{
	j->pause_io = false;
}

/*
 * One write-back pass: drain everything up to the current cluster (with
 * throttling).  If space pressure demands a reset (check_reset_need) or an
 * operator forced one (write_back_by_hand), pause foreground IO, drain the
 * remainder including the partial current cluster without throttling, and
 * reset the journal file to empty before resuming.
 */
void write_back_all_journal(struct ChunkServerJournal* j)
{
	uint64_t write_back_cluster_index = j->file_head->write_back_cluster_index;
	uint32_t write_back_meta_index = j->file_head->write_back_meta_index;
	int ret = write_back_all_and_current(j, &write_back_cluster_index, &write_back_meta_index, true);
	if(ret < 0){
		return;
	}

	LOG_DEBUG("nearly all journal written backed");
	bool reset = check_reset_need(j);
	if(!reset && !write_back_by_hand){
		return;
	}

	// With IO paused, no new records arrive, so the unthrottled drain below
	// reaches a quiescent journal that is safe to reset.
	pause_write(j);
	ret = write_back_all_and_current(j, &write_back_cluster_index, &write_back_meta_index, false);
	if(ret < 0){
		goto out;
	}
	write_back_last_cluster(j, write_back_cluster_index, write_back_meta_index, false);
	reset_journal(j);
out:
	resume_write(j);
}

#define FLUSH_INTERVAL (10)
/*
 * Keep the on-disk meta of the current cluster fresh.  When flush_head is
 * set the file head is persisted on every call; the cluster meta flush
 * itself is rate-limited to once per FLUSH_INTERVAL seconds (tracked via a
 * function-local timestamp) and only happens when the cluster's meta is
 * marked dirty.
 */
void check_dirty_cluster_and_flush(struct ChunkServerJournal* j, bool flush_head)
{
	static time_t last_time_flush = 0;
	time_t now = time(0);
	if(flush_head){
		write_head(j);
	}
	if(now - last_time_flush < FLUSH_INTERVAL){
		return;
	}
	uint64_t current = j->file_head->current_using_cluster_index;
	if(j->clusters_meta_info[current].dirty_meta){
		// Clear the flag before flushing; a write racing in after this
		// point re-dirties the cluster for the next interval.
		j->clusters_meta_info[current].dirty_meta = false;
		flush_meta(j, current);
		last_time_flush = now;
	}
}

/*
 * Write-back thread body.  Allocates the shared 1MB replay buffer (wb_buf),
 * then loops every 100ms until stop_write_back is set: honours manual
 * write-back requests (write_back_by_hand), stops on a broken journal,
 * keeps dirty cluster meta flushed, and runs a write-back pass whenever new
 * writes arrived during the sleep or the journal is more than one full
 * cluster ahead of the write-back cursor.
 */
static void* smart_write_back_chunk_journal(void* arg)
{
	struct ChunkServerJournal* j = (struct ChunkServerJournal*)arg;
	wb_buf = (char*)ursax_4k_align_alloc(ONE_MB);
	while(!j->stop_write_back){
		uint64_t old_write_count = j->write_count;
		st_usleep(100*1000);
		if(write_back_by_hand){
			LOG_INFO("write_back_by_hand true");
			write_back_all_journal(j);
			write_back_by_hand = false;
			continue;
		}
		if(current_journal_broken){
			break;
		}

		check_dirty_cluster_and_flush(j, false);

		// Trigger a pass if writes landed during the sleep, or the journal
		// has accumulated more than one un-written-back cluster.
		if( old_write_count != j->write_count ||
		    (
		      (j->file_head->current_using_cluster_index || j->file_head->meta_index_in_cluster)
		      && j->file_head->current_using_cluster_index > j->file_head->write_back_cluster_index + 1
			)
		){
			LOG_DEBUG("current_using_cluster_index %d meta_index_in_cluster %d write_back_cluster_index %d", j->file_head->current_using_cluster_index, j->file_head->meta_index_in_cluster, j->file_head->write_back_cluster_index);
			write_back_all_journal(j);
			check_dirty_cluster_and_flush(j, true);
		}
	}
	free(wb_buf);
	return NULL;
}

/* Spawn the background write-back st-thread for this journal. */
void begin_write_back_journal(struct ChunkServerJournal* j)
{
	j->wb_thread = st_thread_create(smart_write_back_chunk_journal, j, 1, 0);
}

/* Ask the write-back thread to exit at its next poll, then join it. */
void end_write_back_journal(struct ChunkServerJournal* j)
{
	j->stop_write_back = true;
	st_thread_join(j->wb_thread, NULL);
}

/*
 * IO completion callback: mark the request done and, if a thread parked
 * itself in wait_req_done() (its handle is stashed in req->data), interrupt
 * its st_sleep so it re-checks immediately instead of waiting out the
 * full sleep.
 */
static void wake(void* data)
{
	struct IORequest *req = (struct IORequest*) data;
	req->done = true;
	if(req->data){
		st_thread_interrupt((st_thread_t)req->data);
	}
}

#define WRITE_DIRECT_TO_CHUNK_LINE (64*1024)
int async_chunk_pwrite(struct ChunkServerJournal* j, struct IORequest *req, uint32_t volumeid, uint32_t index,
						const void *buf, size_t count, off_t offset, uint64_t version, time_t time)
{
	req->jmeta.volumeid = volumeid;
	req->jmeta.index = index;
	req->jmeta.begin = offset;
	req->jmeta.length = count;
	req->jmeta.version = version;
	req->jmeta.time = time;
	req->buf = (void*)buf;
	req->cb = &wake;
	req->emergency_io = !big_journal_delayio;
	req->io_type = IO_WRITE;
	req->done = false;
	if(count >= WRITE_DIRECT_TO_CHUNK_LINE){
		req->jmeta.direct_write_chunk = 1;
	}

	int ret = commit_IO_request(j, req);
	if(ret < 0){
		LOG_ERROR("error commit io request chunk %08x.%d begin %d len %d j %p", volumeid, index, offset, count, j);
		return ret;
	}
	return 0;
}

/*
 * Block the calling st-thread until req->done is set by wake().
 * Each round stashes the caller's thread handle in req->data and parks for
 * up to 10 seconds; wake() cuts the sleep short via st_thread_interrupt.
 * A full expiry without completion is logged as an error and the wait
 * continues.  Returns the request's result code (req->req_ret).
 */
static ssize_t wait_req_done(struct IORequest *req)
{
	while(!req->done){
		req->data = st_thread_self();
		st_sleep(10);
		if(!req->done){
			LOG_ERROR("error sleep -1 timeout and not done");
		}
	}
	return req->req_ret;
}

/*
 * Fill in an IORequest describing a chunk read (delegated to the supplied
 * read callback `fun` on `chunk`) and queue it on the journal's io thread.
 * Completion is signalled through wake().
 * Returns 0 on success, <0 if the request could not be queued.
 */
int async_chunk_pread(struct ChunkServerJournal* j, struct IORequest *req, be_chunk_pread_t fun, void* chunk,
									uint32_t volumeid, uint32_t index, void *buf, size_t count, off_t offset)
{
	req->jmeta.volumeid = volumeid;
	req->jmeta.index = index;
	req->jmeta.begin = offset;
	req->jmeta.length = count;
	req->buf = (void*)buf;
	req->cb = &wake;
	req->emergency_io = !big_journal_delayio;
	req->io_type = IO_READ;
	req->readfun = fun;
	req->chunk = chunk;
	req->done = false;

	int ret = commit_IO_request(j, req);
	if(ret < 0){
		// BUG FIX: `offset` (off_t) and `count` (size_t) were printed with
		// %d, which mismatches their width on 64-bit targets.
		LOG_ERROR("error commit io request chunk %08x.%d begin %ld len %zu j %p", volumeid, index, (long)offset, count, j);
		return ret;
	}
	return 0;
}

/*
 * Synchronous chunk read: submit via async_chunk_pread and block the
 * calling st-thread until the io thread completes the request.
 * Returns the read result, or <0 if the request could not be queued.
 */
ssize_t sync_chunk_pread(struct ChunkServerJournal* j, be_chunk_pread_t fun, void* chunk,
						uint32_t volumeid, uint32_t index, void *buf, size_t count, off_t offset)
{
	struct IORequest req;
	int rc = async_chunk_pread(j, &req, fun, chunk, volumeid, index, buf, count, offset);
	if(rc < 0){
		return (ssize_t)rc;
	}
	return wait_req_done(&req);
}

/*
 * Synchronous chunk write: submit via async_chunk_pwrite and block the
 * calling st-thread until the io thread completes the request.
 * Returns the write result, or <0 if the request could not be queued.
 * NOTE(review): the DEBUG_JOURNAL verification block below references `fd`,
 * which is not in scope here -- it will not compile if DEBUG_JOURNAL is
 * defined; sync_chunk_pread expects (j, fun, chunk, ...) arguments instead.
 */
ssize_t sync_chunk_pwrite(struct ChunkServerJournal* j, uint32_t volumeid, uint32_t index,
							const void *buf, size_t count, off_t offset, uint64_t version, time_t time)
{
	LOG_DEBUG("chunk %08x.%d offset %ld len %d crc32 %08x", volumeid, index, offset, count, crc32_n(buf, count));
	struct IORequest req;
	int ret = async_chunk_pwrite(j, &req, volumeid, index, buf, count, offset, version, time);
	if(ret < 0){
		return ret;
	}
	ssize_t r = wait_req_done(&req);
#ifdef DEBUG_JOURNAL
	char *shadow_buf = ursax_4k_align_alloc(count);
	sync_chunk_pread(j, fd, volumeid, index, shadow_buf, count, offset);
	int rc = memcmp(buf, shadow_buf, count);
	free(shadow_buf);
	if(rc != 0){
		LOG_ERROR("error this pwrite");
	}
#endif
	return r;
}

/*
 * Persist only a chunk's version: journal a zero-length write at offset
 * chunk_size.  ALIGIN_MEM presumably declares a 4K-aligned scratch buffer
 * to satisfy O_DIRECT alignment even though no payload bytes are written
 * -- confirm against the macro's definition.
 */
ssize_t sync_chunk_write_version(struct ChunkServerJournal* j, uint32_t volumeid, uint32_t index,
								uint32_t chunk_size, uint64_t version)
{
	ALIGIN_MEM(buf, SIZE_4K);
	return sync_chunk_pwrite(j, volumeid, index, buf, 0, chunk_size, version, time(0));
}
