#ifndef __CHUNK_CLIENT__
#define __CHUNK_CLIENT__

#include "protocol.h"
#include "anysocket.h"
#include "networking.h"
#include "log.h"
#include "ursax-crc32.h"
#include "ursax.h"

#ifdef __cplusplus
extern "C" {
#endif

extern uint16_t _opsn;

int _chunk_create(st_netfd_t stfd, struct Request_ChunkCreate* req);
/* Build a CHUNK_CREATE request and send it via _chunk_create().
 * size is narrowed through size64_convert_to32(); the four booleans map
 * one-to-one onto the CHUNK_CREATE_* flag bits. Returns _chunk_create's code. */
static inline int chunk_create(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t size, bool preallocation, bool incubation, bool checksum, bool ec_chunk)
{
	struct Request_ChunkCreate request;
	setup_header(&request.header, CHUNK_CREATE);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.size = size64_convert_to32(size);
	/* Fold the boolean options into the flag word in one expression. */
	request.flags = (preallocation ? CHUNK_CREATE_PREALLOCATION : 0)
	              | (incubation    ? CHUNK_CREATE_INCUBATION    : 0)
	              | (checksum      ? CHUNK_CREATE_WITH_CHECKSUM : 0)
	              | (ec_chunk      ? CHUNK_CREATE_EC_CHUNK      : 0);

	return _chunk_create(stfd, &request);
}

int _chunk_close_fd(st_netfd_t stfd, struct Request_ChunkCloseFd* req);
/* Ask the chunk server to close its open fd for chunk (volumeid, index). */
static inline int chunk_close_fd(st_netfd_t stfd, uint32_t volumeid, uint32_t index)
{
	struct Request_ChunkCloseFd request;
	setup_header(&request.header, CLOSE_CHUNK_FD);
	request.id.index = index;
	request.id.volumeid = volumeid;
	return _chunk_close_fd(stfd, &request);
}

/* List the chunks of a volume; on success *result receives the chunk-id
 * array — presumably allocated by the callee, ownership/free contract is
 * defined in the implementing .c (TODO confirm). */
int chunk_list(st_netfd_t stfd, uint32_t volumeid, struct ChunkID** result);

int _chunk_read(st_netfd_t stfd, struct Request_ChunkRead* req, void* buf_new);
/* Read `size` bytes at `offset` from chunk (volumeid, index) into buf. */
static inline int chunk_read(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, void* buf, uint32_t size)
{
	struct Request_ChunkRead request;
	setup_header(&request.header, CHUNK_READ);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.size = size;
	request.offset = offset;
	return _chunk_read(stfd, &request, buf);
}

int _chunk_readv(st_netfd_t stfd, struct Request_ChunkRead* req, struct iovec *iov, uint32_t vec_num);
/* Vectored read: same request as chunk_read(), but the payload is scattered
 * into iov[0..vec_num); `size` is the total byte count across the vectors. */
static inline int chunk_readv(st_netfd_t stfd, uint32_t volumeid, uint32_t index,
							  uint32_t offset, struct iovec *iov, uint32_t vec_num, uint32_t size)
{
	struct Request_ChunkRead request;
	setup_header(&request.header, CHUNK_READ);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.size = size;
	request.offset = offset;
	return _chunk_readv(stfd, &request, iov, vec_num);
}

int _chunk_readv4(st_netfd_t stfd, struct Request_ChunkRead4* req, struct iovec *iov, uint32_t vec_num);
/* Version-checked vectored read (CHUNK_READ4): `version` is carried as the
 * request's success_version so the server can validate the replica state. */
static inline int chunk_readv4(st_netfd_t stfd, uint32_t volumeid, uint32_t index,
							  uint32_t offset, struct iovec *iov, uint32_t vec_num, uint32_t size, uint64_t version)
{
	struct Request_ChunkRead4 request;
	setup_header(&request.header, CHUNK_READ4);
	request.success_version = version;
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.size = size;
	request.offset = offset;
	return _chunk_readv4(stfd, &request, iov, vec_num);
}

int _chunk_read3(st_netfd_t stfd, struct Request_ChunkRead3* req, void* buf_new, struct ChecksumInfo *ckinfo);
/* Checksummed read (CHUNK_READ3): derives the checksum range covering
 * [offset, offset+size) via parse_range() and passes it alongside the request. */
static inline int chunk_read_checksum(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, void* buf, uint32_t size)
{
	struct Request_ChunkRead3 request;
	setup_header(&request.header, CHUNK_READ3);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.offset = offset;
	request.size = size;

	/* ckinfo is presumably fully initialized by parse_range() — defined elsewhere. */
	struct ChecksumInfo range_info;
	parse_range(offset, size, &range_info);
	return _chunk_read3(stfd, &request, buf, &range_info);
}

int _chunk_write(st_netfd_t stfd, struct Request_ChunkWrite* req, void* buf_new);
/* Plain write: send `size` bytes from buf at `offset`, tagged with `version`. */
static inline int chunk_write(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint64_t offset, void* buf, uint64_t size)
{
	struct Request_ChunkWrite request;
	setup_header(&request.header, CHUNK_WRITE);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.version = version;
	request.offset = offset;
	return _chunk_write(stfd, &request, buf);
}

int _chunk_writex(st_netfd_t stfd, struct Request_ChunkWrite* req, void* buf_new, send_cb_t send);
/* Like chunk_write() but delegates the payload transmission to the caller's
 * `send` callback (e.g. for zero-copy or custom framing). */
static inline int chunk_writex(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint32_t offset, void* buf, uint32_t size, send_cb_t send)
{
	struct Request_ChunkWrite request;
	setup_header(&request.header, CHUNK_WRITE);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.version = version;
	request.offset = offset;
	return _chunk_writex(stfd, &request, buf, send);
}

int _chunk_writev4(st_netfd_t stfd, struct Request_ChunkWrite4* req, struct iovec *iov, int iov_nb);
/* Vectored versioned write (CHUNK_WRITE4). `size` is the total byte count of
 * iov[0..vec_num); `flag` and `client_time` are carried through verbatim. */
static inline int chunk_writev4(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, struct iovec *iov, uint32_t vec_num,
								uint32_t size, uint64_t version, uint32_t flag, time_t client_time)
{
	struct Request_ChunkWrite4 request;
	setup_header(&request.header, CHUNK_WRITE4);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.client_time = client_time;
	request.flag = flag;
	request.version = version;
	request.offset = offset;
	request.data.length = size;
	return _chunk_writev4(stfd, &request, iov, vec_num);
}

/* Single-buffer convenience wrapper: wrap buf in a one-entry iovec and
 * delegate to chunk_writev4(). */
static inline int chunk_write4(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint32_t offset, void* buf, uint32_t size, uint32_t flag, time_t client_time)
{
	struct iovec single = { .iov_base = buf, .iov_len = size };
	return chunk_writev4(stfd, volumeid, index, offset, &single, 1, size, version, flag, client_time);
}

int _chunk_write4x(st_netfd_t stfd, struct Request_ChunkWrite4* req, void* buf_new, send_cb_t send);
/* CHUNK_WRITE4 with a caller-supplied send callback for the payload. */
static inline int chunk_write4x(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint32_t offset, void* buf, uint32_t size, uint32_t flag, time_t client_time, send_cb_t send)
{
	struct Request_ChunkWrite4 request;
	setup_header(&request.header, CHUNK_WRITE4);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.client_time = client_time;
	request.flag = flag;
	request.version = version;
	request.offset = offset;
	request.data.length = size;
	return _chunk_write4x(stfd, &request, buf, send);
}

int _chunk_write_ec(st_netfd_t stfd, struct Request_ChunkWriteEC* req, void* buf_new);
/* Erasure-coded write (CHUNK_WRITE_EC) of `size` bytes at `offset`. */
static inline int chunk_write_ec(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint32_t offset, void* buf, uint32_t size)
{
	struct Request_ChunkWriteEC request;
	setup_header(&request.header, CHUNK_WRITE_EC);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.version = version;
	request.offset = offset;
	return _chunk_write_ec(stfd, &request, buf);
}

/* Callback invoked to wait for a confirmation code; `data` is caller context. */
typedef int (*wait_confirm_code_t)(void* data, uint32_t index);
int _chunk_write_ec2(st_netfd_t stfd, struct Request_ChunkWriteEC* req, void* buf, wait_confirm_code_t fun, void* data);
/* Erasure-coded write (CHUNK_WRITE_EC2) that lets the caller block on
 * confirmation through `fun(data, index)`. */
static inline int chunk_write_ec2(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint32_t offset, void* buf, uint32_t size,
									wait_confirm_code_t fun, void* data)
{
	struct Request_ChunkWriteEC request;
	setup_header(&request.header, CHUNK_WRITE_EC2);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.version = version;
	request.offset = offset;
	return _chunk_write_ec2(stfd, &request, buf, fun, data);
}

int _chunk_write_ec_parity(st_netfd_t stfd, struct Request_ChunkWriteECParity* req, void* buf_new);
/* Shared builder for the EC-parity write and parity-check commands; `cmd`
 * selects which opcode goes into the header. */
static inline int chunk_write_ec_parity_or_check(st_netfd_t stfd, uint32_t volumeid, uint32_t index, int k, uint32_t offset, void* buf, uint32_t size, uint64_t versions[], uint8_t version_incs[], int cmd)
{
	struct Request_ChunkWriteECParity request;
	setup_header(&request.header, cmd);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.k = k;
	request.offset = offset;
	request.data.length = size;
	/* Both copies are sized by the request fields — the caller's arrays must
	 * be at least that large (NOTE(review): confirm at call sites). */
	memcpy(request.versions, versions, sizeof(request.versions));
	memcpy(request.inc_versions, version_incs, sizeof(request.inc_versions));
	return _chunk_write_ec_parity(stfd, &request, buf);
}

/* Write EC parity data: fixed CHUNK_WRITE_EC_PARITY opcode. */
static inline int chunk_write_ec_parity(st_netfd_t stfd, uint32_t volumeid, uint32_t index, int k, uint32_t offset, void* buf, uint32_t size, uint64_t versions[], uint8_t version_incs[])
{
	return chunk_write_ec_parity_or_check(stfd, volumeid, index, k, offset, buf, size,
										  versions, version_incs, CHUNK_WRITE_EC_PARITY);
}

/* Parity consistency check: same request shape but no payload (NULL/0). */
static inline int chunk_check_ec_parity(st_netfd_t stfd, uint32_t volumeid, uint32_t index, int k, uint32_t offset, uint64_t versions[], uint8_t version_incs[])
{
	return chunk_write_ec_parity_or_check(stfd, volumeid, index, k, offset, NULL, 0,
										  versions, version_incs, CHUNK_WRITE_EC_PARITY_CHECK);
}

int _chunk_update_ec_parity(st_netfd_t stfd, struct Request_ChunkUpdateECParity* req, void* buf_new);
/* Update EC parity from an ECParityUpdateInfo descriptor; the descriptor is
 * copied wholesale into the request, followed by `size` payload bytes. */
static inline int chunk_update_ec_parity(st_netfd_t stfd, struct ECParityUpdateInfo *info, void *buf, uint32_t size)
{
	LOG_DEBUG("chunk_update_ec_parity volumeid %d index %d offset %d ec_opsn %d current k %d",
			info->volumeid, info->index, info->offset, info->ec_opsn, info->n_chunks_involved);

	struct Request_ChunkUpdateECParity request;
	setup_header(&request.header, CHUNK_UPDATE_EC_PARITY);
	memcpy(&request.info, info, sizeof(*info));
	request.data.length = size;
	return _chunk_update_ec_parity(stfd, &request, buf);
}

int _chunk_write_ec_data(st_netfd_t stfd, struct Request_ChunkWriteECData* req, void* buf, char *new_replicate, char* new_success_mask);
/* EC data write (CHUNK_WRITE_EC_DATA); new_replicate / new_success_mask are
 * output parameters filled by the wire-level helper. */
static inline int chunk_write_ec_data(st_netfd_t stfd, struct ECDataWriteinfo *ec_info, void* buf, uint32_t size, char *new_replicate, char* new_success_mask)
{
	LOG_DEBUG("chunk_write_ec_data volumeid %x index %d offset %d opsn %d ith %d k %d n %d currentk %d ecstart_chunk_index %d data size %d",
			ec_info->id.volumeid, ec_info->id.index, ec_info->offset, ec_info->ec_opsn,
			ec_info->ith, ec_info->k, ec_info->n, ec_info->n_chunks_involved, 0, size);

	struct Request_ChunkWriteECData request;
	setup_header(&request.header, CHUNK_WRITE_EC_DATA);
	memcpy(&request.info, ec_info, sizeof(*ec_info));
	request.data.length = size;
	return _chunk_write_ec_data(stfd, &request, buf, new_replicate, new_success_mask);
}

int _chunk_write_ec_data2(st_netfd_t stfd, Request_ChunkWriteECData2* req, void* buf_new, char *new_replicate, char* new_success_mask);
/* EC data write variant 2 (CHUNK_WRITE_EC_DATA2); new_replicate and
 * new_success_mask are output parameters filled by the wire-level helper. */
static inline int chunk_write_ec_data2(st_netfd_t stfd, struct ECDataWriteinfo *ec_info, void* buf, uint32_t size, char *new_replicate, char* new_success_mask)
{
	/* Fixed: log line previously said "chunk_write_ec_data" (copied from the
	 * sibling function), which made the two calls indistinguishable in logs. */
	LOG_DEBUG("chunk_write_ec_data2 volumeid %x index %d offset %d opsn %d ith %d k %d n %d currentk %d ecstart_chunk_index %d data size %d",
			ec_info->id.volumeid, ec_info->id.index, ec_info->offset, ec_info->ec_opsn, ec_info->ith, ec_info->k, ec_info->n, ec_info->n_chunks_involved, 0, size);
	Request_ChunkWriteECData2 req;
	setup_header(&req.header, CHUNK_WRITE_EC_DATA2);
	memcpy(&req.info, ec_info, sizeof(*ec_info));
	req.data.length = size;

	return _chunk_write_ec_data2(stfd, &req, buf, new_replicate, new_success_mask);
}

int _chunk_write_data_to_parity(st_netfd_t stfd, struct Request_ChunkWriteDataToParity* req, void* buf, struct ReWriteD0Record* record);
/* Forward chunk data to a parity server. When is_d0 is set the D0 rewrite
 * record is embedded in the request; otherwise `record` is not read here
 * (it is still handed to the wire-level helper). */
static inline int chunk_write_data_to_parity(st_netfd_t stfd, struct ECParityUpdateInfo *ec_info, void* buf, struct ReWriteD0Record* record, bool is_d0, uint32_t size)
{
	LOG_DEBUG("chunk_write_data_to_parity volumeid %x index %d offset %d opsn %d ith %d k %d n %d currentk %d ecstart_chunk_index %d data size %d",
				ec_info->volumeid, ec_info->index, ec_info->offset, ec_info->ec_opsn, 0,
				ec_info->k, ec_info->n, ec_info->n_chunks_involved, 0, size);

	struct Request_ChunkWriteDataToParity request;
	setup_header(&request.header, CHUNK_WRITE_DATA_TO_PARITY);
	memcpy(&request.info, ec_info, sizeof(*ec_info));
	request.data.length = size;
	request.is_d0_data = is_d0;
	if (is_d0)
		request.d0_record = *record;

	return _chunk_write_data_to_parity(stfd, &request, buf, record);
}

int _chunk_write_ec_groupdata(st_netfd_t stfd, struct Request_ChunkWriteECGroupData* req, void* buf_new, uint32_t *new_replciate);
/* Shared builder for the two EC group-data write commands; `chunk_cmd`
 * selects the opcode. */
static inline int chunk_write_ec_groupdata_(st_netfd_t stfd, struct ECGroupDataWriteInfo *ec_info, void* buf, uint32_t size, uint32_t *new_replciate, int chunk_cmd)
{
	LOG_DEBUG("chunk_write_ec_groupdata volumeid %x index %d offset %ld start %ld k %d n %d size shift %d chunk start inc %d",
			ec_info->id.volumeid, ec_info->id.index, ec_info->offset, ec_info->start,
			ec_info->k, ec_info->n, ec_info->ec_stripe_size_shift, 0);

	struct Request_ChunkWriteECGroupData request;
	setup_header(&request.header, chunk_cmd);
	memcpy(&request.info, ec_info, sizeof(*ec_info));
	request.data.length = size;
	return _chunk_write_ec_groupdata(stfd, &request, buf, new_replciate);
}

/* Group-data write, command variant 1 (CHUNK_WRITE_ECGROUP_DATA). */
static inline int chunk_write_ec_groupdata(st_netfd_t stfd, struct ECGroupDataWriteInfo *ec_info, void* buf, uint32_t size, uint32_t *new_replciate)
{
	return chunk_write_ec_groupdata_(stfd, ec_info, buf, size, new_replciate,
									 CHUNK_WRITE_ECGROUP_DATA);
}

/* Group-data write, command variant 2 (CHUNK_WRITE_ECGROUP_DATA2). */
static inline int chunk_write_ec_groupdata2(st_netfd_t stfd, struct ECGroupDataWriteInfo *ec_info, void* buf, uint32_t size, uint32_t *new_replciate)
{
	return chunk_write_ec_groupdata_(stfd, ec_info, buf, size, new_replciate,
									 CHUNK_WRITE_ECGROUP_DATA2);
}

int _chunk_write_ec_on_error(st_netfd_t stfd, struct Request_ChunkWriteECOnErr* req, void* buf_new);
/* Error-path EC write: send `size` bytes described by ec_info in one buffer. */
static inline int chunk_write_ec_on_error(st_netfd_t stfd, struct WriteECOnErrInfo *ec_info, void* buf, uint32_t size)
{
	LOG_DEBUG("%s volumeid %x index %d", __func__, ec_info->id.volumeid, ec_info->id.index);

	struct Request_ChunkWriteECOnErr request;
	setup_header(&request.header, CHUNK_WRITE_EC_ON_ERROR);
	memcpy(&request.info, ec_info, sizeof(*ec_info));
	request.data.length = size;
	return _chunk_write_ec_on_error(stfd, &request, buf);
}

int _chunk_writev_ec_on_error(st_netfd_t stfd, struct Request_ChunkWriteECOnErr* req, struct iovec *iov, uint32_t niov);
/* Vectored variant of chunk_write_ec_on_error(). After the call, any server
 * list updated in the request is copied back into the caller's ec_info. */
static inline int chunk_writev_ec_on_error(st_netfd_t stfd, struct WriteECOnErrInfo *ec_info, struct iovec *iov, uint32_t niov, uint32_t iov_size_byte)
{
	LOG_DEBUG("%s volumeid %x index %d", __func__, ec_info->id.volumeid, ec_info->id.index);

	struct Request_ChunkWriteECOnErr request;
	setup_header(&request.header, CHUNK_WRITE_EC_ON_ERROR);
	memcpy(&request.info, ec_info, sizeof(*ec_info));
	request.data.length = iov_size_byte;

	int rc = _chunk_writev_ec_on_error(stfd, &request, iov, niov);
	/* Propagate the (possibly refreshed) EC server list back to the caller —
	 * done unconditionally, even when rc indicates failure, as before. */
	memcpy(ec_info->data_ec_servers, request.info.data_ec_servers, sizeof(ec_info->data_ec_servers));
	return rc;
}

int _chunk_fix_create(st_netfd_t stfd, struct Request_ChunkECFixCreate* req, struct CSEP *cseps);
/* Create a chunk for EC repair (CHUNK_FIX_CREATE); cseps receives endpoint
 * data from the wire-level helper. */
static inline int chunk_fix_create(st_netfd_t stfd, struct ECFixCreateInfo *ec_info, struct CSEP *cseps)
{
	LOG_DEBUG("chunk_fix_create volumeid %x index %d", ec_info->id.volumeid, ec_info->id.index);

	struct Request_ChunkECFixCreate request;
	setup_header(&request.header, CHUNK_FIX_CREATE);
	memcpy(&request.info, ec_info, sizeof(*ec_info));
	return _chunk_fix_create(stfd, &request, cseps);
}

int _chunk_check_ec(st_netfd_t stfd, struct Request_ChunkCheckParityRight *req, struct CSEP *csep);
/* Check EC parity correctness for a (k, n) stripe starting at
 * (volumeid, index); chunk size is narrowed via size64_convert_to32(). */
static inline int chunk_check_ec(st_netfd_t stfd, int k, int n, uint32_t volumeid, uint32_t index, uint64_t size, struct CSEP *cseps)
{
	struct Request_ChunkCheckParityRight request;
	setup_header(&request.header, CHECK_EC_PARITY_RIGHT);
	request.start_id.volumeid = volumeid;
	request.start_id.index = index;
	request.chunk_size = size64_convert_to32(size);
	request.k = k;
	request.n = n;
	return _chunk_check_ec(stfd, &request, cseps);
}

int _chunk_check_ec2(st_netfd_t stfd, struct Request_ChunkCheckParityRight *req);
/* Variant of chunk_check_ec() (CHECK_EC_PARITY_RIGHT2) that returns no
 * endpoint data. */
static inline int chunk_check_ec2(st_netfd_t stfd, int k, int n, uint32_t volumeid, uint32_t index, uint64_t size)
{
	struct Request_ChunkCheckParityRight request;
	setup_header(&request.header, CHECK_EC_PARITY_RIGHT2);
	request.start_id.volumeid = volumeid;
	request.start_id.index = index;
	request.chunk_size = size64_convert_to32(size);
	request.k = k;
	request.n = n;
	return _chunk_check_ec2(stfd, &request);
}

/* Variant of chunk_fix_create() using CHUNK_FIX_CREATE2; no endpoint data is
 * requested back (cseps passed as NULL). */
static inline int chunk_fix_create2(st_netfd_t stfd, struct ECFixCreateInfo *ec_info)
{
	/* Fixed: log line previously said "chunk_fix_create" (copy-paste), which
	 * made the two functions indistinguishable in the debug log. */
	LOG_DEBUG("chunk_fix_create2 volumeid %x index %d", ec_info->id.volumeid, ec_info->id.index);
	struct Request_ChunkECFixCreate req;
	setup_header(&req.header, CHUNK_FIX_CREATE2);
	memcpy(&req.info, ec_info, sizeof(*ec_info));

	return _chunk_fix_create(stfd, &req, NULL);
}

int _chunk_write3(st_netfd_t stfd, struct Request_ChunkWrite3* req, void* buf_new, uint32_t data_size);
/**
 * Multi-phase CHUNK_WRITE3 driver; the meaning of the call depends on which
 * arguments are non-NULL (the branch order below is the protocol):
 * if crcs is not NULL, send req and crcs
 * if buf is not NULL, send buf
 * if all NULL, receive data
 *
 * Note: `(void*)-1` as buf is a sentinel meaning "nothing to send"; the call
 * is silently treated as success (returns 0) without touching the socket.
 */
static inline int chunk_write2(	st_netfd_t stfd,
										struct Request_ChunkWrite3* req,
										void* buf,
										uint32_t size,
										uint32_t* crcs,
										uint32_t crc_nb)
{
	LOG_DEBUG("enter %s buf %p crc %p", __func__, buf, crcs);
	int ret = 0;
	if(crcs){// if crc, send req and crc
		// Phase 1: header + checksum table (crc_nb 32-bit CRCs) go out first.
		ret = _chunk_write3(stfd, req, crcs, crc_nb * sizeof(uint32_t));
		return ret;
	}
	if(buf){// else send buf only
		if(unlikely((void*)-1 == buf)){
			// Sentinel: caller has no payload for this round — treat as success.
			return 0;
		}
		// Phase 2: payload only; req is intentionally NULL here.
		ret = _chunk_write3(stfd, NULL, buf, size);
		return ret;
	}
	// else receive data
	// Phase 3: both crcs and buf NULL — collect the server's response.
	return _chunk_write3(stfd, req, NULL, 0);
}

// return this to indicate that the client needs to update its replica list
#define NEED_TO_UPDATE_REPLICAS 0x1234
int _chunk_write_replicate(st_netfd_t stfd, struct Request_ChunkWriteReplicate* req, void* buf_new);
/* Replicated write: send buf and fan out to the follower servers in
 * endpoints[0..n). NOTE(review): assumes n <= MAX_FOLLOWING_SERVERS —
 * confirm at call sites. */
static inline int chunk_write_replicate(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, void* buf, uint32_t size, struct CSEP* endpoints, uint32_t n)
{
	struct Request_ChunkWriteReplicate request;
	setup_header(&request.header, CHUNK_WRITE_REPLICATE);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.offset = offset;

	/* Copy the follower list, then zero the unused tail of the array. */
	memcpy(request.servers, endpoints, n * sizeof(endpoints[0]));
	if (likely(n < MAX_FOLLOWING_SERVERS))
		memset(request.servers + n, 0, (MAX_FOLLOWING_SERVERS - n) * sizeof(request.servers[0]));

	return _chunk_write_replicate(stfd, &request, buf);
}

int _chunk_writev_replicate(st_netfd_t stfd, struct Request_ChunkWriteReplicate* req, struct iovec *iov, uint32_t vec_num);
/* Vectored replicated write; `size` is the total byte count of iov[0..vec_num).
 * NOTE(review): assumes n <= MAX_FOLLOWING_SERVERS — confirm at call sites. */
static inline int chunk_writev_replicate(st_netfd_t stfd, uint32_t volumeid, uint32_t index,
										 uint32_t offset, struct iovec *iov, uint32_t vec_num,
										 uint32_t size, struct CSEP* endpoints, uint32_t n)
{
	struct Request_ChunkWriteReplicate request;
	setup_header(&request.header, CHUNK_WRITE_REPLICATE);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.offset = offset;

	/* Copy the follower list, then zero the unused tail of the array. */
	memcpy(request.servers, endpoints, n * sizeof(endpoints[0]));
	if (likely(n < MAX_FOLLOWING_SERVERS))
		memset(request.servers + n, 0, (MAX_FOLLOWING_SERVERS - n) * sizeof(request.servers[0]));

	return _chunk_writev_replicate(stfd, &request, iov, vec_num);
}

int _chunk_write_replicate2(st_netfd_t stfd, struct Request_ChunkWriteReplicate2* req, void* buf_new, uint64_t *ret_version, char *ret_mask);
/* Replicated write variant 2: additionally returns the resulting data version
 * and a per-replica success mask via the output parameters.
 * NOTE(review): assumes n <= MAX_FOLLOWING_SERVERS — confirm at call sites. */
static inline int chunk_write_replicate2(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, void* buf, uint32_t size, struct CSEP* endpoints, uint32_t n, uint64_t *ret_version, char *ret_mask)
{
	struct Request_ChunkWriteReplicate2 request;
	setup_header(&request.header, CHUNK_WRITE_REPLICATE2);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.offset = offset;

	/* Copy the follower list, then zero the unused tail of the array. */
	memcpy(request.servers, endpoints, n * sizeof(endpoints[0]));
	if (likely(n < MAX_FOLLOWING_SERVERS))
		memset(request.servers + n, 0, (MAX_FOLLOWING_SERVERS - n) * sizeof(request.servers[0]));

	return _chunk_write_replicate2(stfd, &request, buf, ret_version, ret_mask);
}

int _chunk_writev_replicate2(st_netfd_t stfd, struct Request_ChunkWriteReplicate2* req, struct iovec *iov, uint32_t vec_num, uint64_t *ret_version, char *ret_mask);
/* Vectored form of chunk_write_replicate2(); `size` is the total byte count
 * across iov[0..vec_num). NOTE(review): assumes n <= MAX_FOLLOWING_SERVERS. */
static inline int chunk_writev_replicate2(st_netfd_t stfd, uint32_t volumeid, uint32_t index,
										 uint32_t offset, struct iovec *iov, uint32_t vec_num,
										 uint32_t size, struct CSEP* endpoints, uint32_t n,
										 uint64_t *ret_version, char *ret_mask)
{
	struct Request_ChunkWriteReplicate2 request;
	setup_header(&request.header, CHUNK_WRITE_REPLICATE2);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.offset = offset;

	/* Copy the follower list, then zero the unused tail of the array. */
	memcpy(request.servers, endpoints, n * sizeof(endpoints[0]));
	if (likely(n < MAX_FOLLOWING_SERVERS))
		memset(request.servers + n, 0, (MAX_FOLLOWING_SERVERS - n) * sizeof(request.servers[0]));

	return _chunk_writev_replicate2(stfd, &request, iov, vec_num, ret_version, ret_mask);
}

int _chunk_writev_replicate4(st_netfd_t stfd, struct Request_ChunkWriteReplicate4* req, struct iovec *iov, uint32_t vec_num, char *ret_mask, uint64_t *ret_version);
/* Vectored replicated write variant 4: carries data_version, a flag word and
 * the client wall-clock time; returns the success mask and new version.
 * NOTE(review): assumes n <= MAX_FOLLOWING_SERVERS — confirm at call sites. */
static inline int chunk_writev_replicate4(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, struct iovec *iov, uint32_t vec_num, uint32_t size,
											struct CSEP* endpoints, uint32_t n, uint64_t data_version, uint32_t flag, char *ret_mask, uint64_t *ret_version)
{
	struct Request_ChunkWriteReplicate4 request;
	setup_header(&request.header, CHUNK_WRITE_REPLICATE4);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.client_time = time(0);	/* stamped here, not caller-supplied */
	request.data_version = data_version;
	request.flag = flag;
	request.data.length = size;
	request.offset = offset;

	/* Copy the follower list, then zero the unused tail of the array. */
	memcpy(request.servers, endpoints, n * sizeof(endpoints[0]));
	if (likely(n < MAX_FOLLOWING_SERVERS))
		memset(request.servers + n, 0, (MAX_FOLLOWING_SERVERS - n) * sizeof(request.servers[0]));

	return _chunk_writev_replicate4(stfd, &request, iov, vec_num, ret_mask, ret_version);
}

int __chunk_write_replicate3(st_netfd_t stfd, struct Request_ChunkWriteReplicate3* req, void* buf_new, uint32_t* crcs, uint32_t crc_nb);
/* Checksummed replicated write: ckinfo->crc32_nb entries of crcs[] accompany
 * the payload. NOTE(review): assumes n <= MAX_FOLLOWING_SERVERS. */
static inline int _chunk_write_replicate3(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, void* buf, uint32_t size, struct CSEP* endpoints, uint32_t n,
										struct ChecksumInfo *ckinfo, uint32_t* crcs)
{
	struct Request_ChunkWriteReplicate3 request;
	setup_header(&request.header, CHUNK_WRITE_REPLICATE3);
	request.id.volumeid = volumeid;
	request.id.index = index;
	request.data.length = size;
	request.offset = offset;

	/* Copy the follower list, then zero the unused tail of the array. */
	memcpy(request.servers, endpoints, n * sizeof(endpoints[0]));
	if (likely(n < MAX_FOLLOWING_SERVERS))
		memset(request.servers + n, 0, (MAX_FOLLOWING_SERVERS - n) * sizeof(request.servers[0]));

	return __chunk_write_replicate3(stfd, &request, buf, crcs, ckinfo->crc32_nb);
}

/* Computes the CRC table for [offset, offset+size) of buf_new — presumably
 * fills crcs[] and *ckinfo (incl. ckinfo->crc32_nb); implemented in the .c. */
void compute_crcs(void* buf_new, uint32_t offset, uint32_t size, uint32_t *crcs, struct ChecksumInfo* ckinfo);
/* Convenience wrapper: compute per-block CRCs for buf, then delegate to
 * _chunk_write_replicate3() with the derived checksum info. */
static inline int chunk_write_replicate3(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, void* buf, uint32_t size, struct CSEP* endpoints, uint32_t n)
{
	// Both ckinfo and crcs are outputs of compute_crcs(); left uninitialized
	// here on purpose. NOTE(review): crcs is 16 KiB of stack — confirm
	// crc32_nb can never exceed 4096 for the stated 16M / 512-align limits.
	struct ChecksumInfo ckinfo;
	uint32_t crcs[4096]; // todo max buf length is 16M, and offset is 512 align

	compute_crcs(buf, offset, size, crcs, &ckinfo);

	return _chunk_write_replicate3(stfd, volumeid, index, offset, buf, size, endpoints, n, &ckinfo, crcs);
}

int _chunk_create_write_replicate(st_netfd_t stfd, struct Request_ChunkCreateWriteReplicate* req, void* buf_new);
/* Create the chunk and write its first data in one round trip, replicating to
 * endpoints[0..n); preallocation/incubation map to the CHUNK_CREATE_* flags.
 * NOTE(review): assumes n <= MAX_FOLLOWING_SERVERS — confirm at call sites. */
static inline int chunk_create_write_replicate(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t offset, void* buf, uint32_t size, struct CSEP* endpoints, uint32_t n, bool preallocation, bool incubation)
{
	struct Request_ChunkCreateWriteReplicate req;
	setup_header(&req.header, CHUNK_CREATE_WRITE_REPLICATE);
	req.id.volumeid = volumeid;
	req.id.index = index;
	req.offset = offset;
	req.data.length = size;
	req.flags = 0;
	if (preallocation)
		req.flags |= CHUNK_CREATE_PREALLOCATION;
	if (incubation)
		req.flags |= CHUNK_CREATE_INCUBATION;
	/* The memcpy plus tail memset below fully initialize req.servers, so the
	 * former up-front memset of the whole array was redundant and is removed. */
	memcpy(req.servers, endpoints, n * sizeof(endpoints[0]));
	if (likely(n < MAX_FOLLOWING_SERVERS))
	{
		memset(req.servers + n, 0,  (MAX_FOLLOWING_SERVERS - n) * sizeof(req.servers[0]));
	}

	return _chunk_create_write_replicate(stfd, &req, buf);
}

/* Clear the buffered EC record for the ith data chunk of (volumeid, index). */
int chunk_clear_ec_record(st_netfd_t stfd, uint32_t volumeid, uint32_t index, int ith);

/* Fetch the chunk's current version into *version. */
int chunk_get_version(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t *version);

/* Fetch up to nb_to_get EC versions for the chunk into versions[]. */
int chunk_get_ec_version(st_netfd_t stfd, uint32_t volumeid, uint32_t index, int nb_to_get, uint64_t *versions);

/* Flush the chunk's pending data on the server. */
int chunk_flush(st_netfd_t stfd, uint32_t volumeid, uint32_t index);

/* Delete / restore a chunk. */
int chunk_delete(st_netfd_t stfd, uint32_t volumeid, uint32_t index);

int chunk_undelete(st_netfd_t stfd, uint32_t volumeid, uint32_t index);

/* Request the chunk's journal since `version`; ip/port identify the peer the
 * journal should go to — TODO confirm direction and `flag` semantics in .c. */
int chunk_get_journal(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint32_t ip, uint16_t port, uint32_t flag);

#define SHA1_BYTES 20
/* Compute the chunk's SHA-1; buf_new must hold at least SHA1_BYTES bytes. */
int chunk_checksum_sha1(st_netfd_t stfd, uint32_t volumeid, uint32_t index, void* buf_new);

/* Clone a chunk on the server — semantics defined in the .c. */
int chunk_clone(st_netfd_t stfd, uint32_t volumeid, uint32_t index);

/* Replicate chunk state since `since_version` toward peer ip:port —
 * presumably used to seed a new replica; confirm in .c. */
int chunk_breed(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t since_version, uint32_t ip, uint16_t port);

int chunk_sync_data(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t since_version, uint32_t ip, uint16_t port);

/* Fetch chunk metadata into *info and the data size into *chunk_data_size. */
int chunk_get_info(st_netfd_t stfd, uint32_t volumeid, uint32_t index, void *info, uint64_t *chunk_data_size);

/* Suspend (to_suspend=true) or resume writes on a chunk; the check variant
 * queries the current suspend state. */
int chunk_suspend_write(st_netfd_t stfd, uint32_t volumeid, uint32_t index, bool to_suspend);
int chunk_check_suspend_write(st_netfd_t stfd, uint32_t volumeid, uint32_t index);

/* Notifications sent to the chunk server about replica / EC topology changes. */
int chunk_notify_new_replica(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t ip, uint16_t port);

int chunk_notify_new_ec_chunk(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t ip, uint16_t port, uint32_t new_index, int m);

int chunk_notify_incubation_done(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version);

int chunk_notify_start_incubation(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t* ret_version);

/* Terminate the chunk server with exit `code`; the no_wait variant does not
 * wait for an acknowledgement. */
int chunk_server_terminate(st_netfd_t stfd, int code);
int chunk_server_terminate_no_wait(st_netfd_t stfd, int code);

/* Liveness probes; ping2 additionally references a specific chunk, and
 * restart_ping dials ip:port directly instead of using an open fd. */
int chunk_server_ping2(st_netfd_t stfd, uint32_t volumeid, uint32_t index);
int chunk_server_ping(st_netfd_t stfd);
int chunk_server_restart_ping(uint32_t ip, uint16_t port);

int chunk_server_restart(st_netfd_t stfd);

/* Rename chunk (volumeid, index) to (volumeid1, index1). */
int chunk_rename(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t volumeid1, uint32_t index1);

/* Journal maintenance: flush (optionally deleting afterwards) or fetch the
 * journals; *bytes_len receives the journal byte count. */
int chunk_flushjournal(st_netfd_t stfd, bool delete_after_flush);

int chunk_getjounals(st_netfd_t stfd, uint64_t *bytes_len);

int chunk_flush_ec_journal(st_netfd_t stfd, uint32_t volumeid, uint32_t index);

int chunk_get_ec_journals(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t *bytes_len);

#ifdef __cplusplus
}
#endif

#endif
