#include <stdlib.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include "anysocket.h"
#include "ursax.h"
#include "log.h"
#include "chunk-client.h"
#include "protocol.h"
#include "networking.h"
#include "ursax-crc32.h"
#include "concurrent-connection-pool.h"


uint16_t _opsn = 0;

/* Receive the second (payload) part of a response into `buf`.
 * On short read or connection error, logs and makes the enclosing
 * function return MK_CONNECTION_ERROR(-1). No cleanup is run. */
#define RECV_2ND_PART(stfd, buf, size) RECV_2ND_PART_FAILURE(stfd, buf, size, (void)0)

/* Same as RECV_2ND_PART, but runs `failure_cmd` (e.g. free()) before the
 * early return. Uses the default URSAX_TIMEOUT. */
#define RECV_2ND_PART_FAILURE(stfd, buf, size, failure_cmd) \
	RECV_2ND_PART_FAILURE_TMOUT(stfd, buf, size, failure_cmd, URSAX_TIMEOUT)

/* Full form with explicit timeout.
 * CAUTION: this macro *returns from the caller* on failure — any resource
 * not released by `failure_cmd` leaks. It also declares a local `ret`,
 * so do not use a variable of that name at the expansion site. */
#define RECV_2ND_PART_FAILURE_TMOUT(stfd, buf, size, failure_cmd, tmout) {					\
	int ret = st_read_fully(stfd, buf, size, (tmout));										\
	if (unlikely(ret < (int)size))															\
	{																						\
		if (likely(ret < 0)) {																\
			LOG_ERROR("%s() connection error", __func__);									\
		} else {																			\
			LOG_ERROR("received length(%d) < expected(%d)", ret, (int)size);				\
		}																					\
		failure_cmd;																		\
		return MK_CONNECTION_ERROR(-1);														\
	}																						\
}																							\

/* Send a chunk-create request and wait for its acknowledgement.
 * Returns 0 on success; error propagation is handled by
 * CHECK_ERROR_LOG_RETURN. */
int _chunk_create(st_netfd_t stfd, struct Request_ChunkCreate* req)
{
	struct Response_ChunkCreate resp;

	LOG_DEBUG("enter _chunk_create (%08x.%u)", req->id.volumeid, req->id.index);
	int rc = operation_send_recv(stfd, req, sizeof(*req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Ask the chunkserver to close its fd for the given chunk.
 * Returns 0 on success; errors are propagated by CHECK_ERROR_LOG_RETURN. */
int _chunk_close_fd(st_netfd_t stfd, struct Request_ChunkCloseFd* req)
{
	struct Response_ChunkCloseFd resp;

	LOG_DEBUG("enter chunk close fd %08x.%d", req->id.volumeid, req->id.index);
	int rc = operation_send_recv(stfd, req, sizeof(*req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* List all chunks of a volume.
 * On success, *result points to a malloc'd array the CALLER must free,
 * and the chunk count is returned. Returns a negative error otherwise. */
int chunk_list(st_netfd_t stfd, uint32_t volumeid, struct ChunkID** result)
{
	struct Request_ChunkList req;
	setup_header(&req.header, CHUNK_LIST);
	req.volumeid = volumeid;

	struct Response_ChunkList resp;
	int ret = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, &req, resp, stfd);

	uint32_t size = resp.chunks.count * sizeof(struct ChunkID);
	/* Fix: the original dereferenced an unchecked malloc() result.
	 * Drain the pending payload so the connection stays usable. */
	struct ChunkID* chunks = (struct ChunkID*)malloc(size ? size : 1);
	if (unlikely(chunks == NULL))
	{
		LOG_ERROR("%s(): malloc(%u) failed", __func__, size);
		consume_bytes(stfd, size);
		return -1;
	}
	RECV_2ND_PART_FAILURE(stfd, chunks, size, free(chunks));
	*result = chunks;
	return resp.chunks.count;
}

/* Read req->size bytes of chunk data into buf.
 * If the server returns fewer bytes than requested, the payload is drained
 * with consume_bytes() and -1 is returned so the connection can be reused. */
int _chunk_read(st_netfd_t stfd, struct Request_ChunkRead* req, void* buf)
{
	struct Response_ChunkRead resp;

	int rc = operation_send_recv(stfd, req, sizeof(*req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);

	if (unlikely(resp.data.length < req->size)) {
		LOG_ERROR("_chunk_read(): response data length %d < requested size %d", resp.data.length, req->size);
		consume_bytes(stfd, resp.data.length);
		return -1;
	}

	RECV_2ND_PART(stfd, buf, req->size);
	return 0;
}

/* Read chunk data scattered into the caller's iovec array.
 * vec_num must not exceed DEFAULT_VECTOR_SIZE (the size of the local copy).
 * Returns 0 on success, -1 on short data, connection error otherwise. */
int _chunk_readv(st_netfd_t stfd, struct Request_ChunkRead* req, struct iovec *iov, uint32_t vec_num)
{
	struct Response_ChunkRead resp;
	int ret = operation_send_recv(stfd, req, sizeof(*req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	if (unlikely(resp.data.length < req->size))
	{
		LOG_ERROR("_chunk_readv(): response data length %d < requested size %d", resp.data.length, req->size);
		consume_bytes(stfd, resp.data.length);
		return -1;
	}

	/* Fix: guard the fixed-size stack copy below — a larger vec_num would
	 * overflow iovs[]. Drain the payload so the connection stays usable. */
	if (unlikely(vec_num > DEFAULT_VECTOR_SIZE))
	{
		LOG_ERROR("%s(): vec_num %u exceeds DEFAULT_VECTOR_SIZE", __func__, vec_num);
		consume_bytes(stfd, resp.data.length);
		return -1;
	}

	/**
	 * The reason of copying io vectors:
	 *	st_readv_resid() would change io vector pointer and the array it pointers to.
	 *	This must fail our next try (if we fail here, caller will retry in pooled functions).
	 *	The simplest way to avoid this is to copy vectors.
	 */
	struct iovec iovs[DEFAULT_VECTOR_SIZE];
	struct iovec *iovs_ptr = iovs;
	int num = vec_num;
	memcpy(iovs_ptr, iov, num*sizeof(struct iovec));

	ret = multy_receive_st_readv(stfd, &iovs_ptr, &num, resp.data.length, URSAX_TIMEOUT);
	if (ret != 0 || num != 0) {
		LOG_ERROR("%s: conn err, errno[%d], incompete[%d]", __func__, errno, num);
		return MK_CONNECTION_ERROR(-1);
	}

	return 0;
}

// todo a copy of _chunk_readv ?
int _chunk_readv4(st_netfd_t stfd, struct Request_ChunkRead4* req, struct iovec *iov, uint32_t vec_num)
{
	struct Response_ChunkRead4 resp;
	int ret = operation_send_recv(stfd, req, sizeof(*req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	if (unlikely(resp.data.length < req->size))
	{
		LOG_ERROR("_chunk_readv(): response data length %d < requested size %d", resp.data.length, req->size);
		consume_bytes(stfd, resp.data.length);
		return -1;
	}

	/**
	 * The reason of copying io vectors:
	 *	st_readv_resid() would change io vector pointer and the array it pointers to.
	 *	This must fail our next try (if we fail here, caller will retry in pooled functions).
	 *	The simplest way to avoid this is to copy vectors.
	 */
	struct iovec iovs[DEFAULT_VECTOR_SIZE];
	struct iovec *iovs_ptr = iovs;
	int num = vec_num;
	memcpy(iovs_ptr, iov, num*sizeof(struct iovec));

	// ret = st_readv_resid(stfd, &iovs_ptr, &num, URSAX_TIMEOUT);
	ret = multy_receive_st_readv(stfd, &iovs_ptr, &num, resp.data.length, URSAX_TIMEOUT);
	if (ret != 0 || num != 0) {
		LOG_ERROR("%s: conn err, errno[%d], incompete[%d]", __func__, errno, num);
		return MK_CONNECTION_ERROR(-1);
	}

	return 0;
}

// Computes per-block CRC32 values of [offset, offset+size) into crcs.
// (The old "0 ok / -1 wrong data" codes describe the CRC comparison done
// by the caller, _chunk_read3(); compute_crcs() itself returns void.)
/* Compute CRC32 checksums over `buf` for the byte range [offset, offset+size),
 * split into a partial leading block, whole CRC32_BLOCK_SIZE middle blocks,
 * and a partial trailing block as described by parse_range()'s output in
 * *ckinfo. Results are written sequentially into crcs[].
 * NOTE(review): assumes crcs[] has room for all produced entries — caller's
 * responsibility; confirm against ckinfo->crc32_nb at call sites. */
void compute_crcs(void* buf, uint32_t offset, uint32_t size, uint32_t *crcs, struct ChecksumInfo* ckinfo)
{
	uint32_t crcs_nb = 0;
	/* Fill ckinfo with pre/mid/suf split of the range. */
	parse_range(offset, size, ckinfo);

#ifdef ZERO_FILL_CRC
	char block_size_buf[CRC32_BLOCK_SIZE]; // todo CRC32_BLOCK_SIZE should not be too large
#endif
	uint32_t one_crc;
	uint32_t crc_buf_start = 0;
	/* Leading partial block: data starts mid-block (offset not aligned). */
	if(ckinfo->pre_len){
		uint32_t crc_size = MIN(CRC32_BLOCK_SIZE - ckinfo->pre_len, (int)size);
#ifdef ZERO_FILL_CRC
		/* Zero-pad to a full block, then CRC the whole block.
		 * NOTE: `buf + crc_buf_start` is void* arithmetic (GNU extension). */
		memset(block_size_buf, 0, CRC32_BLOCK_SIZE);
		memcpy(block_size_buf + ckinfo->pre_len, buf + crc_buf_start, crc_size);
		crc32_4K_sequence(block_size_buf, CRC32_BLOCK_SIZE, &one_crc);
#else
		/* CRC only the bytes actually present. */
		one_crc = crc32_n((void*)((uint64_t)buf + crc_buf_start), crc_size);
#endif
		crcs[crcs_nb++] = one_crc;
		crc_buf_start += crc_size;
	}
	/* Aligned middle: one CRC per full CRC32_BLOCK_SIZE block. */
	if(ckinfo->mid_len){
		crc32_4K_sequence((char*)buf + crc_buf_start, ckinfo->mid_len,
				&crcs[crcs_nb]);
		crcs_nb += (ckinfo->mid_len / CRC32_BLOCK_SIZE);
		crc_buf_start += ckinfo->mid_len;
	}
	/* Trailing partial block: range ends mid-block. */
	if(ckinfo->suf_len){
#ifdef ZERO_FILL_CRC
		memset(block_size_buf, 0, CRC32_BLOCK_SIZE);
		memcpy(block_size_buf, buf + crc_buf_start, CRC32_BLOCK_SIZE - ckinfo->suf_len);
		crc32_4K_sequence(block_size_buf, CRC32_BLOCK_SIZE, &one_crc);
#else
		one_crc = crc32_n((void*)((uint64_t)buf + crc_buf_start),
				CRC32_BLOCK_SIZE - ckinfo->suf_len);
#endif
		crcs[crcs_nb++] = one_crc;
	}
}

/* Read chunk data with CRC verification: receives the server's per-block
 * CRCs, then the data, recomputes CRCs locally and compares.
 * Returns 0 on verified data, a connection error on mismatch or failure. */
int _chunk_read3(st_netfd_t stfd, struct Request_ChunkRead3* req, void* buf, struct ChecksumInfo *ckinfo)
{
	struct Response_ChunkRead3 resp;
	int ret = operation_send_recv(stfd, req, sizeof(*req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	if (unlikely(resp.data.length != req->size))
	{
		/* Fix: log previously named _chunk_read_checksum() and said "<"
		 * while the condition is "!=". */
		LOG_ERROR("%s(): response data length %d != requested size %d", __func__, resp.data.length, req->size);
		consume_bytes(stfd, resp.data.length + sizeof(uint32_t) * ckinfo->crc32_nb);
		return MK_CONNECTION_ERROR(-1);
	}

	uint32_t crcs_receive[4096]; // todo max buf length is 16M, and offset is 512 align
	/* Fix: bound-check the CRC count against the fixed stack arrays —
	 * an oversized crc32_nb would smash the stack. */
	if (unlikely(ckinfo->crc32_nb > 4096))
	{
		LOG_ERROR("%s(): crc count %d exceeds local capacity", __func__, ckinfo->crc32_nb);
		consume_bytes(stfd, resp.data.length + sizeof(uint32_t) * ckinfo->crc32_nb);
		return MK_CONNECTION_ERROR(-1);
	}
	LOG_DEBUG("receive %d crc", ckinfo->crc32_nb);
	LOG_DEBUG("resp data length is %d", resp.data.length);
	RECV_2ND_PART(stfd, crcs_receive, sizeof(uint32_t) * ckinfo->crc32_nb);
	RECV_2ND_PART(stfd, buf, resp.data.length);

	/* Recompute CRCs locally; __ckinfo only receives parse_range()'s split,
	 * the comparison width still comes from the caller's ckinfo. */
	struct ChecksumInfo __ckinfo;
	uint32_t crcs_by_calc[4096]; // todo max buf length is 16M, and offset is 512 align
	compute_crcs(buf, req->offset, req->size, crcs_by_calc, &__ckinfo);

	if(0 != memcmp(crcs_by_calc, crcs_receive, sizeof(uint32_t) * ckinfo->crc32_nb)){
		LOG_ERROR("received data's crc not match the data");
		return MK_CONNECTION_ERROR(-1); // todo connection err ?
	}

	return 0;
}

/* Write req->data.length bytes from buf to the chunk described by req. */
int _chunk_write(st_netfd_t stfd, struct Request_ChunkWrite* req, void* buf)
{
	struct Response_ChunkWrite resp;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Like _chunk_write, but data is emitted through the caller's send callback. */
int _chunk_writex(st_netfd_t stfd, struct Request_ChunkWrite* req, void* buf, send_cb_t send)
{
	struct Response_ChunkWrite resp;

	int rc = operation_send2x_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp), send);
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* v4 chunk write: send header + payload, wait for acknowledgement. */
int _chunk_write4(st_netfd_t stfd, struct Request_ChunkWrite4* req, void* buf)
{
	struct Response_ChunkWrite4 resp;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* v4 vectored chunk write: payload is supplied as iov_nb iovecs totaling
 * req->data.length bytes. */
int _chunk_writev4(st_netfd_t stfd, struct Request_ChunkWrite4* req, struct iovec *iov, int iov_nb)
{
	struct Response_ChunkWrite4 resp;

	int rc = operation_sendv_recv(stfd, req, sizeof(*req), iov, iov_nb, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* v4 chunk write with a custom send callback for the payload. */
int _chunk_write4x(st_netfd_t stfd, struct Request_ChunkWrite4* req, void* buf, send_cb_t send)
{
	struct Response_ChunkWrite4 resp;

	int rc = operation_send2x_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp), send);
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Push an EC parity update (header + payload) and wait for the reply. */
int _chunk_update_ec_parity(st_netfd_t stfd, struct Request_ChunkUpdateECParity* req, void* buf)
{
	struct Response_ChunkUpdateECParity resp;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Write EC data; reports the server's success mask and, if the server
 * elected a new replicate, returns NEED_TO_UPDATE_REPLICAS with
 * *new_replicate set. Returns 0 otherwise.
 * NOTE(review): *new_success_mask is written from resp BEFORE the error
 * check — on a connection failure resp may be uninitialized, so the mask
 * is garbage in that case. Verify callers ignore it on error. */
int _chunk_write_ec_data(st_netfd_t stfd, struct Request_ChunkWriteECData* req, void* buf, char *new_replicate, char* new_success_mask)
{
	struct Response_ChunkWriteECData resp;
	int ret = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	*new_success_mask = resp.success_mask;
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	if(resp.new_replicate){
		*new_replicate = resp.new_replicate;
		return NEED_TO_UPDATE_REPLICAS;
	}
	return 0;
}

/* Write EC group data; on success reports the server-chosen replicate
 * through *new_replicate. */
int _chunk_write_ec_groupdata(st_netfd_t stfd, struct Request_ChunkWriteECGroupData* req, void* buf, uint32_t *new_replicate)
{
	struct Response_ChunkWriteECGroupData resp;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);

	*new_replicate = resp.new_replicate;
	return 0;
}

/* v2 of _chunk_write_ec_data — same contract with the v2 wire types.
 * NOTE(review): as in v1, *new_success_mask is written from resp BEFORE
 * the error check; on a connection failure resp may be uninitialized.
 * Verify callers ignore the mask on error. */
int _chunk_write_ec_data2(st_netfd_t stfd, Request_ChunkWriteECData2* req, void* buf, char *new_replicate, char* new_success_mask)
{
	Response_ChunkWriteECData2 resp;
	int ret = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	*new_success_mask = resp.success_mask;
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	if(resp.new_replicate){
		*new_replicate = resp.new_replicate;
		return NEED_TO_UPDATE_REPLICAS;
	}
	return 0;
}

/* Write data to a parity chunk; when the server answers ERROR_DATA, the
 * returned d0 record is copied out for the caller's recovery path.
 * NOTE(review): resp.retcode is inspected BEFORE the error check — on a
 * connection failure resp may be uninitialized and *record could be
 * written with garbage. Confirm callers only use *record when the final
 * return value indicates ERROR_DATA. */
int _chunk_write_data_to_parity(st_netfd_t stfd, struct Request_ChunkWriteDataToParity* req, void* buf, struct ReWriteD0Record* record)
{
	struct Response_ChunkWriteDataToParity resp;
	int ret = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	if(resp.retcode == ERROR_DATA){
		*record = resp.d0_record;
	}
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	return 0;
}

/* Create a chunk for EC fix. When cseps is non-NULL, the k+n server
 * placements are sent as a second payload; otherwise only the header. */
int _chunk_fix_create(st_netfd_t stfd, struct Request_ChunkECFixCreate* req, struct CSEP *cseps)
{
	struct Response_ChunkECFixCreate resp;
	int total = req->info.k + req->info.n;
	int rc;

	if (cseps) {
		rc = operation_send2_recv(stfd, req, sizeof(*req), cseps, sizeof(*cseps)*total, &resp, sizeof(resp));
	} else {
		rc = operation_send_recv(stfd, req, sizeof(*req), &resp, sizeof(resp));
	}
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Verify parity correctness, shipping the k+n CSEP table along with the
 * request. Uses an extended (6x) timeout: the server-side check is slow. */
int _chunk_check_ec(st_netfd_t stfd, struct Request_ChunkCheckParityRight *req, struct CSEP *csep)
{
	struct Response_ChunkCheckParityRight resp;
	int total = req->k + req->n;

	int rc = operation_send2_recv_with_timeout(stfd, req, sizeof(*req), csep, total*sizeof(struct CSEP), &resp, sizeof(resp), URSAX_TIMEOUT*6);
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Header-only variant of _chunk_check_ec (no CSEP payload), same
 * extended timeout. */
int _chunk_check_ec2(st_netfd_t stfd, struct Request_ChunkCheckParityRight *req)
{
	struct Response_ChunkCheckParityRight resp;

	int rc = operation_send_recv_with_timeout(stfd, req, sizeof(*req), &resp, sizeof(resp), URSAX_TIMEOUT*6);
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* EC error-path write: header + payload, wait for acknowledgement. */
int _chunk_write_ec_on_error(st_netfd_t stfd, struct Request_ChunkWriteECOnErr* req, void* buf)
{
	struct Response_ChunkWriteECOnErr resp;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Vectored EC error-path write; copies the server's refreshed EC server
 * list back into the request for the caller's next attempt.
 * NOTE(review): the memcpy from resp happens BEFORE the error check — on
 * a connection failure resp may be uninitialized and req->info is then
 * overwritten with garbage. Confirm the retry path rebuilds it. */
int _chunk_writev_ec_on_error(st_netfd_t stfd, struct Request_ChunkWriteECOnErr* req, struct iovec *iov, uint32_t niov)
{
	struct Response_ChunkWriteECOnErr resp;
	int ret = operation_sendv_recv(stfd, req, sizeof(*req), iov, niov, req->data.length, &resp, sizeof(resp));
	memcpy(req->info.data_ec_servers, resp.data_ec_servers, sizeof(resp.data_ec_servers));
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	return 0;
}

/* Three-mode write primitive:
 *   buf == NULL            -> flush and receive the pending response only;
 *   req == NULL            -> send raw data only (streaming continuation);
 *   both non-NULL          -> send header + data in one writev.
 * Returns 0 on success, -1 / connection error on failure. */
int _chunk_write3(st_netfd_t stfd, struct Request_ChunkWrite3* req, void* buf, uint32_t data_size)
{
	LOG_DEBUG("enter %s", __func__);
	struct Response_ChunkWrite3 resp;
	int ret = 0;

	// if buf is NULL, receive data
	if(!buf){
		LOG_DEBUG("buf is NULL, receive data");
		flush_tcp(stfd);
		ret = st_read_fully(stfd, &resp, sizeof(resp), URSAX_TIMEOUT);
		CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
		return 0;
	}

	// if buf not NULL, but req is NULL, just send buf
	if(!req){
		LOG_DEBUG("req is NULL but buf not NULL, data size si %d", data_size);
		ret = st_write(stfd, buf, data_size, URSAX_TIMEOUT);
		if(ret < (int)data_size){
			log_socket_error(stfd, "write");
			return -1;
		}
		flush_tcp(stfd);
		return 0;
	}

	// req is not NULL, and buf not NULL ,send both
	LOG_DEBUG("req is not NULL , data size is %d", data_size);
	struct iovec iov[2];
	iov[0].iov_base = req;
	iov[0].iov_len = sizeof(*req);
	/* Fix: removed the dead `buf ? buf : req` ternary — buf is proven
	 * non-NULL by the first guard above. */
	iov[1].iov_base = buf;
	iov[1].iov_len = data_size;
	int len = st_writev(stfd, iov, 2, URSAX_TIMEOUT);
	if(len < (int)(sizeof(*req) + data_size)){
		log_socket_error(stfd, "writev");
		return -1;
	}
	return 0;
}

/* Replicated write. Returns NEED_TO_UPDATE_REPLICAS (wrapped by
 * MK_OPERATION_ERROR) when the server asks the caller to refresh its
 * replica set, 0 otherwise. */
int _chunk_write_replicate(st_netfd_t stfd, struct Request_ChunkWriteReplicate* req, void* buf)
{
	struct Response_ChunkWriteReplicate resp;

	LOG_DEBUG("enter %s", __func__);
	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	LOG_DEBUG("receive %s", __func__);
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);

	if (unlikely(resp.need_to_update_replicas))
		return MK_OPERATION_ERROR(NEED_TO_UPDATE_REPLICAS);
	return MK_OPERATION_ERROR(0);
}

/* Vectored replicated write; see _chunk_write_replicate for the
 * NEED_TO_UPDATE_REPLICAS convention. */
int _chunk_writev_replicate(st_netfd_t stfd, struct Request_ChunkWriteReplicate* req, struct iovec *iov, uint32_t vec_num)
{
	struct Response_ChunkWriteReplicate resp;

	LOG_DEBUG("enter %s", __func__);
	int rc = operation_sendv_recv(stfd, req, sizeof(*req), iov, vec_num, req->data.length, &resp, sizeof(resp));
	LOG_DEBUG("receive %s", __func__);
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);

	if (unlikely(resp.need_to_update_replicas))
		return MK_OPERATION_ERROR(NEED_TO_UPDATE_REPLICAS);
	return MK_OPERATION_ERROR(0);
}

/* v2 replicated write: also returns the server's current chunk version and
 * per-replica return-value mask.
 * NOTE(review): *ret_version and ret_mask are filled from resp BEFORE the
 * error check — on a connection failure resp may be uninitialized. Confirm
 * callers disregard these outputs when an error is returned. */
int _chunk_write_replicate2(st_netfd_t stfd, struct Request_ChunkWriteReplicate2* req, void* buf, uint64_t *ret_version, char *ret_mask)
{
	LOG_DEBUG("enter %s", __func__);
	struct Response_ChunkWriteReplicate2 resp;
	int ret = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	LOG_DEBUG("receive %s", __func__);
	*ret_version = resp.current_version;
	memcpy(ret_mask, resp.ret_values, sizeof(resp.ret_values));
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	ret = unlikely(resp.need_to_update_replicas) ? NEED_TO_UPDATE_REPLICAS : 0;
	return MK_OPERATION_ERROR(ret);
}

/* Vectored form of _chunk_write_replicate2.
 * NOTE(review): same pre-check read of resp as the non-vectored variant —
 * outputs may hold garbage when an error is returned; confirm callers
 * disregard them in that case. */
int _chunk_writev_replicate2(st_netfd_t stfd, struct Request_ChunkWriteReplicate2* req, struct iovec *iov, uint32_t vec_num, uint64_t *ret_version, char *ret_mask)
{
	LOG_DEBUG("enter %s", __func__);
	struct Response_ChunkWriteReplicate2 resp;
	int ret = operation_sendv_recv(stfd, req, sizeof(*req), iov, vec_num, req->data.length, &resp, sizeof(resp));
	LOG_DEBUG("receive %s", __func__);
	*ret_version = resp.current_version;
	memcpy(ret_mask, resp.ret_values, sizeof(resp.ret_values));
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	ret = unlikely(resp.need_to_update_replicas) ? NEED_TO_UPDATE_REPLICAS : 0;
	return MK_OPERATION_ERROR(ret);
}

/* v4 vectored replicated write; ret_mask is optional (may be NULL).
 * NOTE(review): *ret_version / ret_mask are filled from resp BEFORE the
 * error check — possibly uninitialized on connection failure; confirm
 * callers disregard them on error. */
int _chunk_writev_replicate4(st_netfd_t stfd, struct Request_ChunkWriteReplicate4* req, struct iovec *iov, uint32_t vec_num, char *ret_mask, uint64_t *ret_version)
{
	LOG_DEBUG("enter %s", __func__);
	struct Response_ChunkWriteReplicate4 resp;
	int ret = operation_sendv_recv(stfd, req, sizeof(*req), iov, vec_num, req->data.length, &resp, sizeof(resp));
	LOG_DEBUG("receive %s", __func__);
	*ret_version = resp.chunk_version;
	if(ret_mask){
		memcpy(ret_mask, resp.ret_values, sizeof(resp.ret_values));
	}
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	ret = unlikely(resp.need_to_update_replicas) ? NEED_TO_UPDATE_REPLICAS : 0;
	return MK_OPERATION_ERROR(ret);
}

/* v3 replicated write: sends header, CRC table, and data as three parts.
 * Returns NEED_TO_UPDATE_REPLICAS (operation-error wrapped) when the
 * server requests a replica refresh, 0 otherwise. */
int __chunk_write_replicate3(st_netfd_t stfd, struct Request_ChunkWriteReplicate3* req, void* buf, uint32_t* crcs, uint32_t crc_nb)
{
	struct Response_ChunkWriteReplicate3 resp;

	LOG_DEBUG("enter %s", __func__);
	int rc = operation_send3_recv(stfd, req, sizeof(*req), crcs, crc_nb * sizeof(uint32_t), buf, req->data.length, &resp, sizeof(resp));
	LOG_DEBUG("receive %s", __func__);
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);

	if (unlikely(resp.need_to_update_replicas))
		return MK_OPERATION_ERROR(NEED_TO_UPDATE_REPLICAS);
	return MK_OPERATION_ERROR(0);
}

/* Combined create + replicated write in one round trip. Follows the
 * NEED_TO_UPDATE_REPLICAS convention of the replicate writers. */
int _chunk_create_write_replicate(st_netfd_t stfd, struct Request_ChunkCreateWriteReplicate* req, void* buf)
{
	struct Response_ChunkCreateWriteReplicate resp;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);

	if (unlikely(resp.need_to_update_replicas))
		return MK_OPERATION_ERROR(NEED_TO_UPDATE_REPLICAS);
	return MK_OPERATION_ERROR(0);
}

/* Plain EC write: header + payload, wait for acknowledgement. */
int _chunk_write_ec(st_netfd_t stfd, struct Request_ChunkWriteEC* req, void* buf)
{
	struct Response_ChunkWriteEC resp;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Two-phase EC write with a confirmation round:
 *   1. send header + data;
 *   2. obtain a confirm code from the caller's callback (which may block
 *      on coordination with other writers);
 *   3. send the confirm code;
 *   4. receive the final response.
 * The statement order (flush between phases) is part of the wire
 * protocol — do not reorder. */
int _chunk_write_ec2(st_netfd_t stfd, struct Request_ChunkWriteEC* req, void* buf, wait_confirm_code_t fun, void* data)
{
	struct Response_ChunkWriteEC resp;
	int ret = operation_send2(stfd, req, sizeof(*req), buf, req->data.length, URSAX_TIMEOUT);
	if(ret < 0){
		LOG_ERROR("error operation_send2");
		return MK_CONNECTION_ERROR(ret);
	}
	flush_tcp(stfd);
	/* Ask the caller for the go/no-go code for this chunk index. */
	uint32_t confirm_ret = fun(data, req->id.index);
	int len = st_write(stfd, &confirm_ret, sizeof(confirm_ret), URSAX_TIMEOUT);
	if(len < (int)sizeof(confirm_ret)){
		LOG_ERROR("error st_write confirm code %d", len);
		return MK_CONNECTION_ERROR(len);
	}
	flush_tcp(stfd);

	ret = st_read_fully(stfd, &resp, sizeof(resp), URSAX_TIMEOUT);
	CHECK_ERROR_LOG_RETURN(ret, req, resp, stfd);
	return 0;
}

/* EC parity write. A zero-length payload still needs a valid (unused)
 * buffer pointer for operation_send2_recv, so substitute a dummy byte. */
int _chunk_write_ec_parity(st_netfd_t stfd, struct Request_ChunkWriteECParity* req, void* buf)
{
	struct Response_ChunkWriteECParity resp;
	char placeholder = 0;

	if (req->data.length == 0)
		buf = &placeholder;

	int rc = operation_send2_recv(stfd, req, sizeof(*req), buf, req->data.length, &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, req, resp, stfd);
	return 0;
}

/* Query the current version of a chunk. `version` may be NULL when the
 * caller only wants to probe that the chunk responds. */
int chunk_get_version(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t *version)
{
	struct Request_ChunkGetVersion req;
	struct Response_ChunkGetVersion resp;

	setup_header(&req.header, CHUNK_GET_VERSION);
	req.id.index = index;
	req.id.volumeid = volumeid;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);

	if (version != NULL)
		*version = resp.version;
	return 0;
}

/* Fetch up to nb_to_get EC versions of a chunk into versions[].
 * NOTE(review): the final memcpy trusts nb_to_get — it must not exceed
 * either the caller's versions[] capacity or the size of resp.versions;
 * neither bound is checked here. Confirm with callers. */
int chunk_get_ec_version(st_netfd_t stfd, uint32_t volumeid, uint32_t index, int nb_to_get, uint64_t *versions)
{
	struct Request_ChunkGetECVersion req;
	setup_header(&req.header, CHUNK_GET_EC_VERSION);
	req.id.volumeid = volumeid;
	req.id.index = index;
	req.nb_to_get = nb_to_get;

	struct Response_ChunkGetECVersion resp;
	/* 1-second timeout, shorter than the default. */
	int ret = operation_send_recv_with_timeout(stfd, &req, sizeof(req), &resp, sizeof(resp), 1*1000*1000);
	CHECK_ERROR_LOG_RETURN(ret, &req, resp, stfd);
	memcpy(versions, resp.versions, nb_to_get*sizeof(versions[0]));
	return 0;
}

/* Delete a chunk identified by (volumeid, index). */
int chunk_delete(st_netfd_t stfd, uint32_t volumeid, uint32_t index)
{
	struct Request_ChunkDelete req;
	struct Response_ChunkDelete resp;

	setup_header(&req.header, CHUNK_DELETE);
	req.id.index = index;
	req.id.volumeid = volumeid;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Undo a prior chunk_delete for (volumeid, index). */
int chunk_undelete(st_netfd_t stfd, uint32_t volumeid, uint32_t index)
{
	struct Request_ChunkUndelete req;
	struct Response_ChunkUndelete resp;

	setup_header(&req.header, CHUNK_UNDELETE);
	req.id.index = index;
	req.id.volumeid = volumeid;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Flush a chunk's buffered data on the server.
 * NOTE(review): the response is declared as Response_ChunkDelete, not a
 * flush-specific type — possibly a copy-paste leftover. It only works if
 * the two response layouts match; verify against protocol.h. */
int chunk_flush(st_netfd_t stfd, uint32_t volumeid, uint32_t index)
{
	struct Request_ChunkFlush req;
	setup_header(&req.header, CHUNK_FLUSH);
	req.id.volumeid = volumeid;
	req.id.index = index;

	struct Response_ChunkDelete resp;
	int ret = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, &req, resp, stfd);
	return 0;
}

/* Request the SHA-1 of a chunk; the 20-byte digest is received into buf.
 * A reply whose length isn't exactly 20 is drained and rejected. */
int chunk_checksum_sha1(st_netfd_t stfd, uint32_t volumeid, uint32_t index, void* buf)
{
	struct Request_ChunkChecksum req;
	struct Response_ChunkChecksum resp;

	setup_header(&req.header, CHUNK_CHECKSUM);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.algorithm = 0; /* 0 selects SHA-1 */

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);

	if (resp.hash.length != 20) {
		consume_bytes(stfd, resp.hash.length);
		LOG_ERROR("received SHA1 length isn't 20");
		return -1;
	}

	RECV_2ND_PART(stfd, buf, resp.hash.length);
	return 0;
}

/* Clone the chunk (volumeid, index) on the server. */
int chunk_clone(st_netfd_t stfd, uint32_t volumeid, uint32_t index)
{
	struct Request_ChunkClone req;
	struct Response_ChunkClone resp;

	setup_header(&req.header, CHUNK_CLONE);
	req.id.index = index;
	req.id.volumeid = volumeid;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Ask the server to breed (replicate) the chunk since `since_version`
 * to the peer at ip:port. */
int chunk_breed(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t since_version, uint32_t ip, uint16_t port)
{
	struct Request_ChunkBreed req;
	struct Response_ChunkBreed resp;

	setup_header(&req.header, CHUNK_BREED);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.since_version = since_version;
	req.to.port = port;
	req.to.ip = ip;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Ask the server to sync chunk data from the peer at ip:port.
 * since_version is currently unused by the wire protocol here. */
int chunk_sync_data(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t since_version, uint32_t ip, uint16_t port)
{
	struct Request_ChunkSyncData req;
	struct Response_ChunkSyncData resp;

	(void)since_version;
	setup_header(&req.header, CHUNK_SYNC_DATA);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.src_chunk.port = port;
	req.src_chunk.ip = ip;
	memset(&req.sync_extra_info, 0, sizeof(req.sync_extra_info));

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Fetch chunk metadata into `info` and the logical data size into
 * *chunk_data_size (widened from the 32-bit wire field).
 * NOTE(review): the second receive writes resp.data.length bytes into
 * `info` with no size parameter — the caller's buffer must be large enough
 * for any length the server can send; confirm the protocol's upper bound. */
int chunk_get_info(st_netfd_t stfd, uint32_t volumeid, uint32_t index, void *info, uint64_t *chunk_data_size)
{
	struct Request_ChunkGetInfo req;
	setup_header(&req.header, CHUNK_GET_INFO);
	req.id.volumeid = volumeid;
	req.id.index = index;


	struct Response_ChunkGetInfo resp;
	int ret = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, &req, resp, stfd);
	*chunk_data_size = size32_convert_to64(resp.chunk_data_size);
	RECV_2ND_PART(stfd, info, resp.data.length);
	return 0;
}

/* Request the journal of a chunk since `version`, directing the breed
 * target to ip:port; `flag` carries protocol-specific options. */
int chunk_get_journal(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version, uint32_t ip, uint16_t port, uint32_t flag)
{
	struct Request_ChunkGetJournal req;
	struct Response_ChunkGetJournal resp;

	setup_header(&req.header, CHUNK_GET_JOURNAL);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.since_version = version;
	req.breed_to.port = port;
	req.breed_to.ip = ip;
	req.flag = flag;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Suspend (or resume, per to_suspend) writes on a chunk. */
int chunk_suspend_write(st_netfd_t stfd, uint32_t volumeid, uint32_t index, bool to_suspend)
{
	struct Request_ChunkSuspendWrite req;
	struct Response_ChunkSuspendWrite resp;

	setup_header(&req.header, CHUNK_SUSPEND_WRITE);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.to_suspend = to_suspend;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Query whether writes on a chunk are suspended (reuses the suspend
 * request type with to_suspend cleared). */
int chunk_check_suspend_write(st_netfd_t stfd, uint32_t volumeid, uint32_t index)
{
	struct Request_ChunkSuspendWrite req;
	struct Response_ChunkSuspendWrite resp;

	setup_header(&req.header, CHUNK_CHECK_SUSPEND);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.to_suspend = 0;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Notify the server that a new EC chunk (same volume, new_index) exists
 * at ip:port; m is the EC group width. */
int chunk_notify_new_ec_chunk(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t ip, uint16_t port, uint32_t new_index, int m)
{
	struct Request_ChunkNotifyNewECChunk req;
	struct Response_ChunkNotifyNewECChunk resp;

	setup_header(&req.header, CHUNK_NOTIFY_NEW_EC);
	req.id.volumeid = volumeid;
	req.new_id.volumeid = volumeid;
	req.id.index = index;
	req.new_id.index = new_index;
	req.new_addr.port = port;
	req.new_addr.ip = ip;
	req.m = m;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Notify the server of a new replica of (volumeid, index) at ip:port. */
int chunk_notify_new_replica(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t ip, uint16_t port)
{
	struct Request_NotifyNewReplica req;
	struct Response_NotifyNewReplica resp;

	setup_header(&req.header, CHUNK_NOTIFY_NEW_REPLICA);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.server.port = port;
	req.server.ip = ip;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Tell the server that incubation finished at the given version. */
int chunk_notify_incubation_done(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t version)
{
	struct Request_NotifyIncubationDone req;
	struct Response_NotifyIncubationDone resp;

	setup_header(&req.header, CHUNK_INCUBATION_DONE);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.version = version;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Begin incubation for a chunk; the server's current version is returned
 * through *ret_version on success. */
int chunk_notify_start_incubation(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t* ret_version)
{
	struct Request_StartIncubation req;
	struct Response_StartIncubation resp;

	setup_header(&req.header, CHUNK_START_INCUBATION);
	req.id.index = index;
	req.id.volumeid = volumeid;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);

	*ret_version = resp.current_version;
	return 0;
}

/* Fire-and-forget server terminate: sends the request and returns without
 * waiting for (or checking) any reply — intentionally best-effort. */
int chunk_server_terminate_no_wait(st_netfd_t stfd, int code)
{
	struct Request_ChunkServerTerminate req;

	setup_header(&req.header, CHUNK_SERVER_TERMINATE);
	req.code = code;
	st_write(stfd, &req, sizeof(req), URSAX_TIMEOUT);
	return 0;
}

/* Terminate the chunkserver and wait for its acknowledgement. */
int chunk_server_terminate(st_netfd_t stfd, int code)
{
	struct Request_ChunkServerTerminate req;
	struct Response_ChunkServerTerminate resp;

	setup_header(&req.header, CHUNK_SERVER_TERMINATE);
	req.code = code;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Clear the ith EC record of a chunk on the server. */
int chunk_clear_ec_record(st_netfd_t stfd, uint32_t volumeid, uint32_t index, int ith)
{
	struct Request_ChunkClearEcRecord req;
	struct Response_ChunkClearEcRecord resp;

	setup_header(&req.header, CHUNK_CLEAR_EC_RECORD);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.ith = ith;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Ping the chunkserver: send a random code, receive its ASCII echo, and
 * verify it parses back to the same value. Returns 0 on match, -1 on
 * mismatch, connection errors otherwise. */
int chunk_server_ping(st_netfd_t stfd)
{
	struct Request_ChunkServerPing req;
	setup_header(&req.header, CHUNK_SERVER_PING);
	req.code = (rand() << 16) | rand();

	struct Response_ChunkServerPing resp;
	int ret = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, &req, resp, stfd);

	char buf[64];
	/* Fix: bound-check the server-supplied length — buf[resp.code.length]
	 * below would write out of bounds for length >= sizeof(buf). */
	if (unlikely(resp.code.length >= sizeof(buf)))
	{
		LOG_ERROR("ping reply length %d too large", resp.code.length);
		consume_bytes(stfd, resp.code.length);
		return -1;
	}
	RECV_2ND_PART(stfd, buf, resp.code.length);
	buf[resp.code.length] = 0;

	char* endptr;
	long code = strtol(buf, &endptr, 10);
	if (endptr != buf + resp.code.length || (int)code != req.code)
	{
		LOG_ERROR("received code (%s) doesn't match requested (%d)!", buf, req.code);
		return -1;
	}

	return 0;
}

/* Connect to a (possibly freshly restarted) chunkserver at ip:port and
 * ping it with a short timeout. Returns 0 on a verified echo, -1 on any
 * failure. The connection is always closed before returning. */
int
chunk_server_restart_ping(uint32_t ip, uint16_t port)
{
	st_netfd_t stfd;
	MAKE_FD_ELASTIC(
			stfd = ursax_connect(ip, port, true),
			"unable to connect to new chunkserver",
			(-1)
	);

	struct Request_ChunkServerPing req;
	setup_header(&req.header, CHUNK_SERVER_PING);
	req.code = (rand() << 16) | rand();

	struct Response_ChunkServerPing resp;
	uint32_t tmout = 1000 * 1000; /**< 1 second */
	int ret = operation_send_recv_tmout(stfd, &req, sizeof(req),
											   &resp, sizeof(resp), tmout, tmout);

	if (ret < 0) {
		LOG_ERROR("[%s:%d] faild to ping, errno: %d, %s",
				  __func__, __LINE__, errno, strerror(errno));
		st_netfd_close(stfd);
		return ret;
	}

	char buf[64];
	/* Fix: bound-check the server-supplied length — buf[resp.code.length]
	 * below would write out of bounds for length >= sizeof(buf). */
	if (resp.code.length >= sizeof(buf)) {
		LOG_ERROR("[%s:%d] ping reply length %d too large",
				  __func__, __LINE__, resp.code.length);
		ret = -1;
		goto out;
	}
	/* Fix: close the fd in the macro's failure path — the original leaked
	 * stfd when the receive macro early-returned. */
	RECV_2ND_PART_FAILURE_TMOUT(stfd, buf, resp.code.length, st_netfd_close(stfd), tmout);
	buf[resp.code.length] = 0;

	char* endptr;
	long code = strtol(buf, &endptr, 10);
	if (endptr != buf + resp.code.length || (int)code != req.code) {
		LOG_ERROR("[%s:%d] received code (%s) doesn't match requested (%d)!",
				  __func__, __LINE__, buf, req.code);
		ret = -1;
		goto out;
	}

out:
	if (NULL != stfd) {
		st_netfd_close(stfd);
	}
	return (0 > ret) ? -1 : 0;
}

/* Restart the chunkserver and verify the echoed confirmation code.
 * Returns 0 on a verified echo, -1 on mismatch or oversized reply. */
int chunk_server_restart(st_netfd_t stfd)
{
	struct Request_ChunkServerRestart req;
	setup_header(&req.header, CHUNK_SERVER_RESTART);
	req.code = rand();

	struct Response_ChunkServerRestart resp;
	int ret = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, &req, resp, stfd);

	char buf[64];
	/* Fix: bound-check the server-supplied length — buf[resp.msg.length]
	 * below would write out of bounds for length >= sizeof(buf). */
	if (unlikely(resp.msg.length >= sizeof(buf)))
	{
		LOG_ERROR("[%s:%d] restart reply length %d too large",
											__func__, __LINE__, resp.msg.length);
		consume_bytes(stfd, resp.msg.length);
		return -1;
	}
	RECV_2ND_PART(stfd, buf, resp.msg.length);
	buf[resp.msg.length] = 0;

	char* endptr;
	long code = strtol(buf, &endptr, 10);
	if (endptr != buf + resp.msg.length || (int)code != req.code)
	{
		LOG_ERROR("[%s:%d] received code (%s) doesn't match requested (%d)!",
											__func__, __LINE__, buf, req.code);
		return -1;
	}

	return 0;
}

/* Rename chunk (volumeid, index) to (volumeid1, index1). */
int chunk_rename(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint32_t volumeid1, uint32_t index1)
{
	struct Request_ChunkRename req;
	struct Response_ChunkRename resp;

	setup_header(&req.header, CHUNK_RENAME);
	req.id.index = index;
	req.id.volumeid = volumeid;
	req.id1.index = index1;
	req.id1.volumeid = volumeid1;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Flush the server's journal; optionally delete it after flushing. */
int chunk_flushjournal(st_netfd_t stfd, bool delete_after_flush)
{
	struct Request_ChunkFlushJournal req;
	setup_header(&req.header, CHUNK_FLUSH_JOURNAL);
	/* Fix: always initialize flag — the original sent an uninitialized
	 * stack value when delete_after_flush was false. 0 is assumed to mean
	 * "no flags", matching DELETE_AFTER_FLUSH's bit usage — confirm. */
	req.flag = delete_after_flush ? DELETE_AFTER_FLUSH : 0;

	struct Response_ChunkFlushJournal resp;
	int ret = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(ret, &req, resp, stfd);
	return 0;
}

/* Query the size in bytes of the server's big-journal metadata.
 * (Function name keeps its historical spelling for ABI compatibility.) */
int chunk_getjounals(st_netfd_t stfd, uint64_t *bytes_len)
{
	struct Request_ChunkGetBigJournalMeta req;
	struct Response_ChunkGetBigJournalMeta resp;

	setup_header(&req.header, CHUNK_GET_BIG_JOURNAL_META);

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);

	*bytes_len = resp.msg.length;
	return 0;
}

/* Flush the EC journal of a chunk on the server. */
int chunk_flush_ec_journal(st_netfd_t stfd, uint32_t volumeid, uint32_t index)
{
	struct Request_ChunkFlushECJournal req;
	struct Response_ChunkFlushECJournal resp;

	setup_header(&req.header, CHUNK_FLUSH_EC_JOURNAL);
	req.id.index = index;
	req.id.volumeid = volumeid;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);
	return 0;
}

/* Query the size in bytes of a chunk's EC journal metadata. */
int chunk_get_ec_journals(st_netfd_t stfd, uint32_t volumeid, uint32_t index, uint64_t *bytes_len)
{
	struct Request_ChunkGetEcJournal req;
	struct Response_ChunkGetEcJournal resp;

	setup_header(&req.header, CHUNK_GET_EC_JOURNAL_META);
	req.id.index = index;
	req.id.volumeid = volumeid;

	int rc = operation_send_recv(stfd, &req, sizeof(req), &resp, sizeof(resp));
	CHECK_ERROR_LOG_RETURN(rc, &req, resp, stfd);

	*bytes_len = resp.msg.length;
	return 0;
}
