//
// Created by root on 8/3/18.
//

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#define USE_LOCAL_LOG_LEVEL

//#include <bits/mman.h>
#include <sys/mman.h>
#include <exception>
#include "log.h"
#include "ursax.h"
#include "operations.h"
#include "operations-common.h"
#include "chunkserver-journal/chunkserver-journal.h"

/*
 * CHECK_REQUEST_MANAGER_ERROR(ret): purely diagnostic macro used after
 * reporting a chunk error to the manager. It logs (but never propagates)
 * any failure encoded in `ret`: a connection-level error first
 * (CONNECTION_ERROR), otherwise an operation-level error (OPERATION_ERROR).
 * Comments cannot be placed inside the macro body because of the
 * line continuations.
 */
#define CHECK_REQUEST_MANAGER_ERROR(ret)		\
	{																										\
		int error;																							\
		if ((error = CONNECTION_ERROR(ret)))																\
		{																									\
			LOG_ERROR("failed to connect (%d) to manager when reporting chunk server error", error);		\
		}																									\
		else if ((error = OPERATION_ERROR(ret)))															\
		{																									\
			LOG_ERROR("chunk server error reported to manager, but manager returned another error (%d)", error);\
		}																										\
	}

// Pool of MMAPED_BUFFER_SIZE buffers (mmap-backed when possible); created in
// chunk_operations_init(), destroyed in chunk_operations_fini().
MemoryPool* mempool = NULL;

extern struct ChunkServerInfo *g_csi;  // this chunk server's identity (ip/port), defined elsewhere
extern bool current_journal_broken;    // set once the big journal hits an I/O error (see local_chunk_write_data)

#define THREAD_NUM 600                 // worker threads for the replicate fan-out pool
struct thread_pool* th_pool = NULL;    // pool used by op_chunk_write_replicate4
Delayer local_chunk_write_counter;     // latency accounting for local chunk writes
Delayer read_delay;                    // latency accounting for chunk reads

int choose_index = 0;                  // selects the handler in op_map[]; only 0 or 1 are valid

bool use_no_sendfile = false;          // debug switch: route reads through chunk_sendfile_fake()

/*
 * Per-replica write task shared between op_chunk_write_replicate4 (which owns
 * one instance on its stack) and the pooled workers (write_replicate_stub4).
 * task_index tells each worker which follower in `servers` it serves.
 */
struct concurrent_write_task{
    struct Chunk* chunk;                      // chunk being written (owned/closed by the caller)
    uint64_t version;                         // data version carried by this write
    uint64_t offset;                          // byte offset within the chunk
    uint32_t len;                             // payload length in bytes
    void *buf;                                // payload buffer (MMapedBuf-compatible, see send_mmaped_buf)
    struct CSEP* servers;                     // follower endpoints; ip == 0 terminates the list
    int remote_fault[MAX_FOLLOWING_SERVERS];  // per-follower result code (0 = success)
    int nfaults;                              // number of followers that failed
    int task_index;                           // index of the follower this worker handles
    uint32_t flag;                            // request flags forwarded to followers
    time_t client_time;                       // client timestamp, forwarded as-is
    concurrent_write_task(){
        // All members are POD, so a whole-struct zero-fill is safe here.
        memset(this, 0, sizeof(*this));
    }
};

static void* mempool_ctor()
{
    /* Plain (non-mmap) pool element: one aligned heap buffer. */
    void* p = memalign(MEMPOOL_ALIGNMENT, MMAPED_BUFFER_SIZE);
    return p;
}

static void mempool_dtor(void* p)
{
    /* Release a buffer obtained from mempool_ctor(); free(NULL) is a no-op. */
    free(p);
}

/*
 * Pool constructor: allocate one MMAPED_BUFFER_SIZE buffer, preferably backed
 * by an unlinked shm file so send_mmaped_buf() can use zero-copy sendfile.
 * Falls back to a plain aligned heap buffer (fd == 0) on any failure.
 *
 * NOTE: fd == 0 is the "plain heap buffer" sentinel checked by
 * mapped_mempool_dtor()/send_mmaped_buf(), so an open() returning 0 is
 * rejected together with real errors (hence `fd <= 0`, not `fd < 0`).
 */
static void* mapped_mempool_ctor()
{
    MMapedBuf* buf = NULL;
    char path[PATH_MAX];
    /* BUGFIX: bounded snprintf instead of sprintf. */
    snprintf(path, sizeof(path), SHM_PATH"/tmp_file_invisible.%d", getpid());
    int fd = open(path, O_RDWR | O_EXCL | O_CREAT | O_CLOEXEC, 0600);
    if(fd <= 0){
        LOG_ERROR("error open file %s (%m) use tradition buf", path);
        normal_buf:
        buf =(MMapedBuf*)mempool_ctor();
        /* BUGFIX: memalign can fail; the original dereferenced NULL here. */
        if(buf == NULL){
            LOG_ERROR("mempool_ctor failed %m, no buffer available");
            return NULL;
        }
        buf->fd = 0;
        return buf;
    }
    unlink(path); /* file stays alive via fd; invisible in the filesystem */
    int ret = ftruncate(fd, MMAPED_BUFFER_SIZE);
    // int ret = fallocate(fd, 0, 0, MMAPED_BUFFER_SIZE);
    if(ret < 0){
        LOG_ERROR("error truncate file %s %m", path);
        close(fd);
        goto normal_buf;
    }
    buf = (MMapedBuf*)mmap(NULL, MMAPED_BUFFER_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if(buf == MAP_FAILED){
        LOG_ERROR("map fd failed %m %d", fd);
        close(fd);
        goto normal_buf;
    }
    // mmap success: remember the backing fd for sendfile and for the dtor.
    buf->fd = fd;
    return buf;
}

/* Pool destructor: tear down a buffer made by mapped_mempool_ctor().
 * fd == 0 marks a plain heap buffer; anything else is an mmap'd region
 * with a backing file descriptor to close. */
static void mapped_mempool_dtor(void* p)
{
    MMapedBuf* buf = (MMapedBuf*)p;
    const int fd = buf->fd;
    if(fd != 0){
        close(fd);
        buf->fd = 0;
        munmap(buf, MMAPED_BUFFER_SIZE);
        return;
    }
    mempool_dtor(p);
}

/* One-time module init: build the mmap-backed buffer pool, the replicate
 * worker thread pool and the latency counters. Must run before any op_*
 * handler is dispatched. Paired with chunk_operations_fini(). */
void chunk_operations_init()
{
//    register_local_log_level("s.operations");
    mempool = new IdentityPool(100, &mapped_mempool_ctor, &mapped_mempool_dtor,
            MMAPED_BUFFER_SIZE);
    th_pool = thread_pool_new(THREAD_NUM);
//    chunk_operations_ec_init();
//    chunk_operations_ec_new_init();
    Delayer_init(&local_chunk_write_counter, "local chunk write");
    Delayer_init(&read_delay, "read delay");
}

/* Module teardown: release the buffer pool and the worker thread pool.
 * Counterpart of chunk_operations_init(); the Delayer counters are not
 * torn down here (no matching fini is visible in this file). */
void chunk_operations_fini()
{
    delete mempool;
    thread_pool_delete(th_pool);
//    chunk_operations_ec_new_fini();
//    chunk_operations_ec_fini();
}

/* Legacy read opcode: not implemented here, delegated to the stub handler. */
int op_chunk_read(st_netfd_t stfd, void* arg)
{
    int rc = op_chunk_fake(stfd, arg);
    return rc;
}

/* Handler: return the in-memory version of the requested chunk. */
int op_chunk_get_version(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkGetVersion* req = (struct Request_ChunkGetVersion*)arg;
    struct Response_ChunkGetVersion resp;
    init_response(req, &resp);

    LOG_DEBUG("%s(chunk=%08x.%u)", __func__, req->id.volumeid, req->id.index);

    struct Chunk* chunk = NULL;
    int rc = be_chunk_open(req->id.volumeid, req->id.index, &chunk);
    if (unlikely(rc < 0)) {
        REPLY_ERROR_MESSAGE_RETURN(resp, rc, "chunk open failed");
    }
    AutoClose __auto_close(chunk); // drops the chunk reference on every return path

    resp.version = chunk->version;
    REPLY_OK_RETURN(resp);
}

/* Thin tracing wrapper around be_chunk_pwrite(): logs the write location
 * and the payload CRC, then forwards the call unchanged. */
int chunk_pwrite(struct Chunk* chunk, void *buf, uint32_t len, uint64_t offset)
{
    uint32_t crc = crc32_n(buf, len);
    LOG_DEBUG("%08x.%d rawidx %ld offset %ld len %d crc32 %08x",
              chunk->volumeid, chunk->index, chunk->index_in_raw, offset, len,
              crc);
    return be_chunk_pwrite(chunk, buf, len, offset, NULL);
}

/**
 * Journal write-back callback: re-applies a journaled write to the chunk on
 * disk, enforcing version monotonicity when a valid version is supplied.
 *
 * return value
 * <0        failed, replay can't continue (ERROR_VERSION / ERROR_DISK_IO)
 * 1         failed, but replay can continue (chunk gone, or stale version)
 * towrite   success
 */
int write_back_journal_cb(uint32_t volumeid, uint32_t index, void* buf, off_t offset, size_t towrite, uint64_t version, bool inc_version, bool just_write_data)
{
    struct Chunk* chunk;
    int ret = be_chunk_open(volumeid, index, &chunk);
    if(ret < 0){
        LOG_INFO("can't open chunk %08x.%d, can continue", volumeid, index);
        return 1;
    }
    AutoClose __auto_close(chunk);

//    if(chunk->ecinfo.is_ec_chunk){
//        LOG_DEBUG("chunk %08x.%d offset %ld towrite %ld, version %ld inc_version %d crc32 %08x",
//                  volumeid, index, offset, towrite, version, inc_version, crc32_n(buf, towrite));
//    }

    if(version != INVALID_VERSION) { // check version monotonicity
        if(chunk->version_in_disk == INVALID_VERSION) {
            // No on-disk version yet: apply the data unconditionally.
            LOG_INFO("chunk %08x.%d chunk->version_in_disk in disk is -1", volumeid, index);
            goto write_disk;
        }
        if(version <= chunk->version_in_disk) {
            // Already applied; skipping the record is harmless.
            LOG_INFO("chunk %08x.%d version_in_disk is %ld income version is %ld, "
                     "this version will be omit",
                     volumeid, index, chunk->version_in_disk, version);
            return 1;
        }

        if(chunk->version_in_disk + 1 != version) {
            // A gap is only legal while the chunk is in a non-NORMAL
            // (e.g. breeding/recovery) version-change state.
            if(chunk->version_in_disk_change_state == VERSION_CHANGE_STATE_NORMAL) {
                LOG_ERROR("chunk %08x.%d version in disk is %ld incoming version is %ld",
                        volumeid, index, chunk->version_in_disk, version);
                return ERROR_VERSION;
            }
        }
    }
    write_disk:
    if(towrite){ // towrite could be 0 (pure version bump)
        ret = chunk_pwrite(chunk, buf, towrite, offset);
        if(ret != (int)towrite) {
            /* BUGFIX: towrite is size_t — %zu, not %d. */
            LOG_ERROR("chunk %08x.%d local_chunk_write_data failed, ret is %d, "
                      "towrite is %zu buf %p", volumeid, index, ret, towrite, buf);
            return ERROR_DISK_IO;
        }
    }
    else {
        /* BUGFIX: offset is off_t, towrite size_t, version uint64_t — the
         * original passed all of them to %d, which is undefined behavior
         * in varargs and printed garbage. */
        LOG_DEBUG("chunk %08x.%d offset %ld towrite %zu version %lu",
                volumeid, index, (long)offset, towrite, (unsigned long)version);
    }

    if(just_write_data) {
        return towrite;
    }

    if(version != INVALID_VERSION && inc_version) {
        be_write_version(chunk, version);
        chunk->version_in_disk = version; // chunk->version_in_disk++
        chunk->version_in_disk_change_state = VERSION_CHANGE_STATE_NORMAL;
    }

    if(version == INVALID_VERSION) {
        // Versionless write: mark the chunk as being bred/recovered.
        chunk->version_in_disk_change_state = VERSION_CHANGE_STATE_BREED;
    }

    return towrite;
}

/* Handler: create a chunk with the requested flags (incubation,
 * preallocation, checksum, EC). Rejects unknown flags; non-incubating
 * chunks are advertised to the manager via the heartbeat immediately. */
int op_chunk_create(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkCreate* req = (struct Request_ChunkCreate*)arg;
    struct Response_ChunkCreate resp;
    init_response(req, &resp);

    LOG_INFO("%s(chunk=%08x.%u)", __func__, req->id.volumeid, req->id.index);

    if (unlikely(req->flags & ~(CHUNK_CREATE_INCUBATION
                              | CHUNK_CREATE_PREALLOCATION
                              | CHUNK_CREATE_WITH_CHECKSUM
                              | CHUNK_CREATE_EC_CHUNK))) {
        LOG_ERROR("unrecognized creation flag: %x", req->flags);
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_INVALID_ARGUMENTS, "unrecognized creation flag");
    }

    bool incubation = (req->flags & CHUNK_CREATE_INCUBATION);
    bool preallocation = (req->flags & CHUNK_CREATE_PREALLOCATION);
    bool with_checksum = (req->flags & CHUNK_CREATE_WITH_CHECKSUM);
    bool ec_chunk = (req->flags & CHUNK_CREATE_EC_CHUNK);
    /* BUGFIX: size32_convert_to64() yields a 64-bit value; %d truncated it.
     * Cast to long and print with %ld. */
    LOG_INFO("op_chunk_create chunk %08x.%d incubation %d preallocation %d with_checksum %d ec_chunk %d size %ld",
             req->id.volumeid, req->id.index, incubation, preallocation, with_checksum, ec_chunk,
             (long)size32_convert_to64(req->size));

    int ret = be_chunk_create(req->id.volumeid, req->id.index, size32_convert_to64(req->size), preallocation, incubation, with_checksum, ec_chunk, NULL);

    if (unlikely(ret < 0)) {
        REPLY_ERROR_MESSAGE_RETURN(resp, ret, "chunk creation failed");
    }

    // Incubating chunks only join the heartbeat once incubation completes.
    if(!incubation)
        heartbeat_add_chunk(req->id.bits);

    REPLY_OK_RETURN(resp);
}

/* Handler: delete a chunk. The chunk is withdrawn from the heartbeat first
 * so the manager stops routing to it, then its data and its journal-lite
 * records are removed. */
int op_chunk_delete(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkDelete* req = (struct Request_ChunkDelete*)arg;
    struct Response_ChunkDelete resp;
    init_response(req, &resp);

    LOG_INFO("%s(chunk=%08x.%u)", __func__, req->id.volumeid, req->id.index);

    // Stop advertising the chunk before tearing it down.
    heartbeat_remove_chunk(req->id.bits);

    int rc = be_chunk_delete(req->id.volumeid, req->id.index);
    if (unlikely(rc < 0)) {
        REPLY_ERROR_MESSAGE_RETURN(resp, rc, "chunk delete failed");
    }

    journal_lite_remove(req->id.volumeid, req->id.index);
    REPLY_OK_RETURN(resp);
}

/* Handler: suspend (or resume) writes on a chunk. When suspending, the chunk
 * is marked suspended and the handler polls up to SUSPEND_LOOP times (50ms
 * apart) for in-flight replicate writes to drain before confirming. */
int op_chunk_suspend_write(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkSuspendWrite* req = (struct Request_ChunkSuspendWrite*)arg;
    struct Response_ChunkSuspendWrite resp;
    init_response(req, &resp);

    LOG_INFO("%s(start)(chunk=%08x.%u)(to_suspend=%d)", __func__, req->id.volumeid, req->id.index, req->to_suspend);

    if (req->to_suspend)
    {
        struct Chunk* chunk;
        int ret = be_chunk_open(req->id.volumeid, req->id.index, &chunk);
        if (unlikely(ret < 0)){
            REPLY_ERROR_MESSAGE_RETURN(resp, ret, "error open chunk, so suspend failed");
        }
        AutoClose __auto_close(chunk);

        // Mark suspended first so no new writes start while we drain.
        add_write_suspended_chunks(req->id.volumeid, req->id.index);

#define SUSPEND_LOOP (100)
        int i = 0;
        for(i = 0; i < SUSPEND_LOOP && chunk->inflight_write_replicate > 0; i++){
            int ref = be_chunk_get_reference(req->id.volumeid, req->id.index);
            LOG_INFO(" chunk inflight_write_replicate (%d) ! WAIT SOMETIME, chunk %08x.%d, "
                     "wait 50ms chunk ref is %d",
                     chunk->inflight_write_replicate, req->id.volumeid, req->id.index, ref);
            st_usleep(50000);
        }
        /* BUGFIX: test the actual drain condition, not the loop counter.
         * The original `i == SUSPEND_LOOP` misreported failure when the last
         * in-flight write completed during the final sleep. */
        if(chunk->inflight_write_replicate > 0){
            LOG_ERROR("%s chunk %08x.%d suspend failed", __func__, req->id.volumeid, req->id.index);
            remove_write_suspended_chunks(req->id.volumeid, req->id.index);
            REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_CANT_SUSPEND, "error cant suspend chunk");
        }
    }
    else
    {
        remove_write_suspended_chunks(req->id.volumeid, req->id.index);
    }
    LOG_DEBUG("%s(end)(chunk=%08x.%u)", __func__, req->id.volumeid, req->id.index);

    REPLY_OK_RETURN(resp);
}

#define LOCK_LENGTH (1024*1024)
/* Stream `count` bytes of chunk data to the client socket using sendfile,
 * in LOCK_LENGTH slices, each sent under the chunk's foreground range lock.
 * Returns the byte count on success, -1 on error. */
static int chunk_sendfile(st_netfd_t out_fd, struct Chunk* chunk, off_t offset, size_t count)
{
    /* BUGFIX: matched format specifiers (off_t -> %ld via cast, size_t -> %zu). */
    LOG_DEBUG("%s(chunk=%08x.%u, offset=%ld, count=%zu)",
              __func__, chunk->volumeid, chunk->index, (long)offset, count);

    uint64_t ts = statis_begin_io();
    size_t _count = count, sends = 0;
    while(count > 0)
    {
        foreground_range_lock(chunk, offset, offset + LOCK_LENGTH - 1);
        int fd = chunk->fd;
        off_t raw_offset = offset;
        // Translate the chunk-relative offset to the raw file fd/offset.
        int ret = be_chunk_raw_info(chunk, &fd, &raw_offset, offset);
        if (unlikely(ret != 0)) {
            LOG_DEBUG("be_chunk_raw_info error.");
        }
        ssize_t n = st_sendfile(out_fd, fd, &raw_offset, MIN(count, LOCK_LENGTH), &sends);
        foreground_unlock(chunk);
        if(n <= 0) {
            /* BUGFIX: treat n == 0 as failure too — with count > 0 it would
             * otherwise loop forever. */
            goto error;
        }
        count -= n;
        /* BUGFIX: advance the chunk offset by the bytes actually sent.
         * The original never moved `offset`, so any transfer needing more
         * than one st_sendfile call resent the same region. */
        offset += n;
    }

    statis_end_io(ts, _count/SIZE_1K);
    LOG_DEBUG("sent %zu bytes via %zu sendfile() call(s)", _count, sends);
    return (int)_count;

    error:
    statis_end_io(ts, (_count - count)/SIZE_1K);
    return -1;
}

// only for performance test and comparison
// only for performance test and comparison:
// read the whole range into a pooled buffer (under the foreground range
// lock) and push it to the socket with a plain st_write(), instead of
// sendfile. Enabled via the `use_no_sendfile` debug switch.
// NOTE(review): only the first LOCK_LENGTH bytes are locked even if `count`
// is larger — presumably callers never exceed the buffer size; verify.
static int chunk_sendfile_fake(st_netfd_t out_fd, struct Chunk* chunk, off_t offset, size_t count)
{
    LOG_DEBUG("%s(chunk=%08x.%u, offset=%llu, count=%u)",
              __func__, chunk->volumeid, chunk->index, offset, count);

    DECLARE_1BUF(buf);
    foreground_range_lock(chunk, offset, offset + LOCK_LENGTH - 1);
    int ret = be_chunk_pread(chunk, buf, count, offset, NULL);
    if (ret < 0) {
        LOG_ERROR("failed to read file: %d, %s", errno, strerror(errno));
    } else {
        ret = st_write(out_fd, buf, count, ST_UTIME_NO_TIMEOUT);
        if (ret < 0) {
            LOG_ERROR("failed to st_write(): %d, %s", errno, strerror(errno));
        } else {
            // Success: report the full requested byte count.
            ret = (int)count;
        }
    }
    foreground_unlock(chunk);
    return ret;
}

/*
 * Stream [offset, offset+count) of a checksummed chunk to the client,
 * verifying every CRC32_BLOCK_SIZE block against the on-disk CRCs before it
 * leaves the server. The requested range is first widened to CRC block
 * boundaries (pre_len bytes before `offset`, suf_len bytes after the end);
 * those extra bytes are read and verified but trimmed from what is sent.
 *
 * On a CRC mismatch the chunk error is reported to the manager and -1 is
 * returned; other failures return the negative error from read/write.
 * On success returns the remaining `count` (0 when everything was sent).
 */
int checksum_sendfile(st_netfd_t out_fd, struct Chunk* chunk, off_t offset, size_t count)
{
    size_t _count = count;
    off_t end = offset + count - 1;                              // last requested byte
    off_t align_offset = LOWER_FLOOR(offset, CRC32_BLOCK_SIZE);  // round start down to a CRC block
    uint32_t pre_len = offset - align_offset;                    // extra bytes before the request
    off_t align_end = UPPER_FLOOR(end, CRC32_BLOCK_SIZE) - 1;    // round end up to a CRC block
    uint32_t suf_len = align_end - end;                          // extra bytes after the request
    count = align_end - align_offset + 1;                        // aligned byte count to read/verify
    off_t new_offset = align_offset;
    LOG_DEBUG("old count is %d, new count is %d for align", _count, count);

    DECLARE_1BUF(buf);
    int ret = 0, retc;
    uint64_t ts = statis_begin_io();
    while(count > 0)
    {
        foreground_range_lock(chunk, new_offset, new_offset + LOCK_LENGTH - 1);
#ifndef DIV_CHUNK
        retc = journaled_st_pread(chunk, buf, MIN(count, LOCK_LENGTH), new_offset);
#else
        retc = pread_div(chunk, buf, MIN(count, LOCK_LENGTH), new_offset);
#endif
        foreground_unlock(chunk);
        if (retc <= 0) {
            LOG_ERROR("failed to read file: %d, %s", errno, strerror(errno));
            ret = retc < 0 ? retc : -1;
            break;
        }
        // One CRC per full block actually read in this slice.
        int crc_count = retc / CRC32_BLOCK_SIZE;
        uint32_t crcs_on_disk[4096];
        uint32_t crcs_computed[4096];
#ifndef DIV_CHUNK
        read_crc32_from_disk(chunk, crcs_on_disk, crc_count, new_offset/*buf offset*/);
#else
        read_crc32s_from_disk_div(chunk, crcs_on_disk, crc_count, new_offset/*buf offset*/);
#endif
        // NOTE(review): the LOG_DEBUG/LOG_FATAL calls below pass more
        // arguments than the format consumes (log_buf(...) extras) —
        // presumably the project's LOG_* macros handle the trailing
        // log_buf output specially; verify before "fixing".
        LOG_DEBUG("crc32_4k_sequence size %d", retc, log_buf(buf, retc));
        crc32_4K_sequence((char*)buf, retc, crcs_computed);
        if(0 != memcmp(crcs_on_disk, crcs_computed, sizeof(uint32_t) * crc_count)){
            LOG_FATAL("crc on disk not equal to crc computed, wrong data, volumeid %x index %d", chunk->volumeid, chunk->index,
                      log_buf(buf, retc), log_buf(crcs_on_disk, sizeof(uint32_t) * crc_count), log_buf(crcs_computed, sizeof(uint32_t) * crc_count));
            ret = -1;

            // Report the corrupt chunk to the manager so it can re-replicate.
            uint32_t self_ip;
            uint16_t self_port;
            get_self_ip_port(&self_ip, &self_port);
            int retr = request_manager_report_chunk_error2(self_ip, self_port, chunk->volumeid, chunk->index, CHUNK_READ3, ERROR_DISK_CHECKSUM);
            CHECK_REQUEST_MANAGER_ERROR(retr);
            break;
        }
        // Trim the alignment padding: pre_len from the first slice,
        // suf_len from the slice that covers the requested end.
        char* _buf = (char*)buf;
        uint32_t send_nb = retc;
        if(offset >= new_offset){
            LOG_DEBUG("start offset set pre_len %d", pre_len);
            _buf += pre_len;
            send_nb -= pre_len;
        }
        if(new_offset + retc >= end){
            LOG_DEBUG("end offset set suf_len %d", suf_len);
            send_nb -= suf_len;
        }

        ret = st_write(out_fd, _buf, send_nb, URSAX_TIMEOUT);
        if (ret < 0) {
            LOG_ERROR("failed to st_write(): %d, %s", errno, strerror(errno));
            break;
        }

        count -= retc;
        new_offset += retc;
    }

    statis_end_io(ts, (_count - count)/SIZE_1K);
    return ret < 0 ? ret : count;
}

/* Dispatch a chunk read to the right transport after range-checking it:
 *  - big-journal chunks go through ec_or_big_journal_sendfile(),
 *  - plain chunks use raw sendfile (or the no-sendfile debug fallback),
 *  - checksummed chunks are verified block-by-block in checksum_sendfile(). */
static int chunk_sendfile2(st_netfd_t out_fd, struct Chunk* chunk,
        uint64_t offset, uint64_t count)
{
    /* BUGFIX: matched format specifiers — offset/count are uint64_t
     * (the original mixed %llu and %u). */
    LOG_DEBUG("%s(chunk=%08x.%u, offset=%lu, count=%lu)",
              __func__, chunk->volumeid, chunk->index,
              (unsigned long)offset, (unsigned long)count);

    // Reject reads outside the chunk's valid data range.
    if(unlikely((int64_t)offset < 0 || offset > chunk->checksum_file_offset
    || offset+count > chunk->checksum_file_offset)) {
        /* BUGFIX: checksum_file_offset was printed with %d; print it as long. */
        LOG_ERROR("%s over_range offset %lu count %lu file_length %ld, chunk %x.%d",
                  __func__, (unsigned long)offset, (unsigned long)count,
                  (long)chunk->checksum_file_offset, chunk->volumeid, chunk->index);
        return -1;
    }

    if(get_big_journal(chunk->use_journal_hint))
//    || chunk->ecinfo.ec_journal)
    {
        return ec_or_big_journal_sendfile(out_fd, chunk, offset, count);
    }

    if(!chunk->disk_checksum){
        return use_no_sendfile ? chunk_sendfile_fake(out_fd, chunk, offset, count):
               chunk_sendfile(out_fd, chunk, offset, count);
    }

    return checksum_sendfile(out_fd, chunk, offset, count);
}

/* Handler: read `size` bytes from a chunk, version-gated by the client's
 * last known success_version. Protocol: the response header (announcing the
 * data length) is sent first with reply1(), then the payload is streamed
 * with chunk_sendfile2() — so errors after reply1 can only surface as a
 * connection error, not as an error response. */
int op_chunk_read4(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkRead4* req = (struct Request_ChunkRead4*)arg;
    struct Response_ChunkRead4 resp;
    init_response(req, &resp);

    LOG_DEBUG("%s(chunk=%08x.%u, offset=%u, len=%u version=%ld)", __func__,
              req->id.volumeid, req->id.index, req->offset, req->size, req->success_version);

    struct Chunk* chunk;
    int ret = be_chunk_open(req->id.volumeid, req->id.index, &chunk);
    if(unlikely(ret < 0)){
        REPLY_ERROR_MESSAGE_RETURN(resp, ret, "chunk open failed");
    }
    AutoClose __auto_close(chunk);

    // Refuse to serve data older than what the client already saw,
    // or from a chunk with an invalid (-1) version.
    if(chunk->version < req->success_version || chunk->version == -1UL){
        LOG_ERROR("%s chunk %08x.%d client version is %ld chunk version is %ld",
                  __func__, chunk->volumeid, chunk->index, req->success_version, chunk->version);
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_VERSION, "chunk open failed");
    }
    resp.retcode = 0;
    resp.data.length = req->size;
    // Header first; from here on only connection errors are possible.
    int ret1 = reply1(stfd, &resp, sizeof(resp));
    if (unlikely(ret1 < 0)){
        LOG_ERROR("op_chunk_read4 reply1 error");
        return MK_CONNECTION_ERROR(ret1);
    }

    begin_proc(&read_delay);
    int ret2 = chunk_sendfile2(stfd, chunk, req->offset, req->size);
    end_proc(&read_delay);
    if(unlikely(ret2 < 0)){
        log_socket_error(stfd, "op_chunk_read4: sendfile");
        return MK_CONNECTION_ERROR(ret2);
    }

    // Total bytes pushed to the socket (header + payload).
    return ret1 + ret2;
}

//if(0 == strcmp(str, "concurrent1")) choose_index = 1
// Dispatch table for the replicate-write opcode; `choose_index` (set
// elsewhere, see the comment above) picks the implementation.
typedef int (*op_handler_t)(st_netfd_t stfd, void* arg);
op_handler_t op_map[] = {
        op_chunk_write_replicate4,
        op_chunk_write_replicate4_1,
};

// NOTE(review): choose_index is not bounds-checked here; it must stay
// within [0, 1] wherever it is assigned — verify at the assignment site.
int op_chunk_write_replicate4all(st_netfd_t stfd, void* arg)
{
    return op_map[choose_index](stfd, arg);
}

/* Send a pool buffer to a socket. File-backed buffers (fd != 0, see
 * mapped_mempool_ctor) use zero-copy sendfile from offset 0; plain heap
 * buffers fall back to an ordinary socket write. */
int send_mmaped_buf(st_netfd_t out_fd, void* _buf, size_t count)
{
    MMapedBuf* buf = (MMapedBuf*)_buf;
    if (buf->fd) {
        off_t buf_offset = 0;
        return st_sendfile(out_fd, buf->fd, &buf_offset, count, NULL);
    }
    return st_write(out_fd, buf, count, URSAX_TIMEOUT);
}

static void* write_replicate_stub4(void *arg)
{
    struct concurrent_write_task* ctask = (struct concurrent_write_task*)arg;
    struct Chunk* chunk = ctask->chunk;
    int i = ctask->task_index;

    if(ctask->servers[i].ip == g_csi->ip_int
    && ctask->servers[i].port == g_csi->port) {
        LOG_WARN("write_replicate_stub4 write to self chunk %08x.%d (%s:%d) i is %d",
                chunk->volumeid, chunk->index, str_ip(g_csi->ip_int), g_csi->port, i);
        ctask->remote_fault[i] = 0;
        return NULL;
    }

    int ret = ctask->remote_fault[i] = pooled_chunk_write4x(
            ctask->servers[i].ip, ctask->servers[i].port,
            chunk->volumeid, chunk->index, ctask->version,
            ctask->offset, ctask->buf, ctask->len, ctask->flag,
            ctask->client_time, (send_cb_t)send_mmaped_buf);

    if (unlikely(ret != 0))
    {
        ctask->nfaults++;
        LOG_WARN("chunk_write(chunk=%08x.%u, addr %s:%u) failed with %d",
                 chunk->volumeid, chunk->index, str_ip(ctask->servers[i].ip),
                 ctask->servers[i].port, ret);
    }

    return NULL;
}

/* Handler: primary-side replicated write. Reads the payload from the client,
 * fans it out to the follower servers through the worker pool (one worker
 * per follower, write_replicate_stub4), writes locally in parallel, then
 * waits for all workers. Follower failures are reported to the manager and
 * echoed back in resp.ret_values. */
int op_chunk_write_replicate4(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkWriteReplicate4* req =
            (struct Request_ChunkWriteReplicate4*)arg;
    struct Response_ChunkWriteReplicate4 resp;
    init_response(req, &resp);
    memset(resp.ret_values, 0, sizeof(resp.ret_values));

    LOG_DEBUG("%s(chunk=%08x.%u), data len %d offset %d start",
              __func__, req->id.volumeid, req->id.index, req->data.length,
              req->offset);

    check_wait_write_suspended(req->id.volumeid, req->id.index);

    DECLARE_1BUF(buf);
    size_t count = req->data.length;

    /* BUGFIX: validate the payload length BEFORE reading it into `buf`.
     * The original read req->data.length bytes first and only checked it
     * against MEMPOOL_BUFFER_SIZE afterwards, so an oversized request
     * overflowed the buffer. The oversized payload is still drained from
     * the socket (in buffer-sized slices) so the connection stays in sync
     * for the next request. */
    if(unlikely(req->data.length > MEMPOOL_BUFFER_SIZE)){
        LOG_ERROR("req->data.length is too big %d max length allow is %d, "
                  "chunk %08x.%d",
                  req->data.length, MEMPOOL_BUFFER_SIZE, req->id.volumeid,
                  req->id.index);
        size_t left = count;
        while(left > 0){
            size_t nb = MIN(left, (size_t)MEMPOOL_BUFFER_SIZE);
            int r = st_read_fully(stfd, buf, nb, URSAX_TIMEOUT);
            if(unlikely(r < (int)nb)){
                log_socket_error(stfd, "st_read_fully");
                return MK_CONNECTION_ERROR(-1);
            }
            left -= nb;
        }
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_INTERNAL, "data.length is too big");
    }

    int ret = st_read_fully(stfd, buf, count, URSAX_TIMEOUT);
    if(unlikely(ret < (long int)count)){
        log_socket_error(stfd, "st_read_fully");
        LOG_ERROR("%s(chunk=%08x.%u), data len %d offset %d end %m, "
                  "st_read_fully error",
                  __func__, req->id.volumeid, req->id.index, req->data.length,
                  req->offset);
        return MK_CONNECTION_ERROR(-1);
    }

    // Track "new replica" state so the client knows whether to refresh
    // its replica list.
    if(CHECK_NEW_FLAG(req->flag)){
        resp.need_to_update_replicas = check_new_replica_record(req->servers,
                req->id.volumeid, req->id.index);
    } else {
        resp.need_to_update_replicas = false;
        remove_new_replica_record(req->id.volumeid, req->id.index);
    }

    struct Chunk* chunk;
    ret = be_chunk_open_for_write(req->id.volumeid, req->id.index,
            &chunk, true, ERROR_LOCKED);
    if (unlikely(ret < 0)){
        LOG_ERROR("chunk %08x.%d open failed for write rep 4",
                req->id.volumeid, req->id.index);
        REPLY_ERROR_MESSAGE_RETURN(resp, ret, "chunk open failed");
    }
    AutoClose __auto_close(chunk);
    resp.chunk_version = chunk->version;

    if(unlikely(chunk->being_incubated)){
        LOG_ERROR("chunk %08x.%d is being incubated, cant do write rep",
                chunk->volumeid, chunk->index);
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_CHUNK_NOT_FOUND,
                "chunk is being incubated, can't do this");
    }

    if(unlikely(chunk->version < req->data_version
    || (chunk->version == INVALID_VERSION))) {
        LOG_ERROR("chunk %08x.%d is version is less than req version, "
                  "chunkv %ld datav %ld",
                  chunk->volumeid, chunk->index, chunk->version, req->data_version);
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_VERSION,
                "primary version less than req version or main is -1");
    }

    // ctask lives on this stack frame; workers_tracker_wait() below
    // guarantees all workers are done before any return.
    off_t offset = req->offset;
    struct workers_tracker tracker;
    workers_tracker_init(&tracker);
    struct concurrent_write_task ctask;
    ctask.chunk = chunk;
    ctask.buf = buf;
    ctask.servers = req->servers;
    ctask.version = req->data_version;
    ctask.offset = offset;
    ctask.len = count;
    ctask.flag = req->flag;
    ctask.client_time = req->client_time;

    // Fan out to followers; the list is terminated by ip == 0.
    for(int i = 0; i < MAX_FOLLOWING_SERVERS; i++){
        if(unlikely(req->servers[i].ip == 0)){
            break;
        }
        ctask.task_index = i;
        pool_thread_create(th_pool, &write_replicate_stub4, &ctask, &tracker);
        st_sleep(0); // yield so the worker can copy task_index before it changes
    }
    st_sleep(0);
    int nwritten = 0;
    if(likely(req->data_version == chunk->version)){
        nwritten = local_chunk_write(chunk, buf, offset, count, true,
                req->client_time);
    }else{
        // Stale version: skip the local write but still report success
        // for the replication step.
        nwritten = count;
        LOG_WARN("chunk %08x.%d req version is %ld chunk version is %ld, "
                 "this version will be omit", chunk->volumeid, chunk->index,
                 req->data_version, chunk->version);
    }
    resp.chunk_version = chunk->version;
    START_RECORD();
    workers_tracker_wait(&tracker, WORKERS_TRACKER_WAIT_MAX_SECOND);
    END_RECORD();

    if(unlikely(nwritten < (long int)count)){
        LOG_ERROR("%s(chunk=%08x.%u), data len %d offset %d end, "
                  "local chunk write error nwritten %d",
                  __func__, req->id.volumeid, req->id.index, req->data.length,
                  req->offset, nwritten);
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_DISK_IO, "chunk write failed");
    }

    if(unlikely(ctask.nfaults > 0)){
        // Report every failed follower to the manager and surface the
        // per-follower errors to the client.
        for(int i = 0; i < MAX_FOLLOWING_SERVERS; i++){
            if (ctask.remote_fault[i]){
                ret = request_manager_report_chunk_error2(
                        req->servers[i].ip, req->servers[i].port,
                        req->id.volumeid, req->id.index,
                        req->header.op, ctask.remote_fault[i]);
                CHECK_REQUEST_MANAGER_ERROR(ret);
                resp.ret_values[i] = ctask.remote_fault[i];
            }
        }
        LOG_WARN("total %d slave server write failed", ctask.nfaults);
        LOG_WARN("%s_SOME_FAILED(chunk=%08x.%u), data len %d offset %d end",
                __func__, req->id.volumeid, req->id.index, req->data.length,
                req->offset);
        REPLY_ERROR_MESSAGE_RETURN(resp, ctask.nfaults, "some replicate write failed");
    }

    LOG_DEBUG("%s(chunk=%08x.%u), data len %d offset %d end ok execute",
            __func__, req->id.volumeid, req->id.index, req->data.length, req->offset);
    REPLY_OK_RETURN(resp);
}

// temporarily no use: alternate replicate-write handler slot in op_map[],
// currently delegated to the stub.
int op_chunk_write_replicate4_1(st_netfd_t stfd, void* arg)
{
    int rc = op_chunk_fake(stfd, arg);
    return rc;
}

/*
 * Write `towrite` bytes at `offset` into the chunk, looping on partial
 * writes. Each slice is written under the foreground range lock, either
 * through the big journal (sync_chunk_pwrite) or directly (chunk_pwrite /
 * pwrite_div). A failed journal write marks the journal broken and kicks
 * off async version wipe-out.
 *
 * Returns `towrite` on success; on failure returns the (negative) write
 * result, or -1 if the write returned 0.
 */
int local_chunk_write_data(struct Chunk* chunk, void* buf, int towrite,
        off_t offset, uint64_t version, uint64_t time)
{
    int count = towrite;
    off_t pos = offset;
    while (count > 0)
    {
        foreground_range_lock(chunk, pos, pos + count -1 );
        // NOTE(review): DEBUG_INCUBATE appears twice back-to-back —
        // looks like an accidental duplicate; confirm before removing.
        DEBUG_INCUBATE;
        DEBUG_INCUBATE;
#ifndef DIV_CHUNK
        int nwritten;

        // Big-journal chunks go through the synchronous journal write;
        // others write the chunk file directly.
        if(get_big_journal(chunk->use_journal_hint)){
            nwritten = sync_chunk_pwrite(get_big_journal(chunk->use_journal_hint),
                                         chunk->volumeid, chunk->index, buf, count,
                                         pos, version, time);
        }
        else{
            nwritten = chunk_pwrite(chunk, buf, count, pos);
        }
#else
        int nwritten;
		if(chunk->disk_checksum){
			nwritten = pwrite_div(chunk, buf, count, pos);
		}else{
			nwritten = chunk_pwrite(chunk, buf, count, pos);
		}
#endif

        foreground_unlock(chunk);

        if (unlikely(nwritten < 0)) {
            // First journal failure: flag the journal broken and start
            // wiping journal versions asynchronously.
            if(get_big_journal(chunk->use_journal_hint) && !current_journal_broken){
                current_journal_broken = true;
                async_wipe_out_journal_version();
            }
        }
        if (unlikely(nwritten <= 0)) {

            LOG_ERROR("%s(chunk=%08x.%u, offset=%llu, towrite=%u) nwritten "
                      "is %d buf point is %p",
                      __func__, chunk->volumeid, chunk->index, offset,
                      towrite, nwritten, buf);
//			statis_end_io(ts, (towrite - count)/SIZE_1K);
            return nwritten != 0 ? nwritten : -1;
        }
        // Advance past the bytes actually written (handles short writes).
        pos += nwritten;
        count -= nwritten;
        buf = (void*)((uint64_t)buf + nwritten);
    }
    return towrite;
}

/* Record a completed write range in the chunk's lite journal. */
static inline void chunk_journal_add(struct Chunk* c, off_t offset, size_t towrite)
{
    journal_lite_add(&c->journal, offset, towrite);
}

/*
 * Full local write path: range-check, write the data (journaled when the
 * chunk has one), optionally refresh the on-disk checksums, bump and persist
 * the chunk version, record the range in the lite journal and feed the
 * incubation checker. Latency is accounted in local_chunk_write_counter.
 *
 * Returns `towrite` on success, -1 on any failure.
 */
int local_chunk_write(struct Chunk* chunk, void* buf, uint64_t offset, uint64_t towrite, bool updatechecksum, time_t client_time)
{
    LOG_DEBUG("%s(chunk=%08x.%u, rawidx=%llu, offset=%llu, towrite=%u)",
              __func__, chunk->volumeid, chunk->index, chunk->index_in_raw, offset, towrite);
    int ret = 0, retu = 0;
    START_RECORD();
    begin_proc(&local_chunk_write_counter);
    uint64_t ts;
    // Journal records carry the version this write will produce
    // (current + 1), or -1 when the chunk has no valid version.
    uint64_t journal_record_version = chunk->version != -1UL ? chunk->version + 1 : -1UL;
//    if(chunk->ecinfo.is_ec_chunk){
//        journal_record_version = -1UL;
//    }

    // Reject writes outside the chunk's valid data range.
    if(unlikely((int64_t)offset < 0 || offset > chunk->checksum_file_offset || offset+towrite > chunk->checksum_file_offset)){
        LOG_ERROR("%s over_range offset %ld towrite %ld file_length %d, chunk %x.%d",
                  __func__, offset, towrite, chunk->checksum_file_offset, chunk->volumeid, chunk->index);
        goto error_out;
    }
    ts = statis_begin_io();
    ret = local_chunk_write_data(chunk, buf, towrite, offset, journal_record_version, client_time);
    statis_end_io(ts, towrite/SIZE_1K);
    if(ret != (int)towrite){
        goto error_out;
    }

    if(updatechecksum){
        retu = update_checksum_after_write(chunk, (char*)buf, towrite, offset);
        if(retu < 0){
            goto error_out;
        }
#ifdef DIV_CHUNK
        #ifdef MSYNC
		if(chunk->disk_checksum){
			flush_whole_file(chunk);
		}
#endif
#endif
    }

    if(likely(chunk->version != (uint64_t)-1)){
        // ec chunks will have this version inc too
        // and ec chunks have separate version too
        chunk->version++;
        // Big-journal chunks persist the version through the journal
        // replay instead of writing it here.
        if(!get_big_journal(chunk->use_journal_hint)){
            ret = be_chunk_write_version(chunk);
            if (unlikely(ret < 0)) {
                // todo: should we make this struct chunk pinned in memory?
                LOG_ERROR("failed to update chunk version, after data has been written!!");
                goto error_out;
            }
        }
    }

    chunk_journal_add(chunk, offset, towrite);

    write_check_incubation(chunk, offset, towrite, buf);
    end_proc(&local_chunk_write_counter);
    END_RECORD();
    return towrite;
    error_out:
    end_proc(&local_chunk_write_counter);
    END_RECORD();
    return -1;
}

/* Handler: follower-side (non-replicated) chunk write. Reads the payload,
 * then three-way compares versions: chunk older than request (or invalid
 * while request's is valid) -> ERROR_VERSION with a descriptive message;
 * chunk newer than request -> the write is silently skipped and OK is
 * returned; equal -> the data is written locally. */
int op_chunk_write4(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkWrite4* req = (struct Request_ChunkWrite4*)arg;
    struct Response_ChunkWrite4 resp;
    init_response(req, &resp);

    LOG_DEBUG("%s(chunk=%08x.%u, offset=%llu, len=%u)", __func__,
              req->id.volumeid, req->id.index, req->offset, req->data.length);

    remove_new_replica_record(req->id.volumeid, req->id.index);

    // Drain the payload from the socket before any version checks, so the
    // connection stays in sync even when the write is rejected.
    DECLARE_1BUF(buf);
    size_t count = req->data.length;
    int nread = st_read_fully(stfd, buf, count, URSAX_TIMEOUT);
    if(unlikely(nread < (int)count)){
        LOG_ERROR("%s chunk %08x.%d st read fully", __func__, req->id.volumeid,
                req->id.index);
        log_socket_error(stfd, "st_read_fully");
        return MK_CONNECTION_ERROR(-1);
    }

    struct Chunk* chunk;
    int ret = be_chunk_open_for_write(req->id.volumeid, req->id.index,
            &chunk, false, ERROR_LOCKED);
    if(unlikely(ret < 0)){
        REPLY_ERROR_MESSAGE_RETURN(resp, ret, "chunk open failed");
    }
    AutoClose __auto_close(chunk);

    // Chunk is behind the request (or has no valid version while the
    // request does): refuse with a detailed ERROR_VERSION reply.
    if(unlikely(chunk->version < req->version
    || (chunk->version == INVALID_VERSION
    && req->version != INVALID_VERSION))) {
        LOG_WARN("chunk version doesn't match, req.ver is %lu, chunk.ver is %lu, "
                 "chunk %x.%d, self_addr %s:%d",
                 req->version, chunk->version, chunk->volumeid, chunk->index,
                 str_ip(g_csi->ip_int) , g_csi->port);
        {
            resp.retcode = ERROR_VERSION;
            char msg[1024];
            int msg_len = sprintf(msg, "chunk %08x.%d version doesn't match "
                                       "req.ver is %lu, chunk.ver is %lu, "
                                       "chunk.addr is %s:%d",
                                  chunk->volumeid, chunk->index, req->version,
                                  chunk->version, str_ip(g_csi->ip_int) , g_csi->port);
            resp.msg.length = msg_len;
            reply2(stfd, &resp, sizeof(resp), msg, resp.msg.length);
            return ERROR_VERSION;
        }
    }
    // Chunk is ahead of the request: the write was already applied, skip it.
    if(unlikely(chunk->version > req->version)){
        LOG_WARN("chunk %08x.%d version is %ld req version is %ld this write "
                 "will be omit",
                 chunk->volumeid, chunk->index, chunk->version, req->version);
        REPLY_OK_RETURN(resp);
    }

    size_t offset = req->offset;
    int nwritten = local_chunk_write(chunk, buf, offset, nread, true, req->client_time);
    if(nwritten < nread){
        LOG_ERROR("%s local_chunk_write failed chunk %08x.%d %m", __func__,
                chunk->volumeid, chunk->index);
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_DISK_IO, "chunk write error");
    }

    REPLY_OK_RETURN(resp);
}

/* Handler: flush a chunk's pending data to stable storage. */
int op_chunk_flush(st_netfd_t stfd, void* arg)
{
    struct Request_ChunkFlush* req = (struct Request_ChunkFlush*)arg;
    struct Response_ChunkFlush resp;
    init_response(req, &resp);

    LOG_DEBUG("%s(chunk=%08x.%u)", __func__, req->id.volumeid, req->id.index);

    struct Chunk* chunk = NULL;
    int rc = be_chunk_open(req->id.volumeid, req->id.index, &chunk);
    if (unlikely(rc < 0)) {
        REPLY_ERROR_MESSAGE_RETURN(resp, rc, "chunk open failed");
    }
    AutoClose __auto_close(chunk); // releases the chunk on every return path

    rc = be_chunk_flush(chunk);
    if (unlikely(rc < 0)) {
        REPLY_ERROR_MESSAGE_RETURN(resp, ERROR_DISK_IO, "chunk flush failed");
    }
    REPLY_OK_RETURN(resp);
}
