//
// Created by root on 11/8/18.
//

#ifndef URSAX_BACKEND_FS_H
#define URSAX_BACKEND_FS_H

#include <stdio.h>
#include <st.h>
#include <dirent.h>
#include "ursax.h"
#include "log.h"
#include "debug.h"
#include "protocol.h"
#include "range-lock.h"
#include "journal-lite.h"

#ifndef SIZE_4K
#define SIZE_4K (4096)
#endif

#define CRC_MAGIC 0x89abcdef

static inline bool is_align(uint64_t size, uint32_t align_size)
{
    // True when `size` is an exact multiple of `align_size`.
    // NOTE(review): align_size == 0 would divide by zero; callers must pass
    // a non-zero alignment (see is_size_align / o_direct_size).
    return (size % align_size) == 0;
}

// On-disk per-chunk metadata record, packed and padded to exactly SIZE_4K
// bytes so it occupies one 4 KiB block.
struct ChunkExtra4K
{
    struct ceInfo{
        uint32_t crc_magic; // must equal CRC_MAGIC (0x89abcdef) for a valid record
        uint8_t ec_chunk; // non-zero when this is an erasure-coded chunk — TODO confirm encoding
        char padding[3]; // explicit pad: keeps the union below 8-byte aligned within the packed struct
        union{
            uint64_t ec_data_version; // init is 0, for data chunk
            uint64_t ec_data_versions[EC_MAX_K*2]; // init is 0, for ec_chunk; presumably one slot per EC member (data + parity) — verify against EC code
        };
    } info;
    char padding[SIZE_4K - sizeof(struct ceInfo)]; // pad total struct size up to SIZE_4K
} __attribute__ ((packed));

typedef struct ChunkExtra4K ChunkExtra4K;

// In-memory state for one open chunk (a fixed-size piece of a volume).
struct Chunk
{
    union
    {
        struct
        {
            uint32_t volumeid, index; // chunk identity: owning volume id + chunk index
        };
        uint64_t key; // same identity viewed as one 64-bit key
        // NOTE(review): this overlay matches chunk_make_key() (index in the
        // high 32 bits, volumeid in the low 32) only on little-endian hosts.
    };
    uint64_t version;
    uint64_t version_in_disk;
#define VERSION_CHANGE_STATE_INIT 	(0)
#define VERSION_CHANGE_STATE_NORMAL (1)
#define VERSION_CHANGE_STATE_BREED  (2)
    char version_in_disk_change_state; // journal write-back state; one of the VERSION_CHANGE_STATE_* values above (original note on the transition sequence was garbled — confirm against the journal code)
    int inflight_write_replicate; // count of in-flight replicate writes — TODO confirm
    struct RangeLock lock; // byte-range lock; owner id 0 is the foreground path (see foreground_range_lock)
    // struct CSEP primary;
    // struct CSEP incubating_to;	// todo: not supporting concurrent incubation per chunk
    // st_thread_t incubating_thread;
    // uint32_t incubating_progress;
    int fd; // open file descriptor for the chunk's backing file
    int index_in_raw; // -1 means not in raw
    bool being_incubated;
    bool deleted;
    bool disk_checksum; // with checksum or not
    bool reset_journal_version;
    bool start_use_journal;
    bool close_fd;
#define USE_JOURNAL_DEFAULT (0)
#define USE_JOURNAL_DONT	(1)
    char use_journal_hint; // for journal broken; USE_JOURNAL_* values above
    uint64_t tail_offset;
    st_mutex_t replicate_write_lock; // serializes replicate writes (see lock_chunk_write/unlock_chunk_write)
    void* write_lock_thread; // for dec inflight_write_replicate
    bool master_chunk; // set in chunk open and reset after this write

// #ifndef DIV_CHUNK
    uint64_t checksum_len; // checksum area byte length
    uint32_t *checksum_map;
    union{
        uint64_t checksum_file_offset; // checksum offset of file
        uint64_t chunk_data_size; // data size of the chunk
    };
// #else
    void *file_map;
    uint32_t div_block_size; // 4M (power of 4M) (4M + 4k crc + 4M + 4k crc ...), can be 1M, but not a good idea
    uint32_t div_block_crc_size; // 4k
    uint32_t crc_nb_per_div_block;
    uint64_t chunk_file_size; // real file size - 4k
// #endif
    uint32_t *checksum_alloc; // same as mmap, but pwrite will not drop cache; div or not div carries a different meaning — confirm at use sites

    struct Incubation* incubation;
    struct JournalLite journal;

};

// Integer finalizer-style hash: two xor-shift + multiply rounds followed by
// a final xor-shift. Used to spread chunk ids across path buckets.
static inline uint32_t inthash(uint32_t x)
{
    uint32_t h = x;
    h ^= h >> 16;
    h *= 0x45d9f3bu;
    h ^= h >> 16;
    h *= 0x45d9f3bu;
    h ^= h >> 16;
    return h;
}

// Write a three-hex-digit bucket directory name ("abc/") into buf.
// Returns the number of characters written (always 4 for hash <= 0xfff).
static inline int sprintf_chunk_path_hash(char* buf, int hash)
{
    int written = sprintf(buf, "%03x/", hash);
    return written;
}

// Write the bucket directory ("xxx/") for a chunk id into buf; the bucket is
// the low 12 bits of inthash(volumeid ^ index). Returns characters written.
static inline int sprintf_chunk_path(char* buf, uint32_t volumeid, uint32_t index)
{
    const uint32_t bucket = inthash(volumeid ^ index) & 0xfffu;
    return sprintf_chunk_path_hash(buf, bucket);
}

// Write the chunk file name ("<volumeid as 8 hex digits>.<index decimal>")
// into buf. Returns the number of characters written.
static inline int sprintf_chunk_filename(char* buf, uint32_t volumeid, uint32_t index)
{
    int written = sprintf(buf, "%08x.%u", volumeid, index);
    return written;
}

// Write the full relative path "xxx/<volumeid>.<index>" into buf by
// concatenating the bucket directory and the file name. Returns total length.
static inline int sprintf_chunk_path_filename(char* buf, uint32_t volumeid, uint32_t index)
{
    int dir_len = sprintf_chunk_path(buf, volumeid, index);
    int name_len = sprintf_chunk_filename(buf + dir_len, volumeid, index);
    return dir_len + name_len;
}

// Pack a chunk identity into one 64-bit key: index in the high 32 bits,
// volumeid in the low 32 (mirrors the key union inside struct Chunk).
static inline uint64_t chunk_make_key(uint32_t volumeid, uint32_t index)
{
    uint64_t high = (uint64_t)index << 32;
    return high | (uint64_t)volumeid;
}

// Release the per-chunk replicate-write mutex taken by lock_chunk_write.
static inline void unlock_chunk_write(struct Chunk* c)
{
    st_mutex_unlock(c->replicate_write_lock);
}

// True when `size` is aligned to the global O_DIRECT granularity
// (o_direct_size, defined in a .c file elsewhere).
static inline bool is_size_align(uint64_t size)
{
    extern uint32_t o_direct_size;
    return (size % o_direct_size) == 0;
}

// Non-blocking acquire of the per-chunk replicate-write mutex.
// Returns st_mutex_trylock's result: negative means the lock is already held,
// in which case the current holder is logged for diagnosis.
static inline int lock_chunk_write(struct Chunk* c)
{
    // st_mutex_lock(c->replicate_write_lock);
    int ret = st_mutex_trylock(c->replicate_write_lock);
    if(ret < 0){
        // Diagnostic only: reads the first word of the st mutex, which appears
        // to hold the owning thread pointer (st internal layout — TODO confirm).
        LOG_ERROR("chunk %08x.%d is locked by thread %016lx",
                  c->volumeid, c->index, *(uint64_t*)c->replicate_write_lock);
        st_print_call_stack(*(void**)c->replicate_write_lock);
    }
    return ret;
}

// Acquire the chunk's byte-range lock for [low, high] on behalf of the
// foreground I/O path (lock owner id 0; paired with foreground_unlock).
static inline void foreground_range_lock(struct Chunk* chunk, int64_t low, int64_t high)
{
    // Fix: low/high are int64_t but were logged with "%d" — a printf
    // format/argument width mismatch (undefined behavior where int is 32-bit).
    // Cast to long long to match "%lld" portably.
    LOG_DEBUG("%s low %lld high %lld", __func__, (long long)low, (long long)high);
    range_lock(&chunk->lock, 0, low, high);
}

// Release the foreground (owner id 0) range lock taken by foreground_range_lock.
static inline void foreground_unlock(struct Chunk* chunk)
{
    LOG_DEBUG("%s ", __func__);
    range_unlock(&chunk->lock, 0);
}

// ---- backend-fs public API ------------------------------------------------
// NOTE(review): the comments below are inferred from names/signatures; confirm
// against the implementations in the corresponding .c file.
// Fix: empty parameter lists "()" in C declare functions with *unspecified*
// parameters, not zero parameters; changed to "(void)" for consistency with
// get_fd_number(void) below.

// Open the storage root directory at `path`.
int be_open_server_root(const char* path);
// Directory iteration: callback invoked once per entry with `arg` passed through.
typedef int (*dir_iterate_cb)(struct dirent*, void*);
int dir_iterate(const char* dir, dir_iterate_cb callback, void* arg);
bool be_check_root_ssd_file(void);
// List chunks matching volumeid/index into *result; caller presumably owns
// the returned array — TODO confirm ownership.
int be_chunk_list(uint32_t volumeid, uint32_t index, struct ChunkID** result);
int read_file(const char* filename, char* buf, int size);
uint32_t get_fd_number(void);
// Open an existing chunk; on success *out_chunk points at its in-memory state.
int be_chunk_open(uint32_t volumeid, uint32_t index, struct Chunk** out_chunk);
void be_chunk_close(struct Chunk* chunk);
int be_close_server_root(void);
int align_pread_align_size(int fd, void *buf, uint32_t n, uint64_t offset,
                           void *file_map, uint32_t align_check_size);

int async_start_use_journal(void);
int be_write_version(struct Chunk* chunk, uint64_t version);
// Checksum-aware chunk read/write; mmap_ptr is the file mapping — TODO confirm.
int be_chunk_pread(struct Chunk* chunk, void* buf, uint32_t size,
        uint32_t offset, void* mmap_ptr);
int be_chunk_pwrite(struct Chunk* chunk, void* buf, uint32_t size,
        uint32_t offset, void* mmap_ptr);
int be_chunk_create(uint32_t volumeid, uint32_t index, uint64_t size/*in byte*/,
        bool preallocation, bool incubating, bool with_checksum, bool ec_chunk,
        struct Chunk** ret_chunk);
int be_chunk_write_version(struct Chunk* chunk);
int be_chunk_get_reference(uint32_t volumeid, uint32_t index);
// Like be_chunk_open but takes the replicate-write lock; errno_for_lock is
// presumably the errno to report when the chunk is locked — verify in the .c.
int be_chunk_open_for_write(uint32_t volumeid, uint32_t index,
        struct Chunk** out_chunk, bool master, int errno_for_lock);

int async_wipe_out_journal_version(void);
int log_buf(void *buf, int len);

// Recompute/persist checksums covering [offset, offset+byte_len) after a write.
int update_checksum_after_write(struct Chunk* chunk, char *buf, int byte_len,
                                uint64_t offset);
int be_chunk_delete(uint32_t volumeid, uint32_t index);
int be_chunk_raw_info(struct Chunk* chunk, int* fd, off_t *raw_offset, off_t offset);
ssize_t journaled_st_pread(struct Chunk* chunk, void* buf, size_t count, off_t offset);
int read_crc32_from_disk(struct Chunk* chunk, uint32_t *crc,
                         uint32_t nb_to_read, uint64_t offset/*buf offset*/);
int be_chunk_flush(struct Chunk* chunk);

#endif //URSAX_BACKEND_FS_H
