#include "common.h"
#include "index_handle.h"

static int debug = 1;

namespace project
{
    namespace largefile
    {
        // Build the index file path for `main_block_id` under `base_path`
        // and open/create it via an MMapFileOperation handle.
        IndexHandle::IndexHandle(const std::string &base_path, const uint32_t main_block_id)
        {
            // Compose <base_path><INDEX_DIR_PREFIX><main_block_id>.
            std::stringstream tmp_stream;
            tmp_stream << base_path << INDEX_DIR_PREFIX << main_block_id;

            // Bug fix: the old `tmp_stream >> index_path` extraction stops at
            // the first whitespace, silently truncating any base_path that
            // contains a space. str() returns the full buffered string.
            std::string index_path = tmp_stream.str();

            file_op_ = new MMapFileOperation(index_path, O_CREAT | O_RDWR | O_LARGEFILE);
            is_load_ = false;
        }

        // Release the owned file-operation handle; `delete` on NULL is a no-op,
        // so no guard is required.
        IndexHandle::~IndexHandle()
        {
            delete file_op_;
            file_op_ = NULL;
        }

        // Create and initialize a brand-new index file for `logic_block_id`
        // (header + zeroed hash-bucket array), then mmap it.
        // Fails if an index is already loaded in this handle, or if the file
        // already has content on disk.
        // Returns TFS_SUCCESS or a negative error code.
        int IndexHandle::create(const uint32_t logic_block_id, const int32_t bucket_size, const MMapOption map_option)
        {
            int ret = TFS_SUCCESS;
            if (debug)
                printf("create index, block id:%u, bucket size:%d, max mmap size:%d, first mmap size:%d, per mmap size:%d\n", logic_block_id, bucket_size, map_option.max_mmap_size_, map_option.first_mmap_size_, map_option.per_mmap_size_);

            if (is_load_)
            {
                return EXIT_INDEX_ALREADY_LOADED_ERROR;
            }

            int64_t file_size = file_op_->get_file_size();

            if (file_size < 0)
            {
                return TFS_ERROR;
            }
            else if (file_size == 0) // empty file: write a fresh header + bucket array
            {
                IndexHeader i_header;
                i_header.block_info_.block_id_ = logic_block_id;
                i_header.block_info_.seq_no_ = 1;
                i_header.bucket_size_ = bucket_size;

                // Hash chains live in the file itself, so the file starts with
                // the header followed by one int32_t slot per bucket.
                i_header.index_file_size_ = sizeof(IndexHeader) + bucket_size * sizeof(int32_t);

                // Build the initial on-disk image: header + zeroed bucket slots.
                char *init_data = new char[i_header.index_file_size_];
                memcpy(init_data, &i_header, sizeof(IndexHeader));
                memset(init_data + sizeof(IndexHeader), 0, i_header.index_file_size_ - sizeof(IndexHeader));

                // Write index header and buckets into the index file.
                ret = file_op_->pwrite_file(init_data, i_header.index_file_size_, 0);

                delete[] init_data;
                init_data = NULL;

                if (ret != TFS_SUCCESS)
                {
                    return ret;
                }

                ret = file_op_->flush_file();

                if (ret != TFS_SUCCESS)
                {
                    return ret;
                }
            }
            else // file size > 0: an index already exists on disk
            {
                return EXIT_META_UNEXPECT_FOUND_ERROR;
            }

            // Map the freshly written file into memory.
            ret = file_op_->mmap_file(map_option);
            if (ret != TFS_SUCCESS)
            {
                return ret;
            }

            is_load_ = true;

            // Bug fix: logic_block_id is uint32_t, so use %u (was %d).
            if (debug)
                printf("init block id:%u index successful. data file size:%d, index file size:%d, bucket_size:%d, free head offset:%d, seqno:%d, size:%d, filecount:%d, del_size:%d, del_file_count:%d, version:%d\n",
                       logic_block_id, index_header()->data_file_offset_, index_header()->index_file_size_,
                       index_header()->bucket_size_, index_header()->free_head_offset_, block_info()->seq_no_, block_info()->size_t_,
                       block_info()->file_count_, block_info()->del_size_, block_info()->del_file_count_, block_info()->version_);

            return TFS_SUCCESS;
        }

        int IndexHandle::load(const uint32_t logic_block_id, const int32_t _bucket_size, const MMapOption map_option)
        {
            int ret = TFS_SUCCESS;

            if (is_load_)
            {
                return EXIT_INDEX_ALREADY_LOADED_ERROR;
            }

            int64_t file_size = file_op_->get_file_size();
            if (file_size < 0)
            {
                return file_size;
            }
            else if (file_size == 0) // 空文件
            {
                return EXIT_INDEX_CORRUPT_ERROR;
            }

            MMapOption tmp_map_option = map_option;

            if (file_size < tmp_map_option.first_mmap_size_ && file_size <= tmp_map_option.max_mmap_size_) // 需要扩容
            {
                tmp_map_option.first_mmap_size_ = file_size;
            }

            ret = file_op_->mmap_file(tmp_map_option);

            if (ret != TFS_SUCCESS)
            {
                return ret;
            }

            // 合法性判断
            if (bucket_size() == 0 || block_info()->block_id_ == 0)
            {
                fprintf(stderr, "index corrupt error.block id:%u, bucket size:%d\n", block_info()->block_id_, bucket_size());
                return EXIT_INDEX_CORRUPT_ERROR;
            }

            // 检查文件的大小
            int32_t index_file_size = sizeof(IndexHeader) + bucket_size() * sizeof(int32_t);

            if (file_size < index_file_size)
            {
                fprintf(stderr, "index corrupt error, block id:%u, bucket size:%d, file size:%ld, index file size:%d\n", block_info()->block_id_, bucket_size(), file_size, index_file_size);
                return EXIT_INDEX_CORRUPT_ERROR;
            }

            // 检查桶的id
            if (logic_block_id != block_info()->block_id_)
            {
                fprintf(stderr, "block id conflict.block id:%u, index block id:%u\n", logic_block_id, block_info()->block_id_);

                return EXIT_BLOCKID_CONFLICT_ERROR;
            }

            // 检查桶的大小
            if (_bucket_size != bucket_size())
            {
                fprintf(stderr, "index configure error, old bucket size: %d, new bucket size:%d\n", bucket_size(), _bucket_size);
                return EXIT_BUCKET_CONFIGURE_ERROR;
            }

            is_load_ = true;

            if (debug)
                printf("load block id:%d index successful. data file size:%d, index file size:%d, bucket_size:%d, free head offset:%d, seqno:%d, size:%d, filecount:%d, del_size:%d, del_file_count:%d, version:%d\n",
                       logic_block_id, index_header()->data_file_offset_, index_header()->index_file_size_,
                       index_header()->bucket_size_, index_header()->free_head_offset_, block_info()->seq_no_, block_info()->size_t_,
                       block_info()->file_count_, block_info()->del_size_, block_info()->del_file_count_, block_info()->version_);
            return TFS_SUCCESS;
        }

        // Unmap the index and delete its backing file from disk.
        // If an index is currently loaded, its block id must match
        // `logic_block_id`; otherwise EXIT_BLOCKID_CONFLICT_ERROR.
        int IndexHandle::remove(const uint32_t logic_block_id)
        {
            if (is_load_)
            {
                if (logic_block_id != block_info()->block_id_)
                {
                    // Bug fix: both ids are uint32_t, so use %u (was %d),
                    // matching the equivalent message in load().
                    fprintf(stderr, "block id conflict. block id:%u, index block id:%u\n", logic_block_id, block_info()->block_id_);
                    return EXIT_BLOCKID_CONFLICT_ERROR;
                }
            }

            int ret = file_op_->munmap_file();
            if (largefile::TFS_SUCCESS != ret)
            {
                return ret;
            }

            ret = file_op_->unlink_file(); // remove the index file from disk
            return ret;
        }

        // Flush the mmapped index back to disk through the file-op handle;
        // logs (with errno text) and returns the error code on failure.
        int IndexHandle::flush()
        {
            const int ret = file_op_->flush_file();
            if (ret != largefile::TFS_SUCCESS)
            {
                fprintf(stderr, "index flush fail, ret:%d error desc:%s\n", ret, strerror(errno));
            }
            return ret;
        }

        // Update the mmapped block statistics after an insert or delete of
        // `modify_size` bytes (version bump, file counts, sizes; seq_no_
        // advances only on insert). Changes persist when the index is flushed.
        // Returns TFS_SUCCESS, or EXIT_BLOCKID_ZERO_ERROR if the header holds
        // no valid block id.
        int IndexHandle::update_block_info(const OperType oper_type, const uint32_t modify_size)
        {
            if (block_info()->block_id_ == 0)
            {
                return EXIT_BLOCKID_ZERO_ERROR;
            }

            if (oper_type == C_OPER_INSERT)
            {
                ++block_info()->version_;
                ++block_info()->file_count_;
                ++block_info()->seq_no_;
                block_info()->size_t_ += modify_size;
            }
            else if (oper_type == C_OPER_DELETE)
            {
                ++block_info()->version_;
                --block_info()->file_count_;
                block_info()->size_t_ -= modify_size;
                ++block_info()->del_file_count_;
                block_info()->del_size_ += modify_size;
            }
            // Bug fix: the format string read "version:%um file count" —
            // a stray 'm' where a comma was intended.
            if (debug)
                printf("update block info.blockid:%u, version:%u, file count:%u, size:%u, del file count:%u, del size:%u, seq no:%u, oper type:%d\n",
                       block_info()->block_id_, block_info()->version_, block_info()->file_count_, block_info()->size_t_,
                       block_info()->del_file_count_, block_info()->del_size_, block_info()->seq_no_, oper_type);

            return TFS_SUCCESS;
        }

        // 将MetaInfo写入索引文件
        int32_t IndexHandle::write_segment_meta(const uint64_t key, MetaInfo &meta)
        {
            int32_t current_offset = 0;  // 查找的偏移
            int32_t previous_offset = 0; // 要插入的位置

            // 1.key是否存在
            // 从文件哈希表中查找key是否存在， has_find(key, current_offset, previous_offset)
            int ret = hash_find(key, current_offset, previous_offset);

            if (TFS_SUCCESS == ret)
            {
                return EXIT_META_UNEXPECT_FOUND_ERROR;
            }
            else if (EXIT_META_NOT_FOUND_ERROR != ret) // 读文件有问题
            {
                return ret;
            }

            // 2.不存在就写入meta到文件哈希表中 hash_insert(key, previous_offset, meta)
            ret = hash_insert(key, previous_offset, meta);
            return ret;
        }

        // Look up `key` in the on-file hash table and, if found, read its
        // MetaInfo record into `meta`. Returns TFS_SUCCESS, a hash_find
        // error (e.g. EXIT_META_NOT_FOUND_ERROR), or a pread error code.
        int32_t IndexHandle::read_segment_meta(const uint64_t key, MetaInfo &meta)
        {
            int32_t found_at = 0;
            int32_t prev = 0;

            const int32_t ret = hash_find(key, found_at, prev);
            if (TFS_SUCCESS != ret)
            {
                return ret;
            }
            return file_op_->pread_file(reinterpret_cast<char *>(&meta), sizeof(MetaInfo), found_at);
        }

        // Remove the MetaInfo record for `key` from the on-file hash table and
        // push the freed node onto the header's free list for later reuse.
        // Returns TFS_SUCCESS, a hash_find error (EXIT_META_NOT_FOUND_ERROR),
        // or a pread/pwrite error code. NOTE(review): the pwrite/header update
        // ordering matters for crash consistency — do not reorder.
        int32_t IndexHandle::delete_segment_meta(const uint64_t key)
        {
            int32_t current_offset = 0, previous_offset = 0;

            int32_t ret = hash_find(key, current_offset, previous_offset);
            if (ret != TFS_SUCCESS)
            {
                return ret;
            }
            // The chain behaves like a linked list: fetch the victim node so
            // we know its "next" offset before unlinking it.
            MetaInfo meta_info;

            ret = file_op_->pread_file(reinterpret_cast<char *>(&meta_info), sizeof(MetaInfo), current_offset);
            if (ret != TFS_SUCCESS)
            {
                return ret;
            }

            int32_t next_pos = meta_info.get_next_meta_offset();

            // Two cases: the victim is the chain head (previous_offset == 0,
            // i.e. the bucket slot points straight at it) or an interior node.
            if (previous_offset == 0)
            {
                int32_t slot = static_cast<uint32_t>(key) % bucket_size();
                bucket_slot()[slot] = next_pos;
            }
            else // interior node: patch the predecessor's next pointer
            {
                MetaInfo pre_meta_info;
                ret = file_op_->pread_file(reinterpret_cast<char *>(&pre_meta_info), sizeof(MetaInfo), previous_offset);
                if (TFS_SUCCESS != ret)
                {
                    return ret;
                }

                pre_meta_info.set_next_meta_offset(next_pos);
                // Write the modified predecessor back to the index file.
                ret = file_op_->pwrite_file(reinterpret_cast<char *>(&pre_meta_info), sizeof(MetaInfo), previous_offset);
                if (TFS_SUCCESS != ret)
                {
                    return ret;
                }
            }

            // Push the freed node onto the reusable-node list using head
            // insertion: its next pointer becomes the old free-list head.
            meta_info.set_next_meta_offset(free_head_offset());
            ret = file_op_->pwrite_file(reinterpret_cast<char *>(&meta_info), sizeof(MetaInfo), current_offset);
            if (TFS_SUCCESS != ret)
            {
                return ret;
            }
            index_header()->free_head_offset_ = current_offset;
            if(debug) printf("delete_segment_meta - reuse metainfo, current_offset:%d\n", current_offset);

            // Update the block statistics to account for the deletion.
            // NOTE(review): the return code of update_block_info is ignored
            // here — presumably intentional best-effort; confirm.
            update_block_info(C_OPER_DELETE, meta_info.get_size());

            return TFS_SUCCESS;
        }

        // 重点研究，面试的亮点
        // hash查找
        int32_t IndexHandle::hash_find(const uint64_t key, int32_t &current_offset, int32_t &previous_offset)
        {
            int ret = TFS_SUCCESS;
            MetaInfo meta_info;

            current_offset = 0;
            previous_offset = 0;
            // 1.确定key存放的桶（slot）的位置
            int32_t slot = static_cast<uint32_t>(key) % bucket_size();

            // 2.读取桶首部存储的第一个节点的偏移量，如果偏移量为0，直接返回EXIT_META_NOT_FOUND_ERROR
            // 3.根据偏移量读取存储的metainfo
            // 4.与key进行比较，相等则设置current_offset和previous_offset 并返回TFS_SUCCESS，否则继续执行5
            // 5.从metainfo中取得下一个节点在文件中的偏移量，如果偏移量为0，直接返回EXIT_META_NOT_FOUND_ERROR，否则跳转至3循环执行.
            int32_t pos = bucket_slot()[slot];

            for (; pos != 0;)
            {
                ret = file_op_->pread_file(reinterpret_cast<char *>(&meta_info), sizeof(MetaInfo), pos);

                if (TFS_SUCCESS != ret)
                {
                    return ret;
                }

                if (hash_compare(key, meta_info.get_key()))
                {
                    current_offset = pos;
                    return TFS_SUCCESS;
                }

                previous_offset = pos;
                pos = meta_info.get_next_meta_offset();
            }

            return EXIT_META_NOT_FOUND_ERROR;
        }

        // Append `meta` (keyed by `key`) to the tail of its bucket chain.
        // `previous_offset` is the offset of the current chain tail as
        // reported by hash_find, or 0 when the bucket is empty.
        // The node's storage is taken from the header's free list when one is
        // available; otherwise the index file is grown by one MetaInfo.
        // Returns TFS_SUCCESS or an I/O error code; on failure the header
        // bookkeeping (file size / free-list head) is rolled back exactly.
        int32_t IndexHandle::hash_insert(const uint64_t key, int32_t previous_offset, MetaInfo &meta)
        {
            int ret = TFS_SUCCESS;

            MetaInfo tmp_meta_info;
            int32_t current_offset = 0;
            bool reused_free_node = false; // node offset came from the free list
            int32_t saved_free_head = 0;   // old free-list head, kept for rollback

            // 1. Pick the hash bucket from the low 32 bits of the key.
            int32_t slot = static_cast<uint32_t>(key) % bucket_size();

            // 2. Choose the file offset for the new node.
            if (free_head_offset() != 0) // reuse a node from the free list
            {
                ret = file_op_->pread_file(reinterpret_cast<char *>(&tmp_meta_info), sizeof(MetaInfo), free_head_offset());
                if (TFS_SUCCESS != ret)
                {
                    return ret;
                }

                reused_free_node = true;
                saved_free_head = index_header()->free_head_offset_;
                current_offset = saved_free_head;
                if (debug)
                    printf("reuse metainfo, current_offset:%d\n", current_offset);
                // Pop the node: the free list now starts at its successor.
                index_header()->free_head_offset_ = tmp_meta_info.get_next_meta_offset();
            }
            else // no reusable node: grow the index file by one record
            {
                current_offset = index_header()->index_file_size_;
                index_header()->index_file_size_ += sizeof(MetaInfo);
            }

            // 3. Write the new node; it becomes the tail of its chain.
            meta.set_next_meta_offset(0);

            ret = file_op_->pwrite_file(reinterpret_cast<const char *>(&meta), sizeof(MetaInfo), current_offset);
            if (TFS_SUCCESS != ret)
            {
                // Bug fix: the old code always shrank index_file_size_ here,
                // corrupting the header whenever the node had come from the
                // free list (the file was never grown in that case). Undo
                // exactly what step 2 did instead.
                if (reused_free_node)
                {
                    index_header()->free_head_offset_ = saved_free_head;
                }
                else
                {
                    index_header()->index_file_size_ -= sizeof(MetaInfo);
                }
                return ret;
            }

            // 4. Link the node into the hash chain.
            if (0 != previous_offset) // the chain already has a tail node
            {
                ret = file_op_->pread_file(reinterpret_cast<char *>(&tmp_meta_info), sizeof(MetaInfo), previous_offset);
                if (TFS_SUCCESS != ret)
                {
                    // Roll back step 2 (see comment above).
                    if (reused_free_node)
                    {
                        index_header()->free_head_offset_ = saved_free_head;
                    }
                    else
                    {
                        index_header()->index_file_size_ -= sizeof(MetaInfo);
                    }
                    return ret;
                }

                tmp_meta_info.set_next_meta_offset(current_offset);
                ret = file_op_->pwrite_file(reinterpret_cast<const char *>(&tmp_meta_info), sizeof(MetaInfo), previous_offset);
                if (TFS_SUCCESS != ret)
                {
                    // Roll back step 2 (see comment above).
                    if (reused_free_node)
                    {
                        index_header()->free_head_offset_ = saved_free_head;
                    }
                    else
                    {
                        index_header()->index_file_size_ -= sizeof(MetaInfo);
                    }
                    return ret;
                }
            }
            else
            {
                // Empty chain: the bucket slot points directly at the new node.
                bucket_slot()[slot] = current_offset;
            }

            return TFS_SUCCESS;
        }
    }
}