#include "table/table.h"

namespace hawking {
namespace indexlib {

// Background-thread scheduling and merge-tuning knobs.
DEFINE_int32(dumping_interval, 60, "dumping thread interval");
// BUGFIX: description was copy-pasted from dumping_interval.
DEFINE_int32(merging_interval, 60, "merging thread interval");
DEFINE_int32(garbage_interval, 1, "garbage thread interval");
DEFINE_double(merge_threshold, 0.5, "disk segment merge threshold");
DEFINE_string(default_ttl_col_name, "TTL", "default name of ttl column");
DEFINE_bool(building_in_one_segment, true, "building in one segment when format");
DEFINE_bool(snapshot_debug, false, "would not delete snapshot dir when fail");
DEFINE_int32(merging_rss_threshold, 1024 , "MB. don't do merge if rss < this");
DEFINE_double(merging_memcost_rate_threshold, 0.8, "0~1. don't do merge if rss * it < predict cost of merge");

// Select a group of disk segments to merge together.
// Preference order: smaller segments first; in urgent mode only segments
// that are due for dumping; all members must share the same schema; the
// group's total memory cost stays below the threshold.
std::list<std::shared_ptr<DiskSegment>>
SimpleDiskSegmentMergeStrategy::FindMergeWorkItems() {
    // preferred merge small segments: sort ascending by memory cost
    disk_segments_.sort([](
        const std::shared_ptr<DiskSegment>& a, const std::shared_ptr<DiskSegment>& b) {
        return a->MemoryCost() < b->MemoryCost();
    });

    // preferred merge with other urgent segments
    int32_t memory_cost = 0;
    IndexlibIndexConfig* base_schema = nullptr;
    std::list<std::shared_ptr<DiskSegment>> merge_group;
    for (const auto& disk_segment : disk_segments_) {
        if (with_urgent_ && !disk_segment->ShouldDump()) {
            continue;
        }

        // Stop once adding this segment would reach the cost threshold
        // (segments are sorted ascending, so later ones are no smaller).
        if (memory_cost + disk_segment->MemoryCost() >= threshold) {
            break;
        }

        // Only segments whose schema matches the first selected segment
        // can join the group.
        std::shared_ptr<IndexlibIndexConfig> schema = disk_segment->Schema();
        if (!base_schema) {
            base_schema = schema.get();
        } else if (
            !google::protobuf::util::MessageDifferencer::Equals(
                *base_schema, *schema)) {
            // BUGFIX: previously the skipped segment's memory cost had
            // already been added to memory_cost, shrinking the budget for
            // segments that WERE selected.
            continue;
        }

        memory_cost += disk_segment->MemoryCost();
        merge_group.push_back(disk_segment);
        LOG(INFO) << "Add one disk segment with memory cost "
            << disk_segment->MemoryCost() << " into merge group";
    }

    return merge_group;
}

// Shut down background threads, release all segments, and remove the
// table directory (when one was assigned).
Table::~Table() {
    // Signal the dumping/merging/garbage threads to drain and exit.
    running_.store(0);

    // BUGFIX: join the worker threads BEFORE releasing segment state; the
    // threads keep reading building/dumping/disk segments while they drain
    // their queues, so clearing first was a use-after-clear race.
    if (dumping_thread_.joinable()) {
        dumping_thread_.join();
    }
    if (merging_thread_.joinable()) {
        merging_thread_.join();
    }
    if (garbage_thread_.joinable()) {
        garbage_thread_.join();
    }

    building_segment_ = nullptr;
    dumping_segments_.clear();
    disk_segments_.clear();

    if (!root_dir_.empty()) {
        FslibWrapper::RmDir(root_dir_);
    }
}

bool Table::SnapShot(const std::string& root_dir, bool is_format) {
    if (TableDir() == root_dir) {
        LOG(ERROR) << "Snapshot dir should not same as tabledir "
            << root_dir << " to avoid problems";
        return false;
    }

    if (!FslibWrapper::PathExist(root_dir)) {
        LOG(WARNING) << "root_dir " << root_dir << " not exist, auto create";
        FslibWrapper::Mkdir(root_dir);
    }

    // would shield "add/update/del + dump/merge/garbage + schema update"
    // TODO find better snapshot merchaism, avoid too big influence
    std::unique_lock<std::shared_mutex> snapshot_lock(snapshot_lock_);

    // for hawking format's save index
    // clean the dumping/merging queue and push all mem segment into garbase queue
    CallBackOnExit format_cb([this, is_format]() {
        if (!is_format) {
            return;
        }

        // push building segment and all dumping segments to garbase queue
        std::unique_lock<std::mutex> garbage_lock(garbage_mutex_);
        garbage_queue_.push(building_segment_);

        std::unique_lock<std::mutex> dumping_lock(dumping_mutex_);
        while (!dumping_queue_.empty()) {
            std::shared_ptr<Segment> dumping_segment = dumping_queue_.top();
            garbage_queue_.push(dumping_segment);
            dumping_queue_.pop();
        }
        dumping_lock.unlock();
        garbage_lock.unlock();

        // clear merging queue
        std::unique_lock<std::mutex> merging_lock(merging_mutex_);
        merging_queue_.clear();
        merging_lock.unlock();
    });

    std::atomic<bool> fail;
    fail.store(true, std::memory_order_acquire);
    std::vector<std::thread> snapshot_tasks;

    std::shared_ptr<MemSegment> building_segment = all_segment.building_segment;
    if (building_segment && building_segment->IsDirty()) {
        snapshot_tasks.emplace_back([building_segment, &fail, &root_dir]() {
            std::string segment_dir = CreateSegmentDir_(root_dir);
            if (!building_segment->Dump(segment_dir)) {
                LOG(ERROR) << "Fail to Snapshot with building segment at "
                    << segment_dir;
                fail.store(true, std::memory_order_relaxed);
            } else {
                LOG(INFO) << "Success to Snapshot with building segment at "
                    << segment_dir;
            }
        });
    }

    for (auto dumping_segment : all_segment.dumping_segment) {
        snapshot_tasks.emplace_back([dumping_segment, &fail, &root_dir]()){
            std::string segment_dir = CreateSegmentDir_(root_dir);
            if (!dumping_segment->Dump(segment_dir)) {
                LOG(ERROR) << "Fail to Snapshot with dumping segment at "
                    << segment_dir;;
                fail.store(true, std::memory_order_relaxed);
            } else {
                LOG(INFO) << "Success to Snapshot with dumping segment at "
                    << segment_dir;
            }
        };
    }

    for (auto disk_segment : all_segment.disk_segment) {
        snapshot_tasks.emplace_back([disk_segment, &fail, &root_dir]()){
            if (!disk_segment->IsDirty()) {
                // save dir(root_dir) not the same dir as root_dir_:
                // un-dirty: copy disk segment's dir in root_dir_ to root_dir
                // dirty: need generate and dump dir to root_dir
                butil::FilePath dst_dir(root_dir);
                butil::FilePath src_dir(disk_segment->SegmentDir());
                if (!butil::CopyDirectory(src_dir, dst_dir, true)) {
                    LOG(ERROR) << "Fail to Copy disk segment at "
                        << disk_segment->SegmentDir() << " to " << root_dir;
                    fail.store(true, std::memory_order_relaxed);
                } else {
                    LOG(INFO) << "Success to Copy disk segment at "
                        << disk_segment->SegmentDir() << " to " << root_dir;
                }

                return;
            }

            std::string segment_dir = CreateSegmentDir_(root_dir);
            if (!disk_segment->Dump(segment_dir)) {
                LOG(ERROR) << "Fail to Snapshot with disk segment";
                fail.store(true, std::memory_order_relaxed);
            } else {
                LOG(INFO) << "Success to Snapshot with disk segment at "
                    << segment_dir;
            }
        };
    }

    for (auto& snapshot_task : snapshot_tasks) {
        if (snapshot_task.joinable()) {
            snapshot_task.join();
        }
    }

    if (fail.load(std::memory_order_release)) {
        LOG(ERROR) << "Fail to Snapshot";
        if (!FLGS_snapshot_debug) {
            file_wrapper::RmDir(root_dir);
        }
        return false;
    }
    
    return true;
}

void Table::SwitchDiskSegments_(
    std::list<std::shared_ptr<DiskSegment>>* merge_group,
    std::shared_ptr<DiskSegment> new_disk_segment) {
    std::unique_lock<std::shared_mutex> w_lock(mgr_lock_);
    for (auto merged_disk_segment : *merge_group) {
        auto& tomb_stone = merged_disk_segment->GetTombStone();
        if (tomb_stone) {
            tomb_stone->Stop();
            new_disk_segment->ExcludeTombPks(tomb_stone->Pks());
        }
    }

    for (auto& merged_disk_segment : *merge_group) {
        dumping_segments_.erase(std::remove_if(
            dumping_segments_.begin(),
            dumping_segments_.end(),
            [&merged_disk_segment](const std::shared_ptr<MemSegment>& ptr){
                return ptr.get() == merged_disk_segment.get();
            }), dumping_segments_.end()););
    }

    disk_segments_.push_back(new_disk_segment);
}

bool Table::LoadDiskSegment_(
    const std::string disk_segment_dir,
    std::shared_ptr<MemSegment> remove_mem_segment) {
    // first load disk segment
    // during the load, remove_mem_segment still could be add deleted pk to tomb
    std::shared_ptr<DiskSegment> disk_segment = std::make_shared<DiskSegment>();
    FSFileType data_filetype =
        use_disk_ ? FSFileType::FSFT_BLOCK : FSFileType::FSFT_MMAP;
    if (!disk_segment->Load(disk_segment_dir, data_filetype, true)) {
        LOG(ERROR) << "Load disk_segment_dir Fail";
        return false;
    }

    // at most one doc's delete/update on dumping segment would be recreated oto building segment,
    std::unique_lock<std::shared_mutex> w_lock(mgr_lock_);
    // stop dumping segment's delete, fetch deleted pks, exclude them in disk segment
    auto& tomb_stone = remove_mem_segment->GetTombStone();
    if (tomb_stone) {
        tomb_stone->Stop();
        disk_segment->ExcludeTombPks(tomb_stone->Pks());
    }

    // dumping segment list move out
    dumping_segments_.erase(std::remove_if(
        dumping_segments_.begin(),
        dumping_segments_.end(),
        [&remove_mem_segment](const std::shared_ptr<MemSegment>& ptr){
            return ptr.get() == remove_mem_segment.get();
        }), dumping_segments_.end());

    // disk segment list push back
    disk_segments_.push_back(disk_segment);
    return true;
}

// Initialize a brand-new table: ensure the root directory exists, install
// the schema, create the first in-memory building segment, and start the
// background threads.
bool Table::Init(std::shared_ptr<IndexlibIndexConfig> schema) {
    building_in_one_segment_ = FLAGS_building_in_one_segment;

    // Make sure the table root directory is present before anything else.
    const bool dir_ready =
        FslibWrapper::PathExist(root_dir_) || FslibWrapper::Mkdir(root_dir_);
    if (!dir_ready) {
        LOG(ERROR) << "root_dir " << root_dir_ << " mkdir fail";
        return false;
    }

    if (!UpdateSchema(schema)) {
        LOG(ERROR) << "Wrong Index Config";
        return false;
    }

    // A fresh table starts with a single writable mem segment.
    building_segment_ = std::make_shared<MemSegment>();
    if (!building_segment_->Init(schema)) {
        return false;
    }

    InitPkFieldName_();
    Init_();
    return true;
}

bool Table::Load(
    std::shared_ptr<IndexlibIndexConfig> schema,
    bool need_increase,
    bool use_disk) {
    use_disk_ = use_disk;
    if (!UpdateSchema(schema)) {
        LOG(ERROR) << "Wrong Index Config";
        return false;
    }

    if (need_increase) {
        building_segment_ = std::make_shared<MemSegment>();
        if (!building_segment_->Init(schema_)) {
            LOG(ERROR) << "Create building segment fail";
            return false;
        }
    }
    InitPkFieldName_();

    std::vector<std::string> segment_dirs = FslibWrapper::GetSubDir(root_dir_);
    for (const auto& segment_dir : segment_dirs) {
        std::string seg_dir =
            butil::string_printf("%s/%s", root_dir_.data(), segment_dir.data());
        std::shared_ptr<DiskSegment> disk_segment = std::make_shared<DiskSegment>();
        FSFileType data_filetype =
            use_disk_ ? FSFileType::FSFT_BLOCK : FSFileType::FSFT_MMAP;
        if (!disk_segment->Load(seg_dir, data_filetype, need_increase)) {
            LOG(ERROR) << "Load segment at directory " << seg_dir << " fail";
            return false;
        }

        disk_segments_.push_back(disk_segment);
    }

    if (need_increase) {
        Init_();
    }
    return true;
}

std::list<std::shared_ptr<DiskSegment>> FindMergeWorkItems_() {
    std::shared_ptr<DiskSegmentMergeStrategy> disk_segment_merge_strategy =
        std::make_shared<DiskSegmentMergeStrategy>(GetAllSegment_().disk_segment());
    return disk_segment_merge_strategy->FindMergeWorkItems();
}

bool Table::DoMerge_(
    std::list<std::shared_ptr<DiskSegment>> merge_group,
    const std::string& new_disk_segment_dir) {
    // stop update, jump into DumpingOrMerging status
    for (auto merge_item : merge_group) {
        merge_item->WaitForWriteFinish();
    }

    DiskSegmentMerger disk_segment_merger(merge_group, new_disk_segment_dir);
    return disk_segment_merger.DoMerge();
}

// Start the three background threads:
//  - dumping: persists full mem segments as disk segments
//  - merging: combines small disk segments per the merge strategy
//  - garbage: releases retired segments once nothing references them
// All three exit after running_ leaves 1 and their queues drain.
void Table::Init_() {
    running_.store(1);

    dumping_thread_ = std::thread([this](){
        while (1) {
            std::this_thread::sleep_for(std::chrono::seconds(FLAGS_dumping_interval));
            // On shutdown, keep looping until the dumping queue is empty.
            if (1 != running_.load(std::memory_order_relaxed)) {
                std::unique_lock<std::mutex> lock(dumping_mutex_);
                if (dumping_queue_.empty()) {
                    LOG(INFO) << "Table " << table_name_ << " dumpped finish";
                    break;
                }
            }

            // Shared snapshot lock: a concurrent SnapShot must not observe a
            // half-dumped segment.
            std::shared_lock<std::shared_mutex> shield_snapshot_lock(snapshot_lock_);

            std::shared_ptr<MemSegment> dump_mem_segment;
            {
                std::unique_lock<std::mutex> lock(dumping_mutex_);
                if (!dumping_queue_.empty()) {
                    dump_mem_segment = dumping_queue_.front();
                    if (unlikely(!dump_mem_segment)) {
                        LOG(ERROR) << "Dumping mem segment is nullptr!";
                        dumping_queue_.pop();
                        continue;
                    }
                }
            }
            // BUGFIX: with an empty queue dump_mem_segment stayed null and
            // was dereferenced below — skip this round instead.
            if (!dump_mem_segment) {
                continue;
            }

            // dumped, but had not loaded disk segment
            std::string segment_dir = CreateSegmentDir_(root_dir_);
            if (dump_mem_segment->Dump(segment_dir)) {
                if (LoadDiskSegment_(
                        segment_dir,
                        dump_mem_segment)) {
                    LOG(INFO) << "MemSegment " << segment_dir
                        << " dump finish and new DiskSegment Load Success.";
                    std::unique_lock<std::mutex> seg_lock(dumping_mutex_);
                    dumping_queue_.pop();
                    seg_lock.unlock();

                    // Retire the mem segment through the garbage thread.
                    std::unique_lock<std::mutex> lock(garbage_mutex_);
                    garbage_queue_.push(dump_mem_segment);
                } else {
                    dump_mem_segment->ResetSegmentState();
                    LOG(ERROR) << "MemSegment " << segment_dir
                        << " dump finish and new DiskSegment Load Fail";
                }

                continue;
            } else {
                // Dump failed: roll back to writable state and drop the
                // half-written directory.
                dump_mem_segment->ResetSegmentState();
                FslibWrapper::RmDir(segment_dir);
                LOG(ERROR) << "Dumping mem segment dump fail!";
            }
        }
    });

    merging_thread_ = std::thread([this](){
        while (running_.load(std::memory_order_relaxed)) {
            std::shared_lock<std::shared_mutex> shield_snapshot_lock(
                this->snapshot_lock_);
            std::list<std::shared_ptr<DiskSegment>> merge_group =
                this->FindMergeWorkItems_();
            if (merge_group.size() > 1) {
                std::string new_disk_segment_dir = CreateSegmentDir_();
                if (false == this->DoMerge_(merge_group, new_disk_segment_dir)) {
                    // BUGFIX: the log statement was missing a `<<`.
                    LOG(ERROR) << "merge " << merge_group.size()
                        << " disk segments fail";
                    for (auto merge_item : merge_group) {
                        merge_item->ResetSegmentState();
                    }
                } else {
                    FSFileType data_filetype =
                        this->DecideFsFileTypeAfterMerge_(merge_group);
                    std::shared_ptr<DiskSegment> new_disk_segment =
                        std::make_shared<DiskSegment>();
                    if (!new_disk_segment->Load(
                            new_disk_segment_dir, data_filetype, true)) {
                        LOG(ERROR) << "new merged disk segment load fail";
                        for (auto merge_item : merge_group) {
                            merge_item->ResetSegmentState();
                        }
                    } else {
                        // Swap sources out for the merged segment, then hand
                        // the sources to the garbage thread.
                        this->SwitchDiskSegments_(&merge_group, new_disk_segment);
                        std::unique_lock<std::mutex> lock(garbage_mutex_);
                        for (auto merge_item : merge_group) {
                            garbage_queue_.push(merge_item);
                        }
                    }
                }
            }

            // TODO in testing
            std::this_thread::sleep_for(std::chrono::seconds(FLAGS_merging_interval));
        }

        LOG(INFO) << "Table " << table_name_ << " merging finish";
    });

    garbage_thread_ = std::thread([this](){
         while (1) {
            // On shutdown, exit once the garbage queue drains.
            if (1 != running_.load(std::memory_order_relaxed)) {
                std::unique_lock<std::mutex> lock(garbage_mutex_);
                if (garbage_queue_.empty()) {
                    LOG(INFO) << "Table " << table_name_ << " release finish";
                    break;
                }
            }

            std::shared_ptr<Segment> may_rel_segment;
            {
                std::unique_lock<std::mutex> lock(garbage_mutex_);
                if (!garbage_queue_.empty()) {
                    may_rel_segment = garbage_queue_.front();
                    garbage_queue_.pop();
                }
            }

            // Dropping the local shared_ptr releases the segment when this
            // was the last reference.
            may_rel_segment = nullptr;
            std::this_thread::sleep_for(std::chrono::seconds(FLAGS_garbage_interval));
        }
    });
}

// TODO how consider memory limit
// Choose the file type for a merged segment: default to mmap, but if any
// source segment uses a different file type, inherit that one instead.
FSFileType Table::DecideFsFileTypeAfterMerge_(
    const std::list<std::shared_ptr<DiskSegment>>& merged_disk_segment_list) const {
    const FSFileType default_type = FSFileType::FSFT_MMAP;
    for (const auto& source : merged_disk_segment_list) {
        const FSFileType source_type = source->FileType();
        if (source_type != default_type) {
            return source_type;
        }
    }

    return default_type;
}

// Forwarding helper: delete pk from one specific segment.
util::HawkingRecallRetCode Table::RemoveDocByPk_(
    std::shared_ptr<Segment> segment, uint64_t pk) {
    auto delete_result = segment->Delete(pk);
    return delete_result;
}

// Delete pk from whichever segment currently holds it, searching
// building -> dumping -> disk. Returns PK_NOT_FOUND when no segment has it.
util::HawkingRecallRetCode Table::RemoveDocByPk_(uint64_t pk) {
    AllSegment all_segment = GetAllSegment_();
    // BUGFIX: AllSegment's members are building_segment / dumping_segment /
    // disk_segment (no trailing underscore) everywhere else in this file;
    // also guard against a null building segment, as the other AllSegment
    // consumers do.
    if (all_segment.building_segment &&
        Status::OK == all_segment.building_segment->Delete(pk)) {
        return util::HawkingRecallRetCode::OK;
    }

    for (auto dumping_segment : all_segment.dumping_segment) {
        if (Status::OK == dumping_segment->Delete(pk)) {
            return util::HawkingRecallRetCode::OK;
        }
    }

    for (auto disk_segment : all_segment.disk_segment) {
        if (Status::OK == disk_segment->Delete(pk)) {
            return util::HawkingRecallRetCode::OK;
        }
    }

    return util::HawkingRecallRetCode::PK_NOT_FOUND;
}

// Search building -> dumping -> disk segments for pk. On a hit, fills
// *exist_segment / *exist_doc_id and returns true; returns false when no
// segment contains the pk (caller then creates it in the building segment).
bool Table::PkExists_(
    uint64_t pk, std::shared_ptr<Segment>* exist_segment, DocId* exist_doc_id) const {
    AllSegment all_segment = GetAllSegment_();
    ReturnValue<DocId> ret{Status::NotFound, 0};
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->Lookup(pk);
        if (ret.OK()) {
            *exist_segment = all_segment.building_segment;
            *exist_doc_id = ret.Value();
            // BUGFIX: the `return true` sat OUTSIDE this if, so a miss in
            // the building segment returned true with unset outputs and the
            // dumping/disk segments were never searched.
            return true;
        }
    }

    for (auto seg : all_segment.dumping_segment) {
        ret = seg->Lookup(pk);
        if (ret.OK()) {
            *exist_segment = seg;
            *exist_doc_id = ret.Value();
            return true;
        }
    }

    for (auto seg : all_segment.disk_segment) {
        ret = seg->Lookup(pk);
        if (ret.OK()) {
            *exist_segment = seg;
            *exist_doc_id = ret.Value();
            return true;
        }
    }

    // if all `building + dumping + disked` segs could not found pk,
    // create in building seg
    return false;
}

// Complete a partial update message: for every schema field the message
// does NOT set, copy the stored value of pk from exist_segment into
// mut_message, so the doc can be fully recreated in the building segment.
bool Table::FieldCompletion_(
    IndexlibMessage* mut_message, uint64_t pk, std::shared_ptr<Segment> exist_segment) {
    // record which fields the message already updates
    std::unordered_set<std::string> update_sets;
    update_sets.reserve(mut_message->columns().size());
    for (const auto& column : mut_message->columns()) {
        update_sets.emplace(column.column_name());
    }

    // get all fields of this pk from exist_segment
    std::shared_ptr<IndexlibIndexConfig> seg_index_config = exist_segment->Schema();
    google::protobuf::RepeatedPtrField<std::string> select_fields;
    select_fields.Reserve(seg_index_config->fields_size());
    for (const auto& field_config : seg_index_config->fields()) {
        std::string* field_name = select_fields.Add();
        *field_name = field_config.field_name();
    }
    google::protobuf::RepeatedField<int64_t> pks;
    pks.Add(pk);
    google::protobuf::RepeatedPtrField<util::FilterColumn> dummy;
    IndexlibRetrieveResponse resp;
    // BUGFIX: the closing paren of QueryByPk was misplaced, which turned
    // `&resp || resp.docs_size() != 1` into one (bool) call argument.
    if (Status::OK != exist_segment->QueryByPk(
            pks, select_fields, dummy, dummy, &resp) ||
        resp.docs_size() != 1) {
        LOG(ERROR) << "Get current pk " << pk << " all columns fail with resp "
            << resp.ShortDebugString()
            << " for " << seg_index_config->fields_size() << " columns";
        return false;
    }

    // add un-updated fields to the message
    // BUGFIX: `columns` is a pointer (use ->), and the membership test must
    // inspect the CURRENT column's name, not the container.
    auto* columns = resp.mutable_docs()->Mutable(0);
    for (int32_t idx = 0; idx < columns->size(); ++idx) {
        util::SelectColumn* column = columns->Mutable(idx);
        if (update_sets.find(column->column_name()) != update_sets.end()) {
            continue;
        }

        util::SelectColumn* add_col = mut_message->add_columns();
        add_col->Swap(column);
    }

    return true;
}

// Add or update the document identified by pk:
//  1. pk absent everywhere        -> create in the building segment
//  2. pk in dumping/merging seg   -> delete there (tomb) + recreate in building
//  3. pk in building/disk segment -> in-place field update
util::HawkingRecallRetCode Table::AddDocument_(
    const IndexlibMessage& message,
    uint64_t pk) {
    std::shared_ptr<Segment> exist_segment;
    DocId exist_doc_id;
    std::shared_ptr<NormalDocument> doc;
    std::shared_ptr<MemSegment> building_segment = GetAllSegment_().building_segment;
    if (PkExists_(pk, &exist_segment, &exist_doc_id)) {
        doc = exist_segment->CreateNormalDocument(message);
        auto ret = exist_segment->UpdateFields(message, pk, doc, exist_doc_id);
        if (util::HawkingRecallRetCode::SEGMENT_DUMPING_OR_MERGING == ret) {
            // Recreate path is only valid when both segments share a schema.
            // BUGFIX: building_segment is a shared_ptr — use ->Schema().
            if (!IsSameSchema(exist_segment->Schema(), building_segment->Schema())) {
                return ret;
            }

            // NOTE(review): const_cast mutates the caller's message in place;
            // FieldCompletion_ only ADDs the missing columns.
            IndexlibMessage* mut_message = &(const_cast<IndexlibMessage&>(message));
            if (!FieldCompletion_(mut_message, pk, exist_segment)) {
                LOG(ERROR) << "Get current all fields from exist_segment fail with pk "
                    << pk;
                return util::HawkingRecallRetCode::INDEX_SEARCH_EXCEPTION;
            }
            doc = exist_segment->CreateNormalDocument(message);
            RemoveDocByPk_(exist_segment, pk);
        } else {
            return ret;
        }
    }

    if (!doc) {
        doc = building_segment->CreateNormalDocument(message);
        if (!doc) {
            LOG(ERROR) << "transfer message to doc fail, message: "
                << message.ShortDebugString()
                << ", index config: "
                << building_segment->Schema()->ShortDebugString();
            return util::HawkingRecallRetCode::MSG_PARSE_FAIL;
        }
    }

    // create new mem-building-segment and make old one into dumping queue when full
    if (!building_in_one_segment_ && building_segment->ShouldDump()) {
        // avoid UpdateSchema already update the building_segment_,
        // that the building_segment_ here is already in dumping queue
        if (!building_segment->IsInDumpingQueue()) {
            std::shared_ptr<MemSegment> new_building_segment = std::make_shared<MemSegment>();
            // not need check
            // BUGFIX: schema_ is a data member, not a callable.
            new_building_segment->Init(schema_);
            UpdateBuildingSegment_(building_segment, new_building_segment, true);
        } else {
            // if has been UpdateSchema, retry
            return util::HawkingRecallRetCode::SCHEMA_NOT_MATCH;
        }
    }

    return building_segment->AddDocument(doc) ?
        util::HawkingRecallRetCode::OK : util::HawkingRecallRetCode::INDEX_BUILD_FAIL;
}

// Extract the primary key from the message's columns.
// Returns false when no column matches the configured pk field name.
bool Table::FetchPk_(const IndexlibMessage& message, uint64_t* pk) const {
    for (const auto& column : message.columns()) {
        if (column.column_name() != pk_field_name_) {
            continue;
        }
        *pk = column.column_value().int_value();
        return true;
    }

    return false;
}

// Public write entry point: validates the opcode and pk, then dispatches
// deletes to RemoveDocByPk_ and adds/updates to AddDocument_.
util::HawkingRecallRetCode Table::AddDocument(const IndexlibMessage& message) {
    // OWMR
    std::unique_lock<bthread::Mutex> data_lock(data_lock_);

    // shield snapshot
    std::shared_lock<std::shared_mutex> shield_snapshot_lock(snapshot_lock_);

    // opcode check
    if (message.cmd() != util::INDEX_RT_DEL &&
        message.cmd() != util::INDEX_RT_ADD &&
        message.cmd() != util::INDEX_RT_UPDATE) {
        LOG(ERROR) << "Invalid message Cmd " << static_cast<int32_t>(message.cmd());
        return util::HawkingRecallRetCode::OPCODE_NOT_FOUND;
    }

    // must has pk!
    uint64_t pk;
    if (!FetchPk_(message, &pk)) {
        LOG(ERROR) << "Invalid message without Primary Key "
            << message.ShortDebugString();
        return util::HawkingRecallRetCode::PK_NOT_EXIST;
    }

    // del action is del the pk in segment's deletion_index
    // BUGFIX: the del opcode lives in util:: (as in the check above), not in
    // util::HawkingRecallRetCode.
    if (message.cmd() == util::INDEX_RT_DEL) {
        return RemoveDocByPk_(pk);
    }

    // update or add
    return AddDocument_(message, pk);
}

// prevent same field_name with different field_type
bool Table::DiffOneSchema_(
    std::shared_ptr<IndexlibIndexConfig> schema,
    std::shared_ptr<IndexlibIndexConfig> exist_index_config) const {
    for (const auto& field_config : schema.fields()) {
        const std::string& field_name = field_config.field_name();
        util.ColumnType field_type = field_config.field_type();

        for (const auto& exist_field_config : exist_index_config.fields()) {
            const std::string& exist_field_name = exist_field_config.field_name();
            util.ColumnType exist_field_type = exist_field_config.field_type();
            if (field_name == exist_field_name && field_type != exist_field_type) {
                LOG(ERROR) << "Same field_name " << field_name
                    << " with different field_type, exist field_type "
                    << static_cast<int32_t>(exist_field_type) << " new field_type "
                    << static_cast<int32_t>(field_type);
                return false;
            }
        }
    }

    return true;
}

// prevent same field_name with different field_type
// prevent same field_name with different field_type:
// compare the incoming schema against the schema of every live segment.
bool Table::DiffExistSchema_(std::shared_ptr<IndexlibIndexConfig> schema) const {
    AllSegment all_segment = GetAllSegment_();
    // BUGFIX: DiffOneSchema_ compares two schemas, but the original passed
    // the segments themselves — pass each segment's Schema() instead.
    if (all_segment.building_segment &&
        !DiffOneSchema_(schema, all_segment.building_segment->Schema())) {
        LOG(ERROR) << "building segment diff error";
        return false;
    }

    for (const auto dumping_segment : all_segment.dumping_segment) {
        if (!DiffOneSchema_(schema, dumping_segment->Schema())) {
            LOG(ERROR) << "dumping segment diff error";
            return false;
        }
    }

    for (const auto disk_segment : all_segment.disk_segment) {
        if (!DiffOneSchema_(schema, disk_segment->Schema())) {
            LOG(ERROR) << "disk segment diff error";
            return false;
        }
    }

    return true;
}

// Standalone schema validation: non-null, at least one field, every field
// type in the valid range, no duplicate field names, and every inverted
// config referencing a declared field.
bool Table::SchemaBasicCheck_(std::shared_ptr<IndexlibIndexConfig> schema) const {
    if (unlikely(!schema)) {
        LOG(ERROR) << "empty schema";
        return false;
    }

    if (unlikely(schema->fields().empty())) {
        LOG(ERROR) << "without any field";
        return false;
    }

    std::unordered_set<std::string> field_set;
    field_set.reserve(schema->fields_size());
    for (const auto& field : schema->fields()) {
        if (field.field_type() < util::ColumnType::COLUMN_INT8 ||
            field.field_type() > util::ColumnType::COLUMN_STRING_LIST) {
            // BUGFIX: `static_casst` typo.
            LOG(ERROR) << "invalid field_type "
                << static_cast<int32_t>(field.field_type());
            return false;
        }

        auto ret = field_set.emplace(field.field_name());
        if (!ret.second) {
            LOG(ERROR) << "duplicate field_name " << field.field_name();
            return false;
        }
    }

    for (const auto& inverted_config : schema->inverted_configs()) {
        auto iter = field_set.find(inverted_config.field_name());
        if (iter == field_set.end()) {
            LOG(ERROR) << "invert field " << inverted_config.field_name()
                << " not in fields. schema : "
                << schema->ShortDebugString();
            return false;
        }
    }

    return true;
}

// A schema is acceptable only when it is self-consistent AND compatible
// with every schema already live in this table.
bool Table::CheckSchema_(std::shared_ptr<IndexlibIndexConfig> schema) const {
    if (!SchemaBasicCheck_(schema)) {
        return false;
    }
    return DiffExistSchema_(schema);
}

// Validate and install a new schema. When the table has a building segment,
// a fresh one is created under the new schema and swapped in. A TTL column
// is appended automatically when the schema enables life_cycle.
bool Table::UpdateSchema(std::shared_ptr<IndexlibIndexConfig> schema) {
    // BUGFIX: CheckSchema_ requires the schema under validation as argument.
    if (!CheckSchema_(schema)) {
        return false;
    }

    // BUGFIX: schema is a shared_ptr — use ->life_cycle().
    if (schema->life_cycle()) {
        auto* ttl_field_config = schema->add_fields();
        ttl_field_config->set_field_name(FLAGS_default_ttl_col_name);
        ttl_field_config->set_field_type(util::COLUMN_INT64);
        ttl_field_config->mutable_default_value()->set_int_value(0);
    }

    std::shared_lock<std::shared_mutex> shield_snapshot_lock(snapshot_lock_);

    std::unique_lock<std::shared_mutex> w_lock(mgr_lock_);
    std::shared_ptr<MemSegment> new_building_segment;
    if (building_segment_) {
        new_building_segment = std::make_shared<MemSegment>();
        if (!new_building_segment->Init(schema)) {
            LOG(ERROR) << "New IndexConfig Init new segment fail "
                << schema->ShortDebugString();
            return false;
        }
    }

    schema_ = schema;
    if (new_building_segment) {
        UpdateBuildingSegment_(building_segment_, new_building_segment, false);
    }

    return true;
}

// Hash the requested inverted field's value into a term key using the
// matching field config from the schema. Returns false when no field
// matches or hashing fails for every matching config.
bool Table::FetchTerm_(
    std::shared_ptr<IndexlibIndexConfig> schema,
    const IndexlibRetrieveRequest& req,
    uint64_t* term) {
    const std::string& target_field = req.inverted_info().field_name();
    for (const auto& field_config : schema->fields()) {
        if (field_config.field_name() != target_field) {
            continue;
        }
        if (GetFieldHash(field_config, &req.inverted_info().field_value(), term)) {
            return true;
        }
    }

    return false;
}

void Table::Query(const IndexlibRetrieveRequest& req, IndexlibRetrieveResponse* resp) {
    // generate termkey and inverted fieldid
    uint64_t term_key;
    AllSegment all_segment = GetAllSegment_();
    if (!FetchTerm_(all_segment.schema, req, &term_key)) {
        return;
    }

    const auto& select_fields = req.select_field_names();
    const auto& filter_fields = req.filter_fields();
    const auto& where_fields = req.where_fields();

    Status ret = Status::OK;
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->Query(
            req.inverted_info().field_name(),
            term_key, req.topk(),
            select_fields, filter_fields, where_fields, resp);
        if (Status::OK != ret && ret != Status::NotFound) {
            return;
        }
        if (resp->docs_size() >= req.topk()) {
            return;
        }
    }

    for (auto seg : all_segment.dumping_segment) {
        ret = seg->Query(
            req.inverted_info().field_name(),
            term_key, req.topk() - resp->docs_size(),
            select_fields, filter_fields, where_fields, resp);
        if (Status::OK != ret && ret != Status::NotFound) {
            return;
        }

        if (resp->docs_size() >= req.topk()) {
            return;
        }
    }

    for (auto seg : all_segment.disk_segment) {
        ret = seg->Query(
            req.inverted_info().field_name(),
            term_key, req.topk() - resp->docs_size(),
            select_fields, filter_fields, where_fields, resp);

        if (Status::OK != ret && ret != Status::NotFound) {
            return;
        }

        if (resp->docs_size() >= req.topk()) {
            return;
        }
    }
}

// Retrieve documents by primary key, probing building -> dumping -> disk
// segments in freshness order and appending results to |resp|. Unlike
// Query(), there is no topk cutoff: every segment is consulted for the
// requested pk list. Status::NotFound from a segment is benign (pk absent
// there); any other failure aborts the lookup.
void Table::QueryByPk(
    const IndexlibRetrieveByPkRequest& req, IndexlibRetrieveResponse* resp) {
    const auto& select_fields = req.select_field_names();
    const auto& filter_fields = req.filter_fields();
    const auto& where_fields = req.where_fields();
    AllSegment all_segment = GetAllSegment_();

    Status ret = Status::OK;
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->QueryByPk(
            req.pk_list(), select_fields, filter_fields, where_fields, resp);
        if (Status::OK != ret && Status::NotFound != ret) {
            return;
        }
    }

    // const auto& avoids copying each shared_ptr per iteration (atomic
    // refcount traffic for no ownership benefit).
    for (const auto& seg : all_segment.dumping_segment) {
        ret = seg->QueryByPk(
            req.pk_list(), select_fields, filter_fields, where_fields, resp);
        if (Status::OK != ret && Status::NotFound != ret) {
            return;
        }
    }

    for (const auto& seg : all_segment.disk_segment) {
        ret = seg->QueryByPk(
            req.pk_list(), select_fields, filter_fields, where_fields, resp);
        if (Status::OK != ret && Status::NotFound != ret) {
            return;
        }
    }
}

// Used by the ANN index's local-data path: evaluate the filter/where columns
// against the document identified by |pk|. Segments are probed in freshness
// order (building -> dumping -> disk); the first segment that knows the pk
// (returns Status::OK) decides the outcome. Returns false when no segment
// can resolve the pk.
bool Table::FilterCheck(
    int64_t pk,
    const google::protobuf::RepeatedPtrField<util::FilterColumn>& filter_fields,
    const google::protobuf::RepeatedPtrField<util::FilterColumn>& where_fields) {
    AllSegment all_segment = GetAllSegment_();

    ReturnValue<bool> ret{Status::OK, false};
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->FilterCheck(pk, filter_fields, where_fields);
        if (ret.Code() == Status::OK) {
            return ret.Value();
        }
    }

    // const auto& avoids copying each shared_ptr per iteration (atomic
    // refcount traffic for no ownership benefit).
    for (const auto& seg : all_segment.dumping_segment) {
        ret = seg->FilterCheck(pk, filter_fields, where_fields);
        if (Status::OK == ret.Code()) {
            return ret.Value();
        }
    }

    for (const auto& seg : all_segment.disk_segment) {
        ret = seg->FilterCheck(pk, filter_fields, where_fields);
        if (Status::OK == ret.Code()) {
            return ret.Value();
        }
    }

    // pk not present in any segment.
    return false;
}

// Single-column overload of FilterCheck: evaluate one filter (or, when
// |is_where| is true, where) column against the document identified by |pk|.
// Segments are probed in freshness order; the first segment that resolves
// the pk (Status::OK) decides the outcome. Returns false when no segment
// can resolve the pk.
bool Table::FilterCheck(int64_t pk, const util::FilterColumn& field, bool is_where) {
    AllSegment all_segment = GetAllSegment_();

    ReturnValue<bool> ret{Status::OK, false};
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->FilterCheck(pk, field, is_where);
        if (ret.Code() == Status::OK) {
            return ret.Value();
        }
    }

    // const auto& avoids copying each shared_ptr per iteration (atomic
    // refcount traffic for no ownership benefit).
    for (const auto& seg : all_segment.dumping_segment) {
        ret = seg->FilterCheck(pk, field, is_where);
        if (Status::OK == ret.Code()) {
            return ret.Value();
        }
    }

    for (const auto& seg : all_segment.disk_segment) {
        ret = seg->FilterCheck(pk, field, is_where);
        if (Status::OK == ret.Code()) {
            return ret.Value();
        }
    }

    // pk not present in any segment.
    return false;
}

// Used by the ANN index's local-data path: populate the requested select
// columns for the document identified by |pk|. Segments are probed in
// freshness order (building -> dumping -> disk); the first segment that
// resolves the pk (Status::OK) fills |select_fields| and the search stops.
// If no segment knows the pk, |select_fields| is left untouched.
void Table::FillLocaldata(
    int64_t pk,
    google::protobuf::RepeatedPtrField<util::SelectColumn>* select_fields) {
    AllSegment all_segment = GetAllSegment_();

    ReturnValue<bool> ret{Status::OK, false};
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->FillLocaldata(pk, select_fields);
        if (ret.Code() == Status::OK) {
            return;
        }
    }

    // const auto& avoids copying each shared_ptr per iteration (atomic
    // refcount traffic for no ownership benefit).
    for (const auto& seg : all_segment.dumping_segment) {
        ret = seg->FillLocaldata(pk, select_fields);
        if (Status::OK == ret.Code()) {
            return;
        }
    }

    for (const auto& seg : all_segment.disk_segment) {
        ret = seg->FillLocaldata(pk, select_fields);
        if (Status::OK == ret.Code()) {
            return;
        }
    }
}

// Single-column overload of FillLocaldata: populate one select column for
// the document identified by |pk|. Segments are probed in freshness order;
// the first segment that resolves the pk (Status::OK) fills |select_field|
// and the search stops. If no segment knows the pk, |select_field| is left
// untouched.
void Table::FillLocaldata(
    int64_t pk,
    util::SelectColumn* select_field) {
    AllSegment all_segment = GetAllSegment_();

    ReturnValue<bool> ret{Status::OK, false};
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->FillLocaldata(pk, select_field);
        if (ret.Code() == Status::OK) {
            return;
        }
    }

    // const auto& avoids copying each shared_ptr per iteration (atomic
    // refcount traffic for no ownership benefit).
    for (const auto& seg : all_segment.dumping_segment) {
        ret = seg->FillLocaldata(pk, select_field);
        if (Status::OK == ret.Code()) {
            return;
        }
    }

    for (const auto& seg : all_segment.disk_segment) {
        ret = seg->FillLocaldata(pk, select_field);
        if (Status::OK == ret.Code()) {
            return;
        }
    }
}

// Field-id overload of FillLocaldata: fetch the value of column |field_id|
// for the document identified by |pk|. Segments are probed in freshness
// order; the first segment that resolves the pk (Status::OK) fills
// |field_value| and the search stops. If no segment knows the pk,
// |field_value| is left untouched.
void Table::FillLocaldata(
    int64_t pk,
    int32_t field_id,
    util::FieldValue* field_value) {
    AllSegment all_segment = GetAllSegment_();

    ReturnValue<bool> ret{Status::OK, false};
    if (all_segment.building_segment) {
        ret = all_segment.building_segment->FillLocaldata(pk, field_id, field_value);
        if (ret.Code() == Status::OK) {
            return;
        }
    }

    // const auto& avoids copying each shared_ptr per iteration (atomic
    // refcount traffic for no ownership benefit).
    for (const auto& seg : all_segment.dumping_segment) {
        ret = seg->FillLocaldata(pk, field_id, field_value);
        if (Status::OK == ret.Code()) {
            return;
        }
    }

    for (const auto& seg : all_segment.disk_segment) {
        ret = seg->FillLocaldata(pk, field_id, field_value);
        if (Status::OK == ret.Code()) {
            return;
        }
    }
}

}
}