#include "segment/disk_segment_merger.h"

namespace hawking {
namespace indexlib {

DECLARE_string(default_ttl_col_name);

// Writes the per-doc offset table for a fixed-stride varlen attribute.
// Every doc occupies sizeof(size_t) + sizeof(ArrayLengthType) bytes in the
// varlen metafile, so the offsets form a simple arithmetic progression.
// Returns true on a successful write; closes the offset writer either way.
bool DiskSegmentMerger::WriteOffsetFileT_() {
    std::vector<size_t> offsets;
    offsets.resize(centroid_info_.size(), 0);

    size_t start_offset = 0;
    size_t length = sizeof(size_t) + sizeof(ArrayLengthType);
    for (size_t doc_idx = 0; doc_idx < centroid_info_.size(); ++doc_idx) {
        offsets[doc_idx] = start_offset + doc_idx * length;
    }

    // BUGFIX: std::vector has size(), not length(); the writer is held by a
    // smart pointer, so Close() must go through operator->; and Write()
    // returns Status directly at every other call site, so the stray .Code()
    // is dropped.
    Status ret = attr_offset_file_writer_->Write(
        (uint8_t*)(offsets.data()), offsets.size() * sizeof(size_t), 0);
    attr_offset_file_writer_->Close();
    return ret == Status::OK;
}

// Returns how many elements |field_value| holds for a list-typed column.
// Relies on the ColumnType enumerator ordering: integer-list types sort at
// or before COLUMN_INT64_LIST, float/double-list types at or before
// COLUMN_DOUBLE_LIST, and byte/string list types come after both.
ArrayLengthType DiskSegmentMerger::GetElementCount_(
    util::ColumnType field_type, util::FieldValue* field_value) const {
    if (field_type <= util::ColumnType::COLUMN_INT64_LIST) {
        return field_value->int_values().values_size();
    }
    if (field_type <= util::ColumnType::COLUMN_DOUBLE_LIST) {
        return field_value->double_values().values_size();
    }
    return field_value->bytes_values().values_size();
}

// Fills and flushes the varlen metafile for fixed-element-size varlen
// columns (lists, and single strings): for every doc it records the datafile
// offset plus either the string length or the list element count, and
// accumulates the total datafile size into |*datafile_length|.
// NOTE(review): segment_group is presumably a member populated by
// AttributeRead_ — confirm; it is not declared in this translation unit.
bool DiskSegmentMerger::WriteVarlenMetaFileT_(
    int32_t field_id, util::ColumnType field_type,
    const uint32_t element_length, size_t* datafile_length) {
    size_t varlen_metafile_length = sizeof(VarlenItem) * centroid_info_.size();
    attr_varlen_metafile_writer_->ReserveFile(varlen_metafile_length);

    VarlenMetaBuffer varlen_metafile_buffer_writer(varlen_metafile_length);
    for (const auto& doc : centroid_info_) {
        const DocContext* doc_ctx = segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
        util::FieldValue* field_value = doc_ctx->GetField(field_id);
        if (unlikely(!field_value)) {
            LOG(ERROR) << "get value from segment " << doc.seg_idx
                << " of doc_id " << doc.old_docid << " fail";
            return false;
        }

        // BUGFIX: varlen_metafile_buffer_writer is a value object, so members
        // are accessed with '.'; GetElementCount_ takes the field type; and
        // the per-doc byte count must be computed inside the branch where the
        // element count exists (element_count was out of scope before).
        varlen_metafile_buffer_writer.AppendOffset(*datafile_length);

        size_t doc_bytes = 0;
        if (field_type == util::ColumnType::COLUMN_STRING) {
            const std::string& str = field_value->bytes_value();
            varlen_metafile_buffer_writer.AppendStringLength(str.length());
            // A single string is stored as raw bytes in the datafile
            // (DumpString_ advances the data offset by str.length()).
            doc_bytes = str.length();
        } else {
            ArrayLengthType element_count = GetElementCount_(field_type, field_value);
            varlen_metafile_buffer_writer.AppendElementCount(element_count);
            doc_bytes = static_cast<size_t>(element_length) * element_count;
        }

        *datafile_length += doc_bytes;
    }

    bool ret = (Status::OK == attr_varlen_metafile_writer_->Write(
        varlen_metafile_buffer_writer.Buffer(), varlen_metafile_length, 0));
    attr_varlen_metafile_writer_->Close();
    return ret;
}

bool DiskSegmentMerger::DumpMultiDouble_(int32_t field_id, const uint32_t element_length) {
    size_t datafile_offset = 0;
    for (const auto& doc : centroid_info_) {
        const DocContext* doc_ctx = segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
        util::FieldValue* field_value = doc_ctx->GetField(field_id);

        for (size_t idx = 0; idx < field_value->double_values().values_size(); ++idx) {
            double val = field_value->double_values().values(idx);
            if (Status::OK !=
                attr_data_file_writer_->Write(&val, element_length, datafile_offset)) {
                LOG(ERROR) << "write datafile fail for field_id " << field_id
                    << " at offset " << datafile_offset
                    << " with length " << element_length;
                return false;
            }

            datafile_offset += element_length;
        }
    }

    return true;
}

bool DiskSegmentMerger::DumpString_(int32_t field_id) {
    size_t datafile_offset = 0;
    for (const auto& doc : centroid_info_) {
        const DocContext* doc_ctx = segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
        util::FieldValue* field_value = doc_ctx->GetField(field_id);

        const std::string& str = field_value->bytes_value();
        if (Status::OK !=
            attr_data_file_writer_->Write(
                (uint8_t*)str.data(), str.length(), datafile_offset)) {
            LOG(ERROR) << "write datafile fail for field_id " << field_id
                    << " at offset " << datafile_offset
                    << " with length " << str.length();
            return false;
        }

        datafile_offset += str.length();
    }

    return true;
}

bool DiskSegmentMerger::DumpMultiT_(
    int32_t field_id, util::ColumnType field_type, const uint32_t element_length) {
    // 1. write offset file
    if (!WriteOffsetFileT_()) {
        LOG(ERROR) << "Write offsetfile fail";
        return false;
    }
    
    // 2. write varlen_metafile + calc datafile length
    size_t datafile_length = 0;
    if (!WriteVarlenMetaFileT_(field_id, field_type, element_length, &datafile_length)) {
        LOG(ERROR) << "Write offsetfile fail";
        return false;
    }

    // 3. write datafile
    attr_data_file_writer_->ReserveFile(datafile_length);
    switch (field_type) {
    case util::ColumnType::COLUMN_STRING: {
        return DumpString_(field_id);
    }
    case util::ColumnType::COLUMN_INT8_LIST: {
        return DumpMultiInt_<int8_t>DumpMultiInt_(field_id);
    }
    case util::ColumnType::COLUMN_INT16_LIST: {
        return DumpMultiInt_<int16_t>DumpMultiInt_(field_id);
    }
    case util::ColumnType::COLUMN_INT32_LIST: {
        return DumpMultiInt_<int32_t>DumpMultiInt_(field_id);
    }
    case util::ColumnType::COLUMN_INT64_LIST: {
        return DumpMultiInt_<int64_t>DumpMultiInt_(field_id);
    }
    case util::ColumnType::COLUMN_FLOAT_LIST: {
        return DumpMultiFloat_<float>(field_id);
    }
    case util::ColumnType::COLUMN_DOUBLE_LIST: {
        return DumpMultiFloat_<double>(field_id);
    }
    default:
        LOG(ERROR) << "unknown field_type " << static_cast<int32_t>(field_type);
        return false;
    }
}

// Writes the offset file for a string-list column and, in the same pass,
// measures the eventual varlen metafile and datafile sizes (each doc's meta
// entry is offset + element count + one StringLengthType per string).
bool DiskSegmentMerger::WriteOffsetFileString_(
    int32_t field_id, size_t* varlen_metafile_length, size_t* datafile_length) {
    std::vector<size_t> offsets;
    offsets.resize(centroid_info_.size(), 0);

    for (size_t doc_idx = 0; doc_idx < centroid_info_.size(); ++doc_idx) {
        const DocUnit& doc = centroid_info_[doc_idx];
        const DocContext* doc_ctx = segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
        util::FieldValue* field_value = doc_ctx->GetField(field_id);
        if (unlikely(!field_value)) {
            LOG(ERROR) << "get value from segment " << doc.seg_idx
                << " of doc_id " << doc.old_docid << " fail";
            return false;
        }

        offsets[doc_idx] = *varlen_metafile_length;

        // per-doc header in the varlen metafile: datafile offset + count
        *varlen_metafile_length += sizeof(size_t);
        *varlen_metafile_length += sizeof(ArrayLengthType);
        for (int idx = 0; idx < field_value->bytes_values().values_size(); ++idx) {
            const std::string& str = field_value->bytes_values().values(idx);

            *varlen_metafile_length += sizeof(StringLengthType);
            *datafile_length += str.length();
        }
    }

    // BUGFIX: std::vector has size(), not length(); Close() must go through
    // operator-> on the smart-pointer writer; Write() returns Status directly
    // at every other call site, so the stray .Code() is dropped.
    Status ret = attr_offset_file_writer_->Write(
        (uint8_t*)(offsets.data()), offsets.size() * sizeof(size_t), 0);
    attr_offset_file_writer_->Close();
    return ret == Status::OK;
}

bool DiskSegmentMerger::WriteVarlenMetaAndDataFileString_(
    int32_t field_id, VarlenMetaBuffer* varlen_metafile_buffer_writer) {
    size_t datafile_offset = 0;
    for (size_t doc_idx = 0; doc_idx < centroid_info_.size(); ++doc_idx) {
        const DocUnit& doc = centroid_info_[doc_idx];
        const DocContext* doc_ctx = segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
        util::FieldValue* field_value = doc_ctx->GetField(field_id);

        varlen_metafile_buffer_writer->AppendOffset(datafile_offset);
        ArrayLengthType elem_count = field_value->bytes_values().values_size();
        varlen_metafile_buffer_writer->AppendElementCount(elem_count);

        for (size_t idx = 0; idx < field_value->bytes_values().values_size(); ++idx) {
            const std::string& str = field_value->bytes_values().values(idx);

            varlen_metafile_buffer_writer->AppendStringLength(str.length());
            if (Status::OK != attr_data_file_writer_->Write(
                    (uint8_t*)(str.data()), str.length(), datafile_offset)) {
                LOG(ERROR) << "write datafile fail for field_id " << field_id
                    << " at offset " << datafile_offset
                    << " with length " << str.length();
                return false;
            }
            datafile_offset += str.length();
        }
    }

    return true;
}

bool DiskSegmentMerger::DumpMultiString_(int32_t field_id) {
    // 1. write offset file
    size_t varlen_metafile_length = 0;
    size_t datafile_length = 0;
    if (!WriteOffsetFileString_(field_id, &varlen_metafile_length, &datafile_length)) {
        LOG(ERROR) << "Write offsetfile fail";
        return false;
    }

    // 2. write varlen_metafile + calc datafile length
    attr_varlen_metafile_writer_->ReserveFile(varlen_metafile_length);
    attr_data_file_writer_->ReserveFile(datafile_length);
    VarlenMetaBuffer varlen_metafile_buffer_writer(varlen_metafile_length);
    return WriteVarlenMetaAndDataFileString_(field_id, &varlen_metafile_buffer_writer);
}

// Merges one varlen attribute (string / string-list / numeric-list) across
// all disk segments: creates offset + varlen_meta + data writers under
// |attr_dir|, batch-reads the per-segment values, then dumps in merged order.
// NOTE(review): the local segment_group is filled here but the Dump*
// helpers reference an identically-named symbol not passed in — confirm it
// shadows a member they share.
bool DiskSegmentMerger::MergeVarlenAttributeIndex_(
    std::vector<std::shared_ptr<AttributeDiskIndexer>> attr_index_list,
    const std::string& attr_dir,
    int32_t field_id,
    util::ColumnType field_type) {
    // 1. index file init: offset + varlen_meta + data
    std::string offset_file = butil::string_printf("%s/offset_file", attr_dir.data());
    attr_offset_file_writer_.reset(new NormalFileWriter(offset_file));
    if (attr_offset_file_writer_->Init() != Status::OK) {
        LOG(ERROR) << "fail to Init offset file writer " << offset_file;
        return false;
    }

    std::string varlen_metafile =
        butil::string_printf("%s/varlen_metafile", attr_dir.data());
    attr_varlen_metafile_writer_.reset(new NormalFileWriter(varlen_metafile));
    if (attr_varlen_metafile_writer_->Init() != Status::OK) {
        LOG(ERROR) << "fail to Init varlen metafile writer " << varlen_metafile;
        return false;
    }

    std::string data_file = butil::string_printf("%s/data_file", attr_dir.data());
    // BUGFIX: stray '>' after NormalFileWriter did not compile.
    attr_data_file_writer_.reset(new NormalFileWriter(data_file));
    if (attr_data_file_writer_->Init() != Status::OK) {
        LOG(ERROR) << "fail to Init data file writer " << data_file;
        return false;
    }

    // 2. read attr group by segment
    indexlib::ArenaWrapper arena_wrapper;
    std::vector<DiskSegmentPart> segment_group;
    segment_group.reserve(merge_group_.size());
    for (size_t seg_idx = 0; seg_idx < merge_group_.size(); ++seg_idx) {
        segment_group.emplace_back(arena_wrapper);
    }

    if (!AttributeRead_(attr_dir, attr_index_list, &segment_group)) {
        return false;
    }

    // 3. write attr files: offset + varlen_meta + data
    // BUGFIX: 'offset_file_writer' did not exist; use the member writer
    // initialized in step 1.
    attr_offset_file_writer_->ReserveFile(centroid_info_.size() * sizeof(size_t));
    return (field_type == util::ColumnType::COLUMN_STRING_LIST) ?
        DumpMultiString_(field_id) :
        DumpMultiT_(field_id, field_type, merge_group_[0]->ElementLength());
}

bool DiskSegmentMerger::MergeFixlenAttributeIndex_(
    std::vector<std::shared_ptr<AttributeDiskIndexer>> attr_index_list,
    const std::string& attr_dir,
    int32_t field_id,
    util::ColumnType field_type) {
    // 1. index file init: data
    std::string data_file = butil::string_printf("%s/data_file", attr_dir.data());
    attr_data_file_writer_.reset(new NormalFileWriter>(data_file));
    if (attr_data_file_writer_->Init() != Status::OK) {
        LOG(ERROR) << "fail to Init data file writer " << data_file;
        return false;
    }

    // 2. read attr group by segment
    indexlib::ArenaWrapper arena_wrapper;
    std::vector<DiskSegmentPart> segment_group;
    segment_group.reserve(merge_group_.size());
    for (size_t seg_idx = 0; seg_idx < merge_group_.size(); ++seg_idx) {
        segment_group.emplace_back(arena_wrapper);
    }

    if (!AttributeRead_(attr_dir, attr_index_list, &segment_group)) {
        return false;
    }    

    // 3. write attr files: data
    attr_data_file_writer_->ReserveFile(
        centroid_info_.size() * merge_group_[0]->ElementLength());
    switch (field_type) {
    case util::ColumnType::COLUMN_INT8:{
        return DumpSingleInt_<int8_t>(field_id);
    }
    case util::ColumnType::COLUMN_INT16:{
        return DumpSingleInt_<int16_t>(field_id);
    }
    case util::ColumnType::COLUMN_INT32:{
        return DumpSingleInt_<int32_t>(field_id);
    }
    case util::ColumnType::COLUMN_INT64:{
        return DumpSingleInt_<int64_t>(field_id);
    }
    case util::ColumnType::COLUMN_FLOAT:{
        return DumpSingleFloat_<float>(field_id);
    }
    case util::ColumnType::COLUMN_DOUBLE:{
        return DumpSingleFloat_<double>(field_id);
    }
    default:
        LOG(ERROR) << "unknown field_type " << static_cast<int32_t>(field_type);
        return false;
    }
}

bool DiskSegmentMerger::AttributeRead_(
    const std::string& field_name,
    const std::vector<std::shared_ptr<AttributeDiskIndexer>>& attr_index_list,
    std::vector<DiskSegmentPart>* segment_group) {
    // load old doc_id group by segment
    for (const auto& doc_unit : centroid_info_) {
        segment_group[doc_unit.seg_idx].PushDocId(doc_unit.old_docid);
    }

    // batchread group by segment
    for (size_t seg_idx = 0; seg_idx < segment_group.size(); ++seg_idx) {
        if (Status::OK !=
            attr_index_list[seg_idx]->BatchRead(segment_group[seg_idx].SegmentCtx())) {
            LOG(ERROR) << "fail to BatchRead disk segment " << seg_idx
                << " for " << field_name;
            return false;
        }
    }

    return true;
}

bool DiskSegmentMerger::MergeAttributeIndex_() {
    // 1. attr dir init
    std::string attribute_dir =
        butil::string_printf("%s/attribute_index", new_disk_segment_dir_.data());
    FslibWrapper::Mkdir(attribute_dir);
    std::shared_ptr<IndexlibIndexConfig> schema = merge_group_[0]->Schema();

    // 2. traverse fields to merge attr index files of every segment
    for (const auto& field_config : schema->fields()) {
        // mkdir for attr
        const std::string& field_name = field_config.field_name();
        std::string field_attr_dir =
            butil::string_printf("%s/%s", attribute_dir.data(), field_name.data());
        FslibWrapper::Mkdir(field_attr_dir);

        // find field_id
        int32_t field_id;
        util::ColumnType field_type;
        merge_group_[0]->GetFieldIdAndTypedByName(field_name, &field_id, &field_type);

        // collect attr index of every segment
        std::vector<std::shared_ptr<AttributeDiskIndexer>> attr_index_list;
        attr_index_list.reserve(merge_group_.size());
        for (auto merge_item : merge_group_) {
            attr_index_list.emplace_back(merge_item->GetAttributeIndexer(field_id));
        }

        // do merge
        if (attr_index_list[0]->IsVarlen()) {
            if (!MergeVarlenAttributeIndex_(
                    attr_index_list, field_attr_dir, field_id, field_type)) {
                LOG(ERROR) << "Merge fail because of attr field " << field_name
                    << " merge fail";
                return false;
            }
        } else {
            if (!MergeFixlenAttributeIndex_(
                    attr_index_list, field_attr_dir, field_id, field_type)) {
                LOG(ERROR) << "Merge fail because of attr field " << field_name
                    << " merge fail";
                return false;
            }
        }
    }

    return true;
}

// Scans every source segment's TTL column and records, per segment, the doc
// ids whose TTL timestamp has passed into obsoleted_doc_ids_. Returns true
// (and does nothing) when the index has no TTL field.
bool DiskSegmentMerger::FindObsoleted_() {
    int32_t ttl_field_id;
    util::ColumnType ttl_field_type;
    if (!merge_group_[0]->GetFieldIdAndTypedByName(
            FLAGS_default_ttl_col_name, &ttl_field_id, &ttl_field_type)) {
        LOG(INFO) << "without ttl field";
        return true;
    }

    int64_t current_ts = butil::gettimeofday_s();
    indexlib::ArenaWrapper arena_wrapper;
    SegmentContext segment_ctx(arena_wrapper);
    for (size_t seg_idx = 0; seg_idx < merge_group_.size(); ++seg_idx) {
        // BUGFIX: the context is reused across segments; without a Clear()
        // doc ids from earlier segments accumulate into every later
        // BatchRead (Clear() is the same reset used per term in
        // MergeInvertIndexOneField_).
        segment_ctx.Clear();

        // by const ref: avoid a shared_ptr copy per segment
        const std::shared_ptr<DiskSegment>& merge_item = merge_group_[seg_idx];
        std::shared_ptr<AttributeIndexer> ttl_attr_index =
            merge_item->GetAttributeIndexer(ttl_field_id);
        const PrimaryKeyDiskIndexer* pk_index =
            dynamic_cast<PrimaryKeyDiskIndexer*>(merge_item->GetPkIndexer().get());
        const DeletionMapIndexer* deletion_index = merge_item->GetDeletionIndexer().get();
        auto iter = pk_index->CreateIterator();

        // collect all live (non-deleted) doc ids of this segment
        uint64_t pk;
        DocId doc_id;
        while (iter->HasNext()) {
            iter->Value(&pk, &doc_id);
            if (!deletion_index->IsDeleted(doc_id)) {
                segment_ctx.PushDocId(doc_id);
            }
            iter->Next();
        }

        if (Status::OK != ttl_attr_index->BatchRead(&segment_ctx)) {
            LOG(WARNING) << "read ttl fail of ttl_field_id " << ttl_field_id;
            return false;
        }

        // a doc is obsoleted once its TTL timestamp is not in the future
        int64_t ts = 0;
        for (size_t doc_idx = 0; doc_idx < segment_ctx.DocIds().size(); ++doc_idx) {
            const DocContext& doc_ctx = segment_ctx.DocContexts()[doc_idx];
            doc_ctx.GetField(ttl_field_id, &ts);

            if (ts <= current_ts) {
                obsoleted_doc_ids_[seg_idx].emplace(segment_ctx.DocIds()[doc_idx]);
            }
        }

        LOG(INFO) << "disk segment " << seg_idx << " has obsoleted "
            << obsoleted_doc_ids_[seg_idx].size() << " docs";
    }

    return true;
}

bool DiskSegmentMerger::MergePkIndex_() {
    // 1. pkindex file init
    std::string pk_dir =
        butil::string_printf("%s/primary_key", new_disk_segment_dir_.data());
    FslibWrapper::Mkdir(pk_dir);
    std::string filepath = butil::string_printf("%s/data_file", pk_dir.data());
    std::shared_ptr<NormalFileWriter> file_writer =
        std::make_shared<NormalFileWriter>(filepath);
    Status ret = file_writer->Init();
    if (ret != Status::OK) {
        LOG(ERROR) << "Merge fail because of pk file " << filepath
            << " Init fail";
        return false;
    }

    // 2. find ttl obsoleted docs in every disk segment
    if (!FindObsoleted_()) {
        LOG(ERROR) << "Merge fail because of ttl attr file read fail";
        return false;
    }

    // 2. exact reserve valid doc_sum in all disk segments
    size_t doc_count = 0;
    for (size_t seg_idx = 0; seg_idx < merge_group_.size(); ++seg_idx) {
        std::shared_ptr<DiskSegment> merge_item = merge_group_[seg_idx];
        doc_count += merge_item->DocCount();
        doc_count -= obsoleted_doc_ids_[seg_idx].size();
    }
    centroid_info_.reserve(doc_count);

    // 3. multi segment merge,
    // new doc order is regenerated by pk from samll to big within all segments
    MinPkHeap min_pk_heap;
    for (size_t seg_idx = 0; seg_idx < merge_group_.size(); ++seg_idx) {
        min_pk_heap.emplace(seg_idx, merge_group_, obsoleted_doc_ids_[seg_idx]);
    }

    DocId current_new_doc_id = 0;
    std::pair<uint64_t, DocId> ret;
    while (!min_pk_heap.empty()) {
        PkChannel& item = min_pk_heap.top();
        min_pk_heap.pop();

        ret = item.Value();
        if (!item.ShouldFilter(ret.second) {
            centroid_info_.emplace_back(
                ret.second, item.SegmentIdx());
            if (has_inverted_) {
                old2new_dict_[item.SegmentIdx()].emplace(ret.second, current_new_doc_id);
            }
            current_new_doc_id++;
        }

        item.Next();
        if (item.HasNext()) {
            min_pk_heap.push(PkChannel);
        }
    }

    // 4. write pkindex file
    std::vector<SortedPrimaryKeyFileWriter::KVItem<uint64_t>> buffer;
    buffer.reserve(centroid_info_.size());
    for (const auto& doc_unit : centroid_info_) {
        buffer.emplace_back(doc_unit.pk, doc_unit.new_doc_id);
    }

    size_t file_bytes = (sizeof(uint64_t) + sizeof(DocId)) * centroid_info_.size();
    auto r = file_writer->ReserveFile(file_bytes);
    if (!r.OK()) {
        LOG(ERROR) << "Merge fail because of pk file write fail with "
            << static_cast<int32_t>(r.Code());
        return false;
    }

    return true;
}

// Writes one merged term: a TermMetaUnit (term, datafile offset, doc count)
// into the offset file and the merged doc-id posting list into the datafile,
// advancing both running offsets on success.
bool DiskSegmentMerger::DumpOneTerm_(
    const std::vector<std::shared_ptr<InvertedDiskIndexer>>& inverted_index_list,
    const std::vector<DocId>& merged_doc_ids,
    const uint64_t term,
    size_t* offset_in_offsetfile,
    size_t* offset_in_datafile) {
    TermMetaUnit term_meta(
        term, *offset_in_datafile, static_cast<uint32_t>(merged_doc_ids.size()));
    if (Status::OK != inverted_offset_file_writer_->Write(
            (uint8_t*)&term_meta, sizeof(term_meta), *offset_in_offsetfile)) {
        LOG(ERROR) << "write termmeta to offsetfile fail";
        return false;
    }

    *offset_in_offsetfile += sizeof(term_meta);

    // BUGFIX: the type is spelled DocId everywhere else; 'DocID' was an
    // undeclared identifier.
    if (Status::OK != inverted_data_file_writer_->Write(
        (uint8_t*)(merged_doc_ids.data()),
        merged_doc_ids.size() * sizeof(DocId),
        *offset_in_datafile)) {
        LOG(ERROR) << "write doclist to datafile fail";
        return false;
    }

    *offset_in_datafile += (merged_doc_ids.size() * sizeof(DocId));
    return true;
}

bool DiskSegmentMerger::MergeInvertIndexOneField_(
    const std::vector<std::shared_ptr<InvertedDiskIndexer>>& inverted_index_list,
    const std::string& inverted_dir,
    const std::string& field_name) {
    // 1. mkdir inverted dir of this field
    std::string inverted_index_dir =
        butil::string_printf("%s/%s", inverted_dir.data(), field_name.data());
    FslibWrapper::Mkdir(cur_inverted_dir);

    // 2. fetch all terms
    std::set<uint64_t> terms;
    for (const auto& inverted_index : inverted_index_list) {
        uint64_t term;
        auto iterator = inverted_index->CreateIterator();
        while (iterator->HasNext()) {
            iterator->Value(&term);
            terms.emplace(term);
            iterator->Next();
        }
    }

    // 3. inverted file init: offset and data
    std::string offset_file = butil::string_printf("%s/offset_file", inverted_dir.data());
    inverted_offset_file_writer_.reset(new NormalFileWriter(offset_file));
    if (Status::OK != offset_file_writer->Init()) {
        LOG(ERROR) << "fail merge inverted index at field_name " << field_name
            << " with offsetfile Init fail";
        return false;
    }
    inverted_offset_file_writer_->ReserveFile(
        (InvertedDiskIndexer::offset_file_item_bytes_) * terms.size());

    std::string data_file = butil::string_printf("%s/data_file", inverted_dir.data());
    inverted_data_file_writer_.reset(new NormalFileWriter(data_file));
    if (Status::OK != inverted_data_file_writer_->Init()) {
        LOG(ERROR) << "fail merge inverted index at field_name " << field_name
            << " with datafile Init fail";
        return false;
    }

    indexlib::ArenaWrapper arena_wrapper;
    std::vector<std::unique_ptr<SegmentContext>> seg_ctxs;
    seg_ctxs.reserve(merge_group_.size());
    for (size_t seg_idx = 0; seg_idx < merge_group_.size(); ++seg_idx) {
        std::unique_ptr<SegmentContext> seg_ctx =
            std::make_unique<SegmentContext>(arena_wrapper);
        seg_ctxs.emplace_back(std::move(seg_ctx));
    }

    size_t offset_in_offsetfile = 0, offset_in_datafile = 0
    // 4. traverse every term and merge and dump
    for (const auto term : terms) {
        // read whole posting of this term within every segment
        std::vector<DocId> merged_doc_ids;
        uint32_t doc_count = 0;
        for (size_t seg_idx = 0; seg_idx < inverted_index_list.size(); ++seg_idx) {
            SegmentContext* seg_ctx = seg_ctxs[seg_idx].get();
            if (Status::OK !=
                    inverted_index_list[seg_idx]->Lookup(term, INT_MAX, seg_ctx)) {
                LOG(ERROR) << "traverse term " << term << " fail";
                return false;
            }

            doc_count += seg_ctx->RelateDocCount();
        }

        // merge all new_doc_ids of this term within every segment
        merged_doc_ids.reserve(doc_count);
        for (size_t seg_idx = 0; seg_idx < seg_ctxs.size(); ++seg_idx) {
            const std::vector<DocId>& disk_seg_old_doc_ids = seg_ctxs[seg_idx]->DocIds();
            const std::unordered_map<DocId, DocId>& old2new_dict = old2new_dict_[seg_idx];
            for (const auto disk_seg_old_doc_id : disk_seg_old_doc_ids) {
                auto iter = old2new_dict.find(disk_seg_old_doc_id);
                if (iter == old2new_dict.end()) {
                    LOG(ERROR) << "old doc_id " << disk_seg_old_doc_id
                        << " in seg_idx " << seg_idx << " not found in old2new_dict";
                    return false;
                }

                merged_doc_ids.emplace_back(iter->second);
            }
        }

        // shuffle
        std::random_device rd;
        std::mt19937 g(rd());
        std::shuffle(merged_doc_ids.begin(), merged_doc_ids.end(), g);

        // dump
        if (!DumpOneTerm_(
                inverted_index_list, merged_doc_ids, term,
                &offset_in_offsetfile, &offset_in_datafile)) {
            LOG(ERROR) << "dump term " << term << " fail";
            return false;
        }

        // reset
        for (size_t seg_idx = 0; seg_idx < seg_ctxs.size(); ++seg_idx) {
            seg_ctxs[seg_idx]->Clear();
        }
    }

    return true;
}

bool DiskSegmentMerger::MergeInvertIndex_() {
    if (!has_inserted_) {
        return true;
    }

    // 1. invert dir init
    std::string inverted_dir =
        butil::string_printf("%s/inverted_index", new_disk_segment_dir_.data());
    FslibWrapper::Mkdir(inverted_dir);
    std::shared_ptr<IndexlibIndexConfig> schema = merge_group_[0]->Schema();

    for (const auto& inverted_config : schema->inverted_configs()) {
        const std::string& inverted_field_name = inverted_config.field_name();

        // find field_id
        int32_t field_id;
        util::ColumnType field_type;
        merge_group_[0]->GetFieldIdAndTypedByName(
            inverted_field_name, &field_id, &field_type);

        std::vector<std::shared_ptr<InvertedDiskIndexer>> inverted_index_list;
        for (auto merge_item : merge_group_) {
            inverted_index_list.emplace_back(merge_item->GetInvertedIndexer(field_id));
        }

        if (!MergeInvertIndexOneField_(
                inverted_index_list, inverted_dir, inverted_field_name)) {
            return false;
        }
    }

    return true;
}

}
}