#pragma once
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

namespace hawking {
namespace indexlib {

#pragma pack(1)
// Locator for one variable-length attribute value: byte offset of the payload
// in the data file plus the number of elements stored there.
// Packed to 1-byte alignment so the in-memory layout has no padding —
// presumably these records are written verbatim into the varlen meta file
// (see WriteVarlenMetaFileT_ below); verify against the file format.
struct VarlenItem {
    size_t offset_in_datafile;
    ArrayLengthType element_count;
};
#pragma pack()

class PkChannel {
public:
    PkChannel() = default;
    PkChannel(
        size_t idx,
        const std::list<std::shared_ptr<DiskSegment>>& disk_seg_list,
        const std::unordered_set<DocId>& obs) :
        idx_(idx), disk_segment_list_(&disk_seg_list), obsoleted_doc_ids_(&obs) {
        iter_ = (disk_seg_list[idx])->GetPkIndexer()->CreateIterator();
    }

    std::pair<uint64_t, DocId> Value() const {
        return iter_->Value();
    }

    bool HasNext() {
        return iter_->HasNext();
    }

    void Next() {
        iter_->Next();
    }

    size_t SegmentIdx() {
        return idx_;
    }

    bool ShouldFilter(DocId doc_id) const {
        return IsDeleted_(doc_id) || IsObsolete_(doc_id);
    }

private:
    bool IsDeleted_(DocId doc_id) const {
        return ((*disk_segment_list_)[idx])->GetDeletionIndexer()->IsDeleted(doc_id);
    }

    bool IsObsolete_(DocId doc_id) const {
        return obsoleted_doc_ids_->find(doc_id) != obsoleted_doc_ids_->end();
    }

    size_t idx_ = -1;
    const std::list<std::shared_ptr<DiskSegment>>* disk_segment_list_ = nullptr;
    const std::unordered_set<DocId>* obsoleted_doc_ids_ = nullptr;
    std::shared_ptr<PrimaryKeyDiskIndexer::PrimaryKeyDiskIterator> iter_;
};

// sort by pk from small to large
struct PkIterComparator {
    bool operator()(const PkChannel& lhs, const PkChannel& rhs) const {
        return lhs.Value().first < rhs->Value().first;
    }
};

using MinPkHeap = std::priority_queue<PkChannel, std::vector<PkChannel>, PkIterComparator>;

class DiskSegmentMerger {
public:
    struct DocUnit {
        DocId old_docid;
        size_t seg_idx;

        DocUnit() = default;
        DocUnit(DocId od, size_t si) : old_docid(od), seg_idx(si) {}
    };

    class DiskSegmentPart {
    public:
        DiskSegmentPart() = default;
        DiskSegmentPart(indexlib::ArenaWrapper& arena_wrapper) {
            segment_ctx_ = std::make_unique<SegmentContext>(arena_wrapper);
        }

        void PushDocId(DocId doc_id) {
            segment_ctx_->PushDocId(doc_id);
        }

        const DocContext* GetDocResult(DocId doc_id) const {
            auto it = std::lower_bound(
                segment_ctx_->DocIds().begin(), segment_ctx_->DocIds().end(), doc_id);
            if (it == segment_ctx_->DocIds().end() || *it != doc_id) {
                return nullptr;
            }

            size_t idx = std::distance(segment_ctx_->begin(), it);
            return &(segment_ctx_->DocContexts[idx]);
        }

        SegmentContext* SegmentCtx() {
            return segment_ctx_.get();
        }        

    private:
        std::unique_ptr<SegmentContext> segment_ctx_;
    };

    DiskSegmentMerger(
        std::list<std::shared_ptr<DiskSegment>> merge_group,
        const std::string& new_disk_segment_dir) :
        merge_group_(merge_group), new_disk_segment_dir_(new_disk_segment_dir) {
        obsoleted_doc_ids_.resize(merge_group.size());
        has_inverted_ = merge_group[0]->HasInvertedIndexer();
        if (has_inverted_) {
            old2new_dict_.resize(merge_group.size());
        }
    }
    ~DiskSegmentMerger() {
        if (fail_flag) {
            FslibWrapper::RmDir(new_disk_segment_dir_);
        }
    }

    bool DoMerge() {
        if (!MergePkIndex_() || !MergeInvertIndex_() || !MergeAttributeIndex_()) {
            SetFail_();
            return false;
        }

        return true;
    }

private:
    bool FindObsoleted_();
    bool MergePkIndex_();

    bool MergeInvertIndex_();

    bool MergeAttributeIndex_();
    bool MergeVarlenAttributeIndex_(
        std::vector<std::shared_ptr<AttributeDiskIndexer>> attr_index_list,
        const std::string& attr_dir,
        int32_t field_id,
        util::ColumnType field_type);
    bool MergeFixlenAttributeIndex_(
        std::vector<std::shared_ptr<AttributeDiskIndexer>> attr_index_list,
        const std::string& attr_dir,
        int32_t field_id,
        util::ColumnType field_type);
    bool AttributeRead_(
        const std::string& field_name,
        const std::vector<std::shared_ptr<AttributeDiskIndexer>>& attr_index_list,
        std::vector<DiskSegmentPart>* segment_group);
    bool WriteOffsetFileT_();
    bool WriteVarlenMetaFileT_();
    ArrayLengthType GetElementCount_(
        util::ColumnType field_type, util::FieldValue* field_value) const;
    bool DumpString_(int32_t field_id);
    template<class T>
    bool DumpMultiInt_(int32_t field_id) {
        size_t datafile_offset = 0;
        for (const auto& doc : centroid_info_) {
            const DocContext* doc_ctx = segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
            util::FieldValue* field_value = doc_ctx->GetField(field_id);

            for (size_t idx = 0; idx < field_value->int_values().values_size(); ++idx) {
                T val = static_cast<T>(field_value->int_values().values(idx));
                if (Status::OK !=
                    attr_data_file_writer_->Write(&val, sizeof(T), datafile_offset)) {
                    LOG(ERROR) << "write datafile fail for field_id " << field_id
                        << " at offset " << datafile_offset
                        << " with length " << sizeof(T);
                    return false;
                }

                datafile_offset += sizeof(T);
            }
        }

        return true;
    }
    template<class T>
    bool DumpMultiFloat_(int32_t field_id) {
        size_t datafile_offset = 0;
        for (const auto& doc : centroid_info_) {
            const DocContext* doc_ctx = segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
            util::FieldValue* field_value = doc_ctx->GetField(field_id);

            for (size_t idx = 0; idx < field_value->double_values().values_size(); ++idx) {
                T val = static_cast<T>(field_value->double_values().values(idx));
                if (Status::OK !=
                    attr_data_file_writer_->Write(&val, sizeof(T), datafile_offset)) {
                    LOG(ERROR) << "write datafile fail for field_id " << field_id
                        << " at offset " << datafile_offset
                        << " with length " << sizeof(T);
                    return false;
                }

                datafile_offset += sizeof(T);
            }
        }
    }

    return true;
}
    bool DumpMultiDouble_(int32_t field_id, const uint32_t element_length);
    bool DumpMultiString_(int32_t field_id);
    bool DumpMultiT_(
        int32_t field_id, util::ColumnType field_type, const uint32_t element_length);
    
    bool WriteVarlenMetaAndDataFileString_(
        int32_t field_id, VarlenMetaBuffer* varlen_metafile_buffer_writer);
    bool WriteOffsetFileString_(
        int32_t field_id, size_t* varlen_metafile_length, size_t* datafile_length);

    template<class T>
    bool DumpSingleFloat_(int32_t field_id) {
        std::vector<T> data;
        data.resize(centroid_info_.size(), 0);

        T val;
        for (size_t doc_idx = 0; doc_idx < centroid_info_.size(); ++doc_idx) {
            const DocUnit& doc = centroid_info_[doc_idx];
            const DocContext* doc_ctx =
                segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
            if (unlikely(!doc_ctx->GetField(field_id, &val))) {
                LOG(ERROR) << "read value from field_id " << field_id
                    << " fail";
                return false;
            }

            data[doc_idx] = val;
        }

        if (Status::OK != attr_data_file_writer_->Write(
                (uint8_t*)data.data(), sizeof(T) * data.size(), 0)) {
            LOG(ERROR) << "Write datafile fail with offset 0 length "
                << (sizeof(T) * data.size());
            return false;
        }

        return true;
    }
    
    template<class T>
    bool DumpSingleInt_(int32_t field_id) {
        std::vector<T> data;
        data.resize(centroid_info_.size(), 0);

        int64_t val;
        for (size_t doc_idx = 0; doc_idx < centroid_info_.size(); ++doc_idx) {
            const DocUnit& doc = centroid_info_[doc_idx];
            const DocContext* doc_ctx =
                segment_group[doc.seg_idx].GetDocResult(doc.old_docid);
            if (unlikely(!doc_ctx->GetField(field_id, &val))) {
                LOG(ERROR) << "read value from field_id " << field_id
                    << " fail";
                return false;
            }

            data[doc_idx] = static_cast<T>(val);
        }

        if (Status::OK != attr_data_file_writer_->Write(
                (uint8_t*)data.data(), sizeof(T) * data.size(), 0)) {
            LOG(ERROR) << "Write datafile fail with offset 0 length "
                << (sizeof(T) * data.size());
            return false;
        }

        return true;
    }

    void SetFail_() {
        fail_flag = true;
    }

    std::list<std::shared_ptr<DiskSegment>> merge_group_;
    const std::string& new_disk_segment_dir_;
    bool fail_flag = false;
    bool has_inverted_ = false;

    // record ttl-obsoleted doc_ids of every segment
    std::vector<std::unordered_set<DocId>> obsoleted_doc_ids_;
    // only for inverted
    std::vector<std::unordered_map<DocId, DocId>> old2new_dict_;
    std::vector<DocUnit> centroid_info_;

    std::unique_ptr<NormalFileWriter> attr_offset_file_writer_;
    std::unique_ptr<NormalFileWriter> attr_varlen_metafile_writer_;
    std::unique_ptr<NormalFileWriter> attr_data_file_writer_;

    std::unique_ptr<NormalFileWriter> inverted_offset_file_writer_;
    std::unique_ptr<NormalFileWriter> inverted_data_file_writer_;
};

}
}