#include "index/inverted_index/inverted_mem_indexer/inverted_mem_indexer.h"

namespace hawking {
namespace indexlib {

// Initial bucket count for the realtime-update posting hash map.
// NOTE(review): the flag name contains a typo ("upadte" -> "update"); renaming
// would break existing command lines, so it is left as-is. Also, Init() below
// sizes the table from hashmap_init_size_, not this flag — confirm where (or
// whether) this flag is consumed.
DEFINE_int32(posting_realtime_upadte_space, 1024, "realtime update hashmap init size");

Status InvertedMemIndexer::Init(
    int32_t field_id,
    const std::unordered_set<uint64_t>& high_freq_termkeys) {
    // Set up the mmap-backed memory pool and the term -> posting-list table.
    // NOTE(review): high_freq_termkeys is not used in this method; presumably
    // reserved for high-frequency/bitmap term handling — confirm with callers.
    field_id_ = field_id;

    // Build into locals first, then hand ownership to the members.
    auto allocator = std::make_unique<MMapAllocator>();
    // Pool chunk size: 10 MiB per allocation from the allocator.
    auto pool = std::make_unique<Pool>(allocator.get(), 10 * 1024 * 1024);
    posting_table_ = std::make_unique<HashMap<uint64_t, PostingWriter*>>(
        pool.get(), hashmap_init_size_);

    allocator_ = std::move(allocator);
    pool_ = std::move(pool);
    return Status::OK;
}

Status InvertedMemIndexer::AddDocument(NormalDocument* doc) {
    // Pull this index's single term key out of the document; if the field is
    // absent the document cannot be indexed here.
    uint64_t term_key = 0;
    if (!doc->GetIndexDocument()->GetField(field_id_, &term_key)) {
        return Status::NotFound;
    }

    AddTerm_(term_key, doc->DocId());
    return Status::OK;
}

Status InvertedMemIndexer::UpdateDocument(
    uint64_t old_term, uint64_t new_term, DocId doc_id) {
    // Move doc_id from old_term's posting list to new_term's posting list.
    // Returns NotFound when old_term has no posting list in this segment.
    PostingWriter** writer = posting_table_->Find(old_term);
    if (!writer) {
        return Status::NotFound;
    }

    // BUGFIX: Find() yields PostingWriter** (a pointer to the stored
    // PostingWriter*), so the original `writer->DeleteDocument(doc_id)`
    // invoked a member on the pointer type and does not compile. Dereference
    // first, matching the pattern used in AddTerm_/RemoveTerm_/Lookup.
    PostingWriter* posting_writer = *writer;
    // NOTE(review): RemoveTerm_ calls RemoveDocument() while this path calls
    // DeleteDocument() — confirm PostingWriter exposes both and that their
    // semantics match (left as DeleteDocument to preserve intent).
    posting_writer->DeleteDocument(doc_id);
    AddTerm_(new_term, doc_id);
    return Status::OK;
}

void InvertedMemIndexer::AddTerm_(uint64_t term, DocId doc_id) {
    PostingWriter** writer = posting_table_->Find(term);
    if (!writer) {
        PostingWriter* posting_writer = POOL_NEW_CLASS(pool_, PostingWriter, pool_.get());
        posting_table_->Insert(term, posting_writer);
        posting_writer->AddDocument(doc_id);
    } else {
        PostingWriter* posting_writer = *writer;
        posting_writer->AddDocument(doc_id);
    }
}

void InvertedMemIndexer::RemoveTerm_(uint64_t term, DocId doc_id) {
    PostingWriter** writer = posting_table_->Find(term);
    if (writer) {
        PostingWriter* posting_writer = *writer;
        posting_writer->RemoveDocument(doc_id);
    }
}

Status InvertedMemIndexer::Lookup(
    uint64_t term_key, uint32_t topk, SegmentContext* segment_ctx) {
    // Copy a consistent snapshot of the term's in-memory posting list into
    // segment_ctx for query-time iteration.
    // NOTE(review): topk is unused here; presumably truncation happens
    // downstream of the snapshot — confirm.
    PostingWriter** entry = posting_table_->Find(term_key);
    if (entry == nullptr) {
        return Status::NotFound;
    }

    (*entry)->CopySnapShot(segment_ctx);
    return Status::OK;
}

Status InvertedMemIndexer::Dump(
    const std::string& inverted_dir,
    const std::unordered_map<DocId, DocId>& old2new_map) {
    // Persist this segment's inverted index as two files under inverted_dir:
    //   offset_file: one TermMetaUnit per term (term key, data offset, length)
    //   data_file:   the concatenated posting lists
    // old2new_map remaps doc ids and filters deleted docs during the dump.
    std::string offset_file = butil::string_printf("%s/offset_file", inverted_dir.data());
    std::string data_file = butil::string_printf("%s/data_file", inverted_dir.data());
    std::shared_ptr<NormalFileWriter> offset_file_writer =
        std::make_shared<NormalFileWriter>(offset_file);
    std::shared_ptr<NormalFileWriter> data_file_writer =
        std::make_shared<NormalFileWriter>(data_file);

    Status init_ret = offset_file_writer->Init();
    if (init_ret != Status::OK) {
        return init_ret;
    }
    init_ret = data_file_writer->Init();
    if (init_ret != Status::OK) {
        return init_ret;
    }

    off_t offset_file_off = 0;
    uint64_t data_file_off = 0;

    // The hash table iterates in bucket order; copy into an ordered map so
    // terms are dumped in ascending key order.
    std::map<uint64_t, PostingWriter*> sorted_map;
    auto table_iter = posting_table_->CreateIterator();
    while (table_iter.HasNext()) {
        sorted_map.insert(table_iter.Next());
    }

    // Write <term, doclist> pairs based on each writer's snapshot.
    // BUGFIX: the original `for (auto iter : sorted_map)` copied every pair
    // per iteration and its inner `auto ret` shadowed the outer Status ret;
    // iterate by const reference with non-shadowing names instead.
    for (const auto& [term_key, posting_writer] : sorted_map) {
        uint32_t doclist_length =
            static_cast<uint32_t>(posting_writer->DocListLength(old2new_map));
        TermMetaUnit termmeta(term_key, data_file_off, doclist_length);
        auto write_ret = offset_file_writer->Write(
            reinterpret_cast<uint8_t*>(&termmeta), sizeof(termmeta), offset_file_off);
        if (!write_ret.OK()) {
            return write_ret.Code();
        }

        offset_file_off += sizeof(termmeta);
        auto dump_ret = posting_writer->Dump(data_file_writer, &data_file_off, old2new_map);
        if (Status::OK != dump_ret) {
            return dump_ret;
        }
    }

    // NOTE(review): the early error returns above skip Close(); presumably the
    // NormalFileWriter destructor closes the underlying file — confirm.
    offset_file_writer->Close();
    data_file_writer->Close();
    return Status::OK;
}

}
}