#pragma once

#include"index.hpp"
#include"util.hpp"
#include"log.hpp"
#include<algorithm>
#include<jsoncpp/json/json.h>

namespace ns_searcher
{
    // Deduplicated hit record: several different keywords may land on the
    // same doc_id, so per-document hits are merged into one of these
    // before ranking.
    struct InvertElemPrint
    {
        uint64_t doc_id = 0;                // document id in the forward index
        int weight = 0;                     // accumulated relevance weight
        std::vector<std::string> words;     // every keyword that hit this doc
    };

    class Searcher
    {
        private:
            ns_index::Index* index;

        public: 
            Searcher() {}
            ~Searcher() {}

        public:
            void InitSearcher(const std::string& input)
            {
                //1.创建index对象
                index = ns_index::Index::GetInstance();
                LOG(NORMAL, "=====Create singleton index complete=====");
                //2. 构建索引
                index->ns_index::Index::Indexing(input);
                LOG(NORMAL, "=====Build forward and inverted index complete===");
            }

            void Search(const std::string& query, std::string* json_string)
            {
                //1. 分词：query被分词为keywords
                std::vector<std::string> keywords;
                util::JiebaUtil::CutString(query, &keywords);

                //2. 触发：根据分词触发搜索（一样需要转小写）
                //构建tokens_map
                std::unordered_map<uint16_t, InvertElemPrint> tokens_map; 
                
                //最终获得<docId, InvertElemPrint>的映射
                for(std::string keyword: keywords)
                {
                    boost::to_lower(keyword);
                    // keyword ->倒排拉链，即得知文档id
                    ns_index::inverted_index_list* inv_ind_list = index->GetInvertIndex(keyword);
                    if(nullptr == inv_ind_list)
                    {
                        continue;
                    }

                    // 不同词可能对应相同文档，本质就是要去重doc_id
                    // 遍历倒排拉链，填写InvertElemPrint，每个item和k对应
                    for(const auto& elem : *inv_ind_list)
                    {
                        // 拿到tokens_map中doc_id下标的InvertElemPrint
                        auto& item = tokens_map[elem.doc_id]; //[]如果存在直接获取同一item（方便累加），不存在则新建
                        item.doc_id = elem.doc_id;
                        item.weight += elem.weight; //累加权值
                        item.words.push_back(elem.keyword);
                    }
                }

                std::vector<InvertElemPrint> inv_ind_list_all;
                //遍历去重后的item
                for(const auto& item: tokens_map)
                {
                    inv_ind_list_all.push_back(std::move(item.second));
                }

                //3. 合并排序：按照相关性降序排列 lamda
                std::sort(inv_ind_list_all.begin(), inv_ind_list_all.end(), 
                            [](const InvertElemPrint& e1, const InvertElemPrint& e2)
                            {
                                return e1.weight > e2.weight;
                            });


                //4. 构建：json串 
                Json::Value root;  
                for(auto& item: inv_ind_list_all)
                {
                    //查找正排索引
                    ns_index::docInfo* doc = index->GetForwardIndex(item.doc_id);
                    if(nullptr == doc)
                    {
                        continue;
                    }
                    //用jsoncpp进行序列化 和 反序列化
                    Json::Value elem;
                    elem["title"] = doc->title;
                    elem["desc"] = GetDesc(doc->content, item.words[0]); //摘要，和keyword强相关
                    elem["url"] = doc->url;

                    //for debug
                    elem["id"] = (int)item.doc_id; //int -> string
                    elem["weight"] = item.weight;
                    
                    //不断循环，root变得越来越多
                    root.append(elem);
                }

                Json::StyledWriter writer;
                // Json::FastWriter writer;
                *json_string = writer.write(root);
            }

            std::string GetDesc(const std::string& html_content, const std::string& keyword)
            {
                //1. 找到keyword的首次出现位置，绝对不可能找不到
                auto iter = std::search(html_content.begin(), html_content.end(), keyword.begin(), keyword.end(),[](int x, int y){
                    return (std::tolower(x) == std::tolower(y));
                });
                if(iter == html_content.end())
                {
                    return "oh my gosh! We can not find it. it's impossiable!";
                }
                std::size_t first_pos = std::distance(html_content.begin(), iter);

                //2. 取它前50，后100（没有50个，从开头取，结尾同理）
                const int prev_step = 50;
                const int next_step = 100;
                
                int begin = 0;
                int end = html_content.size()-1;
                if(first_pos > begin + prev_step)
                {
                    begin = first_pos -prev_step;
                }

                if(end > first_pos + next_step)
                {
                    end = first_pos + next_step;
                }

                if(begin >= end)
                {
                    return "string pos error";
                }
                
                std::string result = html_content.substr(begin, end-begin+1) + "...EXPLORE FURTHURE";
                return result;
            }        
    };
}