package org.example.doc_searcher.searcher;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * In-memory search index with two parts:
 * <ul>
 *   <li>forward index: docId -&gt; full document info (docId is the list position);</li>
 *   <li>inverted index: term -&gt; list of (docId, weight) postings.</li>
 * </ul>
 * {@link #addDoc} may be called from multiple indexing threads; the two index
 * structures are guarded by separate locks so those updates do not contend
 * with each other. NOTE(review): the read paths ({@link #getDocInfo},
 * {@link #getInverted}) are unsynchronized — callers appear to query only
 * after indexing/loading has finished; confirm before reading concurrently.
 */
public class Index {
    // Weight formula factor: a title hit counts 10x a content hit.
    private static final int TITLE_WEIGHT_FACTOR = 10;

    // Separate locks: locker for the inverted index and for the forward index,
    // so concurrent addDoc calls only serialize on the structure they touch.
    private final Object invertedLocker = new Object();
    private final Object forwardLocker = new Object();

    // Directory holding the serialized index files; chosen once at class-load
    // time based on the deployment environment.
    private static final String INDEX_PATH;
    static {
        if (Config.isOnline) {
            INDEX_PATH = "/home/ubuntu/doc_searcher_index/";
        } else {
            INDEX_PATH = "D:/doc_searcher_index/";
        }
    }

    // ObjectMapper is thread-safe and costly to build; share one instance.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    // Forward index: element i describes the document whose docId is i.
    private ArrayList<DocInfo> forwardIndex = new ArrayList<>();
    // Inverted index: term -> postings (docId + relevance weight).
    private HashMap<String, ArrayList<Weight>> invertedIndex = new HashMap<>();

    /**
     * Looks up the full document info for a docId in the forward index.
     *
     * @param docId position assigned by {@link #addDoc}
     * @return the document's info
     * @throws IndexOutOfBoundsException if docId is not a valid index
     */
    public DocInfo getDocInfo(int docId) {
        return forwardIndex.get(docId);
    }

    /**
     * Looks up the inverted-index postings for a term.
     *
     * @param term a segmented word
     * @return the posting list, or {@code null} if the term never occurred
     */
    public List<Weight> getInverted(String term) {
        return invertedIndex.get(term);
    }

    /**
     * Adds one document to the index: appends it to the forward index
     * (assigning its docId) and then records its term postings in the
     * inverted index.
     *
     * @param title   document title
     * @param url     document URL
     * @param content document body text
     */
    public void addDoc(String title, String url, String content) {
        DocInfo docInfo = buildForward(title, url, content);
        buildInverted(docInfo);
    }

    /**
     * Builds inverted-index postings for one document: segments the title and
     * the content, counts per-term occurrences, then appends one weighted
     * posting per distinct term.
     */
    private void buildInverted(DocInfo docInfo) {
        // Per-term occurrence counters for this single document.
        class WordCnt {
            public int titleCount;
            public int contentCount;
        }
        HashMap<String, WordCnt> wordCntMap = new HashMap<>();

        // Segment the title and count each term's occurrences there.
        for (Term term : ToAnalysis.parse(docInfo.getTitle()).getTerms()) {
            wordCntMap.computeIfAbsent(term.getName(), k -> new WordCnt()).titleCount++;
        }
        // Segment the content and count each term's occurrences there.
        for (Term term : ToAnalysis.parse(docInfo.getContent()).getTerms()) {
            wordCntMap.computeIfAbsent(term.getName(), k -> new WordCnt()).contentCount++;
        }
        // weight = titleCount * 10 + contentCount; append one posting per term.
        for (Map.Entry<String, WordCnt> entry : wordCntMap.entrySet()) {
            WordCnt cnt = entry.getValue();
            Weight weight = new Weight();
            weight.setDocId(docInfo.getDocID());
            weight.setWeight(cnt.titleCount * TITLE_WEIGHT_FACTOR + cnt.contentCount);
            // Lock per entry (as before) so concurrent indexers interleave.
            synchronized (invertedLocker) {
                invertedIndex.computeIfAbsent(entry.getKey(), k -> new ArrayList<>())
                        .add(weight);
            }
        }
    }

    /**
     * Appends the document to the forward index, assigning the next docId.
     * The size read and the append must be atomic together, hence the lock.
     */
    private DocInfo buildForward(String title, String url, String content) {
        DocInfo docInfo = new DocInfo();
        docInfo.setTitle(title);
        docInfo.setUrl(url);
        docInfo.setContent(content);
        synchronized (forwardLocker) {
            docInfo.setDocID(forwardIndex.size());
            forwardIndex.add(docInfo);
        }
        return docInfo;
    }

    /**
     * Serializes both in-memory indexes to JSON files under INDEX_PATH.
     * IOExceptions are logged and swallowed, preserving existing best-effort
     * behavior.
     */
    public void save() {
        long beg = System.currentTimeMillis();
        System.out.println("保存索引开始！");
        File indexPathFile = new File(INDEX_PATH);
        if (!indexPathFile.exists()) {
            // mkdirs (not mkdir): also create missing parent directories,
            // otherwise saving fails on a fresh machine.
            indexPathFile.mkdirs();
        }
        File forwardIndexFile = new File(INDEX_PATH + "forward.txt");
        File invertedIndexFile = new File(INDEX_PATH + "inverted.txt");
        try {
            OBJECT_MAPPER.writeValue(forwardIndexFile, forwardIndex);
            OBJECT_MAPPER.writeValue(invertedIndexFile, invertedIndex);
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("保存索引完成！消耗时间：" + (end - beg) + "ms");
    }

    /**
     * Loads both indexes from the JSON files under INDEX_PATH, replacing the
     * in-memory structures. On IOException (e.g. files missing) the error is
     * logged and the previous in-memory state is kept, as before.
     */
    public void load() {
        long beg = System.currentTimeMillis();
        System.out.println("加载索引开始！");
        File forwardIndexFile = new File(INDEX_PATH + "forward.txt");
        File invertedIndexFile = new File(INDEX_PATH + "inverted.txt");
        try {
            forwardIndex = OBJECT_MAPPER.readValue(
                    forwardIndexFile, new TypeReference<ArrayList<DocInfo>>() {});
            invertedIndex = OBJECT_MAPPER.readValue(
                    invertedIndexFile, new TypeReference<HashMap<String, ArrayList<Weight>>>() {});
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("加载引擎结束消耗时间：" + (end - beg) + "ms");
    }

    /** Smoke entry point: loads the on-disk index and reports completion. */
    public static void main(String[] args) {
        Index index = new Index();
        index.load();
        System.out.println("索引加载完成 ");
    }
}
