package com.example.demo.searcher;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class Index {
    // In-memory search index: a forward index (docId -> document) and an
    // inverted index (term -> posting list), with save/load persistence.

    // Reused Jackson mapper for (de)serializing the index files.
    // ObjectMapper construction is expensive, so it is created once and final.
    private final ObjectMapper objectMapper = new ObjectMapper();

    // Directory holding the on-disk index files; chosen once at class load
    // time based on the deployment environment flag.
    public static String INDEX_PATH = null;
    static {
        if (Config.isOnline) {
            INDEX_PATH = "/root/java104/doc_searcher_index/";
        } else {
            INDEX_PATH = "d:/doc_searcher_index/";
        }
    }

    // Forward index: docId -> document details. The docId IS the list position.
    public ArrayList<DocInfo> forwardIndex = new ArrayList<>();
    // Inverted index: term -> list of (docId, weight) postings.
    public HashMap<String, ArrayList<Weight>> invertedIndex = new HashMap<>();

    // Two separate lock objects so that building the inverted index and
    // appending to the forward index do not contend with each other.
    // final: a reassigned lock object would silently break mutual exclusion.
    private final Object locker1 = new Object();
    private final Object locker2 = new Object();

    /**
     * 1. Given a docId, look up the document's details in the forward index.
     * Throws IndexOutOfBoundsException if the docId is out of range.
     */
    public DocInfo getDocInfo(int docId) {
        return forwardIndex.get(docId);
    }

    /**
     * 2. Given a term, look up which documents it is associated with in the
     * inverted index. Returns null when the term is not indexed (callers are
     * expected to handle null).
     */
    public List<Weight> getInverted(String term) {
        return invertedIndex.get(term);
    }

    /**
     * 3. Add one document to the index: updates both the forward index and
     * the inverted index.
     */
    public void addDoc(String title, String url, String content) {
        // Build the forward index entry first so the document gets its docId...
        DocInfo docInfo = buildForward(title, url, content);
        // ...then fold its terms into the inverted index.
        buildInverted(docInfo);
    }

    private void buildInverted(DocInfo docInfo) {
        // Per-term occurrence counts within this single document.
        class WordCnt {
            public int titleCount;
            public int contentCount;
            public WordCnt(int titleCount, int contentCount) {
                this.titleCount = titleCount;
                this.contentCount = contentCount;
            }
        }
        HashMap<String, WordCnt> wordCountHashMap = new HashMap<>();

        // 1. Segment the title, and 2. count each term's title occurrences.
        List<Term> termTitle = ToAnalysis.parse(docInfo.getTitle()).getTerms();
        for (Term term : termTitle) {
            String word = term.getName();
            WordCnt wordCnt = wordCountHashMap.get(word);
            if (wordCnt != null) {
                wordCnt.titleCount += 1;
            } else {
                wordCountHashMap.put(word, new WordCnt(1, 0));
            }
        }

        // 3. Segment the body, and 4. count each term's body occurrences.
        List<Term> termsContent = ToAnalysis.parse(docInfo.getContent()).getTerms();
        for (Term term : termsContent) {
            String word = term.getName();
            WordCnt wordCnt = wordCountHashMap.get(word);
            if (wordCnt != null) {
                wordCnt.contentCount += 1;
            } else {
                wordCountHashMap.put(word, new WordCnt(0, 1));
            }
        }

        // 5.+6. Merge the per-document counts into the shared inverted index.
        // The lock protects the HashMap and its posting lists against
        // concurrent addDoc calls from multiple parser threads.
        for (Map.Entry<String, WordCnt> entry : wordCountHashMap.entrySet()) {
            synchronized (locker1) {
                // Fetch the posting list, lazily creating it for a first-seen
                // term; computeIfAbsent replaces the previously duplicated
                // Weight-construction branches with a single code path.
                ArrayList<Weight> invertedList =
                        invertedIndex.computeIfAbsent(entry.getKey(), k -> new ArrayList<>());
                Weight weight = new Weight();
                weight.setDocId(docInfo.getDocId());
                // Weight heuristic: a title hit counts 10x a body hit.
                weight.setWeight(entry.getValue().contentCount + entry.getValue().titleCount * 10);
                invertedList.add(weight);
            }
        }
    }

    private DocInfo buildForward(String title, String url, String content) {
        DocInfo docInfo = new DocInfo();
        docInfo.setTitle(title);
        docInfo.setUrl(url);
        docInfo.setContent(content);
        // docId assignment and list append must be atomic together, otherwise
        // two threads could claim the same id.
        synchronized (locker2) {
            docInfo.setDocId(forwardIndex.size());
            forwardIndex.add(docInfo);
        }
        return docInfo;
    }

    /**
     * 4. Persist the in-memory index to disk, using one file each for the
     * forward and inverted index.
     */
    public void save() {
        long sta = System.currentTimeMillis();
        System.out.println("保存索引开始");
        File indexPathFile = new File(INDEX_PATH);
        // Create the index directory if it does not exist yet; report a
        // failure instead of silently ignoring the mkdirs() result (the
        // subsequent writeValue would otherwise fail with a vaguer error).
        if (!indexPathFile.exists() && !indexPathFile.mkdirs()) {
            System.out.println("无法创建索引目录: " + INDEX_PATH);
        }
        File forwardIndexFile = new File(INDEX_PATH + "forward.txt");
        File invertedIndexFile = new File(INDEX_PATH + "inverted.txt");
        try {
            objectMapper.writeValue(forwardIndexFile, forwardIndex);
            objectMapper.writeValue(invertedIndexFile, invertedIndex);
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("保存索引完成!" + "一共消耗 " + (end - sta) + " ms");
    }

    /**
     * 5. Load the on-disk index data back into memory, replacing the current
     * forward and inverted index structures.
     */
    public void load() {
        long sta = System.currentTimeMillis();
        System.out.println("加载索引开始!");
        File forwardIndexFile = new File(INDEX_PATH + "forward.txt");
        File invertedIndexFile = new File(INDEX_PATH + "inverted.txt");
        try {
            // TypeReference preserves the generic element types through
            // Jackson's deserialization (plain readValue would erase them).
            forwardIndex = objectMapper.readValue(forwardIndexFile,
                    new TypeReference<ArrayList<DocInfo>>() {});
            invertedIndex = objectMapper.readValue(invertedIndexFile,
                    new TypeReference<HashMap<String, ArrayList<Weight>>>() {});
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("加载索引结束!" + "一共消耗 " + (end - sta) + " ms");
    }

    // Manual smoke test: load the index from disk and report completion.
    public static void main(String[] args) {
        Index index = new Index();
        index.load();
        System.out.println("索引加载完成");
    }

}
