package com.example.lzh.component;

import com.example.lzh.model.DocInfo;
import com.example.lzh.model.Weight;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.io.File;
import java.io.IOException;
import java.util.*;

/**
 * In-memory search index holding a forward index (docId -&gt; DocInfo) and an
 * inverted index (term -&gt; list of (docId, weight) entries), with persistence
 * to disk via Jackson.
 *
 * Thread-safety: addDoc may be invoked concurrently; the forward index is
 * guarded by {@code locker1} and the inverted index by {@code locker2}.
 * Reads (getDocInfo/getInverted) are expected to happen after indexing has
 * completed, matching the original design.
 *
 * @Question Index
 * @Date 2023/1/27 12:09
 * @Solution
 */
@Component
public class Index {
    // Directory where the serialized index files are written/read.
    private static final String INDEX_PATH = "/home/ur-search/";
    // ObjectMapper is thread-safe and expensive to create; share one instance.
    final ObjectMapper objectMapper = new ObjectMapper();
    // Lock guarding the forward index (locker1) and the inverted index (locker2).
    final Object locker1 = new Object();
    final Object locker2 = new Object();
    // Forward index: the array position IS the docId.
    private ArrayList<DocInfo> forwardIndex = new ArrayList<>();
    // Inverted index: key is a term, value is the list of documents
    // (with relevance weights) that contain the term.
    private HashMap<String, ArrayList<Weight>> invertedIndex = new HashMap<>();

    /**
     * 1. Looks up a document by its id in the forward index.
     *
     * @param docId id assigned by buildForward (index into the forward list)
     * @return the document, or {@code null} if docId is out of range
     */
    public DocInfo getDocInfo(int docId) {
        // Defensive bounds check: an invalid id previously threw
        // IndexOutOfBoundsException; null matches getInverted's contract.
        if (docId < 0 || docId >= forwardIndex.size()) {
            return null;
        }
        return forwardIndex.get(docId);
    }

    /**
     * 2. Looks up which documents contain the given term.
     *
     * @param term a single token produced by the analyzer
     * @return the posting list for the term, or {@code null} if the term
     *         never occurred in any indexed document
     */
    public List<Weight> getInverted(String term) {
        return invertedIndex.get(term);
    }

    /**
     * 3. Adds one document to the index: appends it to the forward index
     * and merges its term statistics into the inverted index.
     *
     * @param title   document title
     * @param url     document URL
     * @param content document body text
     */
    public void addDoc(String title, String url, String content) {
        // Build the forward entry first so the doc gets its id,
        // then fold its terms into the inverted index.
        DocInfo docInfo = buildForward(title, url, content);
        buildInverted(docInfo);
    }

    /**
     * Creates the forward-index entry for a new document.
     *
     * @return the DocInfo with its docId assigned
     */
    public DocInfo buildForward(String title, String url, String content) {
        DocInfo docInfo = new DocInfo();
        docInfo.setTitle(title);
        docInfo.setUrl(url);
        docInfo.setContent(content);
        synchronized (locker1) {
            // The id must be read and the element appended under the SAME
            // lock; reading size() outside (as the original did) lets two
            // threads observe the same size and produce duplicate docIds.
            docInfo.setDocId(forwardIndex.size());
            forwardIndex.add(docInfo);
        }
        return docInfo;
    }

    /**
     * Merges one document's term frequencies into the inverted index.
     * Weight formula: titleCount * 10 + contentCount — title hits count
     * ten times more than body hits.
     */
    public void buildInverted(DocInfo docInfo) {
        // Per-term occurrence counts within this single document.
        class WordCnt {
            public int titleCount;   // occurrences in the title
            public int contentCount; // occurrences in the body
        }
        HashMap<String, WordCnt> wordCntHashMap = new HashMap<>();

        // Tokenize the title and count each term.
        List<Term> terms = ToAnalysis.parse(docInfo.getTitle()).getTerms();
        for (Term term : terms) {
            String word = term.getName();
            wordCntHashMap.computeIfAbsent(word, k -> new WordCnt()).titleCount += 1;
        }

        // Tokenize the content and count each term.
        // BUG FIX: the original incremented titleCount here as well, so body
        // hits were weighted x10 and contentCount stayed 0 forever.
        terms = ToAnalysis.parse(docInfo.getContent()).getTerms();
        for (Term term : terms) {
            String word = term.getName();
            wordCntHashMap.computeIfAbsent(word, k -> new WordCnt()).contentCount += 1;
        }

        // Fold each (term, counts) pair into the global inverted index.
        for (Map.Entry<String, WordCnt> entry : wordCntHashMap.entrySet()) {
            // The Weight object can be built outside the lock — it touches
            // only this document's data.
            Weight weight = new Weight();
            weight.setDocId(docInfo.getDocId());
            weight.setWeight(entry.getValue().titleCount * 10 + entry.getValue().contentCount);
            synchronized (locker2) {
                // Lookup AND insert must both happen under the lock; the
                // original called invertedIndex.get outside it, racing with
                // concurrent put() calls from other indexing threads.
                invertedIndex.computeIfAbsent(entry.getKey(), k -> new ArrayList<>())
                             .add(weight);
            }
        }
    }

    /**
     * 4. Serializes both index structures to disk as JSON.
     * Call only after indexing has finished (no locking is done here).
     */
    public void save() {
        long beg = System.currentTimeMillis();
        File indexPathFile = new File(INDEX_PATH);
        if (!indexPathFile.exists()) {
            // mkdirs (not mkdir) so a missing parent directory is created too.
            indexPathFile.mkdirs();
        }
        File forwardIndexFile = new File(INDEX_PATH, "forward.txt");
        File invertedIndexFile = new File(INDEX_PATH, "inverted.txt");
        try {
            objectMapper.writeValue(forwardIndexFile, forwardIndex);
            objectMapper.writeValue(invertedIndexFile, invertedIndex);
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("保存索引完成... 耗时：" + (end - beg) + "毫秒");
    }

    /**
     * 5. Loads both index structures from disk, replacing the in-memory state.
     * On I/O failure the previous in-memory indexes are left untouched.
     */
    public void load() {
        long beg = System.currentTimeMillis();
        System.out.println("开始加载索引...");
        File forwardIndexFile = new File(INDEX_PATH, "forward.txt");
        File invertedIndexFile = new File(INDEX_PATH, "inverted.txt");
        try {
            // TypeReference preserves the generic element types through
            // Jackson's deserialization (plain Class would erase them).
            forwardIndex = objectMapper.readValue(forwardIndexFile, new TypeReference<ArrayList<DocInfo>>() {});
            invertedIndex = objectMapper.readValue(invertedIndexFile, new TypeReference<HashMap<String, ArrayList<Weight>>>() {});
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("索引加载结束..." + (end - beg) + "毫秒");
    }

}
