package com.example.documentsearchengine;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.Data;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.springframework.stereotype.Component;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * In-memory index structures for the document search engine.
 *
 * <p>Holds two structures:
 * <ul>
 *   <li>forward index: docId (position in the list) -&gt; {@link DocInfo}</li>
 *   <li>inverted index: term -&gt; posting list of (docId, weight)</li>
 * </ul>
 * and persists both to disk as JSON via Jackson.
 *
 * <p>Thread-safety: writes to the forward and inverted index are guarded by
 * {@code lock1}/{@code lock2} respectively; reads ({@link #getDocInfo},
 * {@link #getInverted}, {@link #load}) are NOT synchronized against
 * concurrent writers — callers must not index and query/load concurrently.
 */
@Data
public class Index {
    // Directory where the serialized index files are written/read.
    private final String INDEX_PATH = "D:\\Ddoc_searcher_index\\";

    // Forward index: a document's id is its position in this list.
    private ArrayList<DocInfo> forwardIndex = new ArrayList<>();

    // Guards docId assignment + append into forwardIndex (must be atomic).
    private Object lock1 = new Object();
    // Guards all mutation of invertedIndex.
    private Object lock2 = new Object();

    // Inverted index: key is a term, value is the posting list of documents
    // containing that term, each with its relevance weight.
    private HashMap<String, ArrayList<Weight>> invertedIndex = new HashMap<>();

    // ObjectMapper is thread-safe and expensive to construct; share one
    // instance instead of building a new one on every save()/load().
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    /**
     * 1. Look up a document's details in the forward index by id.
     *
     * @param doId the document id (index into the forward index)
     * @return the document's details
     * @throws IndexOutOfBoundsException if {@code doId} is out of range
     */
    public DocInfo getDocInfo(Integer doId) {
        return forwardIndex.get(doId);
    }

    /**
     * 2. Look up which documents are associated with a term.
     *
     * @param term the (tokenized) search term
     * @return the posting list for the term, or {@code null} if unknown
     */
    public List<Weight> getInverted(String term) {
        return invertedIndex.get(term);
    }

    /**
     * 3. Add a new document to the index: first the forward index (which
     * assigns the docId), then the inverted index.
     */
    public void addDoc(String title, String url, String content) {
        DocInfo docInfo = builderDoInfo(title, url, content);
        builderInverted(docInfo);
    }

    /**
     * Tokenizes the document's title and body and merges the resulting
     * postings into the inverted index. Title hits weigh more than body hits:
     * weight = titleCount * 10 + contentCount.
     */
    private void builderInverted(DocInfo docInfo) {
        // Per-term occurrence counts for this one document.
        class WordCount {
            int titleCount;   // occurrences of the term in the title
            int contentCount; // occurrences of the term in the body
        }
        HashMap<String, WordCount> wordCountMap = new HashMap<>();

        // Tokenize the title. NOTE(review): the original comment claimed the
        // tokenizer lower-cases all terms; nothing in this code does so —
        // confirm ansj's behavior, otherwise term lookup is case-sensitive.
        for (Term term : ToAnalysis.parse(docInfo.getTitle()).getTerms()) {
            wordCountMap.computeIfAbsent(term.getName(), k -> new WordCount()).titleCount++;
        }
        // Tokenize the body.
        for (Term term : ToAnalysis.parse(docInfo.getContent()).getTerms()) {
            wordCountMap.computeIfAbsent(term.getName(), k -> new WordCount()).contentCount++;
        }

        // Append one posting per distinct term to the inverted index.
        for (Map.Entry<String, WordCount> entry : wordCountMap.entrySet()) {
            // Build the posting outside the lock; only the map mutation is guarded.
            Weight weight = new Weight();
            weight.setDocId(docInfo.getDoId());
            weight.setWeight(entry.getValue().titleCount * 10 + entry.getValue().contentCount);
            synchronized (lock2) {
                invertedIndex.computeIfAbsent(entry.getKey(), k -> new ArrayList<>()).add(weight);
            }
        }
    }

    /**
     * Builds the forward-index entry for a new document. The docId is the
     * document's position in the forward index, so reading the size and
     * appending must happen atomically under {@code lock1}.
     */
    private DocInfo builderDoInfo(String title, String url, String content) {
        DocInfo docInfo = new DocInfo();
        docInfo.setTitle(title);
        docInfo.setUrl(url);
        docInfo.setContent(content);
        synchronized (lock1) {
            docInfo.setDoId(forwardIndex.size());
            forwardIndex.add(docInfo);
        }
        return docInfo;
    }

    /** 4. Persist both in-memory index structures to disk as JSON files. */
    public void save() {
        long beginTime = System.currentTimeMillis();
        System.out.println("保存索引开始");
        File indexPathFile = new File(INDEX_PATH);
        if (!indexPathFile.exists()) {
            // Create the index directory (and any missing parents) on first save.
            indexPathFile.mkdirs();
        }
        File forwordIndexFile = new File(INDEX_PATH + "forwordFile.txt");
        File invertedIndexFile = new File(INDEX_PATH + "invertedFile.txt");
        try {
            OBJECT_MAPPER.writeValue(forwordIndexFile, forwardIndex);
            OBJECT_MAPPER.writeValue(invertedIndexFile, invertedIndex);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        long endTime = System.currentTimeMillis();
        System.out.println("保存索引结束，消耗时间:" + (endTime - beginTime));
    }

    /**
     * 5. Load both index structures from disk into memory.
     *
     * <p>BUG FIX: the original version parsed both files but discarded the
     * results, leaving {@code forwardIndex}/{@code invertedIndex} empty —
     * the parsed values are now assigned to the fields.
     */
    public void load() {
        System.out.println("加载索引开始");
        long beginTime = System.currentTimeMillis();
        File forwordIndexFile = new File(INDEX_PATH + "forwordFile.txt");
        File invertedIndexFile = new File(INDEX_PATH + "invertedFile.txt");
        try {
            forwardIndex = OBJECT_MAPPER.readValue(forwordIndexFile,
                    new TypeReference<ArrayList<DocInfo>>() {});
            invertedIndex = OBJECT_MAPPER.readValue(invertedIndexFile,
                    new TypeReference<HashMap<String, ArrayList<Weight>>>() {});
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        long endTime = System.currentTimeMillis();
        System.out.println("加载索引结束");
    }
}
