package com.example.java_doc_searcher.seracher;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class Index {
    /** Guards forwardIndex (docId allocation + append). */
    private Object locker1 = new Object();
    /** Guards invertedIndex. Separate lock so forward/inverted updates don't contend. */
    private Object locker2 = new Object();

    /** Relative weight of a title hit versus a content hit. */
    private static final int TITLE_WEIGHT = 10;

    // Directory where the serialized index files are persisted.
    public static String INDEX_PATH = null;
    static {
        if (Config.isOnline) {
            INDEX_PATH = "/install/java_doc_searcher/";
        } else {
            INDEX_PATH = "D:\\codeutil\\javadoc\\";
        }
    }

    // Jackson mapper used for save/load; thread-safe, so one instance is enough.
    private ObjectMapper objectMapper = new ObjectMapper();

    // Forward index: the list subscript doubles as the document's docId.
    private ArrayList<DocInfo> forwardIndex = new ArrayList<>();

    // Inverted index: term -> all documents (with weight) containing that term.
    Map<String, ArrayList<Weight>> invertedIndex = new HashMap<>();

    /**
     * Forward lookup: docId -> document.
     *
     * @param docId index into the forward list (0-based)
     * @return the document info for that id
     */
    public DocInfo getDocInfo(Integer docId) {
        return forwardIndex.get(docId);
    }

    /**
     * Inverted lookup: term -> posting list.
     *
     * @param term a tokenized word
     * @return the posting list, or {@code null} if the term is unknown
     */
    public ArrayList<Weight> getInvertedIndex(String term) {
        return invertedIndex.get(term);
    }

    /**
     * Adds one document to both the forward and the inverted index.
     *
     * @param title   document title
     * @param url     document URL
     * @param content document body text
     */
    public void addDoc(String title, String url, String content) {
        DocInfo docInfo = buildForward(title, url, content);
        buildInverted(docInfo);
    }

    /**
     * Builds the inverted-index entries for one document: tokenizes the
     * title and the content, counts per-term occurrences, then appends a
     * weighted posting (titleCount * TITLE_WEIGHT + contentCount) to each
     * term's posting list.
     *
     * @param docInfo the document (must already have its docId assigned)
     */
    private void buildInverted(DocInfo docInfo) {
        // Per-term occurrence counts within this single document.
        class WordCnt {
            public int titleCount;
            public int contentCount;
        }
        Map<String, WordCnt> wordCntMap = new HashMap<>();

        // Tokenize the title and tally title occurrences.
        List<Term> terms = ToAnalysis.parse(docInfo.getTitle()).getTerms();
        for (Term term : terms) {
            WordCnt wordCnt = wordCntMap.computeIfAbsent(term.getName(), k -> new WordCnt());
            wordCnt.titleCount += 1;
        }

        // Tokenize the content and tally content occurrences.
        // BUG FIX: the original set titleCount=1 (and contentCount=0) the first
        // time a content-only word was seen, crediting a body hit to the title
        // at 10x weight; body hits must increment contentCount instead.
        terms = ToAnalysis.parse(docInfo.getContent()).getTerms();
        for (Term term : terms) {
            WordCnt wordCnt = wordCntMap.computeIfAbsent(term.getName(), k -> new WordCnt());
            wordCnt.contentCount += 1;
        }

        // Convert the counts into weighted postings on the shared inverted index.
        for (Map.Entry<String, WordCnt> entry : wordCntMap.entrySet()) {
            synchronized (locker2) {
                Weight weight = new Weight();
                weight.setDocId(docInfo.getDocId());
                weight.setWeight(entry.getValue().titleCount * TITLE_WEIGHT
                        + entry.getValue().contentCount);
                invertedIndex.computeIfAbsent(entry.getKey(), k -> new ArrayList<>())
                        .add(weight);
            }
        }
    }

    /**
     * Builds the forward-index entry: assigns the next docId (the current
     * list size) and appends the document, under locker1 so concurrent
     * callers get distinct ids.
     *
     * @return the stored DocInfo with its docId set
     */
    private DocInfo buildForward(String title, String url, String content) {
        DocInfo docInfo = new DocInfo();
        docInfo.setTitle(title);
        docInfo.setUrl(url);
        docInfo.setContent(content);
        // docId assignment and append must be atomic relative to other writers.
        synchronized (locker1) {
            docInfo.setDocId(forwardIndex.size());
            forwardIndex.add(docInfo);
        }
        return docInfo;
    }

    /**
     * Serializes both indexes to JSON files under INDEX_PATH, creating the
     * directory if needed. I/O failures are logged, not rethrown.
     */
    public void save() {
        System.out.println("索引开始保存");
        File indexFile = new File(INDEX_PATH);
        if (!indexFile.exists()) {
            indexFile.mkdirs();
        }
        File indexForwardFile = new File(INDEX_PATH + "forward.txt");
        File indexInvertedFile = new File(INDEX_PATH + "inverted.txt");
        long begin = System.currentTimeMillis();
        try {
            objectMapper.writeValue(indexForwardFile, forwardIndex);
            objectMapper.writeValue(indexInvertedFile, invertedIndex);
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("索引保存完成,时间为：" + (end - begin) + "ms");
    }

    /**
     * Loads both indexes from the JSON files under INDEX_PATH, replacing the
     * in-memory state. On I/O failure the previous in-memory indexes are
     * kept and the error is logged.
     */
    public void load() {
        System.out.println("开始加载索引");
        long begin = System.currentTimeMillis();
        File indexForwardFile = new File(INDEX_PATH + "forward.txt");
        File indexInvertedFile = new File(INDEX_PATH + "inverted.txt");
        try {
            forwardIndex = objectMapper.readValue(indexForwardFile,
                    new TypeReference<ArrayList<DocInfo>>() {});
            invertedIndex = objectMapper.readValue(indexInvertedFile,
                    new TypeReference<HashMap<String, ArrayList<Weight>>>() {});
        } catch (IOException e) {
            e.printStackTrace();
        }
        long end = System.currentTimeMillis();
        System.out.println("索引加载完成，时间为：" + (end - begin) + "ms");
    }

    /** Manual smoke test: persists the (empty) in-memory index to disk. */
    public static void main(String[] args) {
        Index index = new Index();
        // BUG FIX: the original messages said "loading" (加载) while save()
        // was being called; the log now matches the actual operation.
        System.out.println("开始保存索引");
        index.save();
        System.out.println("索引保存完成");
    }
}
