package com.example.searchjavaapi.index;

import com.example.searchjavaapi.common.DocInfo;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.springframework.stereotype.Service;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;


@Service
public class Index {
    /**
     * Relevance weight of a single word within a single document.
     * weight = 10 * (occurrences in the title) + (occurrences in the body).
     */
    static public class Weight {
        public String word;
        public int docId;
        public int weight;
    }

    // Forward index: docId -> DocInfo; the docId doubles as the array index.
    private ArrayList<DocInfo> forwardIndex = new ArrayList<>();
    // Inverted index: word -> posting list of Weight entries (which documents
    // contain the word, and how strongly).
    private HashMap<String, ArrayList<Weight>> invertedIndex = new HashMap<>();

    /** Forward lookup: resolve a docId to its DocInfo. */
    public DocInfo getDocInfo(int docId) {
        return forwardIndex.get(docId);
    }

    /**
     * Inverted lookup: posting list for a term, or {@code null} if the term
     * never occurred in any indexed document.
     */
    public ArrayList<Weight> getInvertedIndex(String term) {
        return invertedIndex.get(term);
    }

    /**
     * Builds both indexes by loading the raw-data file into memory.
     * Each line is one document with three {@code \3}-separated fields:
     * title, url, body.
     *
     * @param inputPath path of the raw data file (e.g. raw_data.txt)
     * @throws IOException if the file cannot be opened or read
     */
    public void build(String inputPath) throws IOException {
        // Local accumulator for coarse timing of the three build phases.
        class Timer {
            public long readFileTime;
            public long buildForwardTime;
            public long buildInvertedTime;
        }

        Timer timer = new Timer();
        long startTime = System.currentTimeMillis();
        System.out.println("build start");

        // try-with-resources guarantees the reader is closed even when an
        // exception escapes the loop (the original leaked it on failure).
        // Decode explicitly as UTF-8 instead of the platform default charset,
        // since the corpus contains non-ASCII text.
        try (BufferedReader bufferedReader = new BufferedReader(
                new InputStreamReader(new FileInputStream(inputPath), StandardCharsets.UTF_8))) {
            while (true) {
                long t1 = System.currentTimeMillis();

                String line = bufferedReader.readLine();
                if (line == null) {
                    break;
                }
                long t2 = System.currentTimeMillis();

                // Split the line on \3 and append a DocInfo to the forward index.
                DocInfo docInfo = buildForward(line);
                long t3 = System.currentTimeMillis();

                if (docInfo == null) {
                    // Malformed line: buildForward already logged it. Skip it
                    // instead of passing null into buildInverted (the original
                    // threw a NullPointerException here on the first bad line).
                    continue;
                }

                // Tokenize the document and merge it into the inverted index.
                buildInverted(docInfo);
                long t4 = System.currentTimeMillis();

                timer.readFileTime += (t2 - t1);
                timer.buildForwardTime += (t3 - t2);
                timer.buildInvertedTime += (t4 - t3);
            }
        }

        long finishedTime = System.currentTimeMillis();
        System.out.println("build end! time:" + (finishedTime - startTime) + "ms");
        System.out.println("readFiletime: " + timer.readFileTime
                + " buildForwaerdTime: " + timer.buildForwardTime
                + " buildInveredTime: " + timer.buildInvertedTime);
    }

    /**
     * Merges one document into the inverted index: tokenizes the title and
     * body with ansj, counts each word's occurrences in both, then appends a
     * Weight (weight = 10 * titleCount + contentCount) to every word's
     * posting list.
     */
    private void buildInverted(DocInfo docInfo) {
        // Per-word occurrence counters for this single document.
        class WordCnt {
            public int titleCount;
            public int contentCount;

            public WordCnt(int titleCount, int contentCount) {
                this.titleCount = titleCount;
                this.contentCount = contentCount;
            }
        }
        HashMap<String, WordCnt> wordCntHashMap = new HashMap<>();

        // 1. Tokenize the title and count each word's occurrences.
        List<Term> titleTerms = ToAnalysis.parse(docInfo.getTitle()).getTerms();
        for (Term term : titleTerms) {
            // NOTE(review): the original comment claimed getName() is already
            // lower-cased; nothing here shows that — confirm, otherwise mixed
            // case splits the postings for the same word.
            String word = term.getName();
            wordCntHashMap.computeIfAbsent(word, k -> new WordCnt(0, 0)).titleCount++;
        }

        // 2. Tokenize the body and count each word's occurrences.
        List<Term> contentTerms = ToAnalysis.parse(docInfo.getContnet()).getTerms();
        for (Term term : contentTerms) {
            String word = term.getName();
            wordCntHashMap.computeIfAbsent(word, k -> new WordCnt(0, 0)).contentCount++;
        }

        // 3. For every distinct word, build a Weight and append it to the
        //    word's posting list in the inverted index, creating the list on
        //    first sight of the word.
        for (Map.Entry<String, WordCnt> entry : wordCntHashMap.entrySet()) {
            Weight weight = new Weight();
            weight.word = entry.getKey();
            weight.docId = docInfo.getDocId();
            weight.weight = entry.getValue().titleCount * 10 + entry.getValue().contentCount;
            invertedIndex.computeIfAbsent(entry.getKey(), k -> new ArrayList<>()).add(weight);
        }
    }

    /**
     * Parses one raw line ({@code title\3url\3body}) into a DocInfo, assigns
     * it the next docId, and appends it to the forward index.
     *
     * @return the new DocInfo, or {@code null} if the line does not have
     *         exactly three fields (caller must skip such lines)
     */
    private DocInfo buildForward(String line) {
        String[] tokens = line.split("\3");
        if (tokens.length != 3) {
            // Malformed line: log and skip rather than aborting the whole build.
            System.out.println("文件格式存在问题" + line);
            return null;
        }
        DocInfo docInfo = new DocInfo();
        // The docId is exactly the document's position in the forward index.
        docInfo.setDocId(forwardIndex.size());
        docInfo.setTitle(tokens[0]);
        docInfo.setUrl(tokens[1]);
        // setContnet is the (misspelled) setter DocInfo actually declares.
        docInfo.setContnet(tokens[2]);
        forwardIndex.add(docInfo);
        return docInfo;
    }
}















