import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class cniuer {
    import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

    //通过这个类在内存中创建索引结构
    public class Index {
        //使用数组下标表示docId
        private ArrayList<DocInfo> forwardIndex = new ArrayList<>();

        //使用哈希表来弄倒排索引
        //value 就是一组和这个词关联的文章
        private HashMap<String,ArrayList<Weight>> invertedIndex = new HashMap<>();

        //1.给定一个docId，在正排索引中，查询文档的详细信息
        public DocInfo getDocInfo(int docId){
            return forwardIndex.get(docId);
        }
        //2.给定一个词，在倒排索引中，查哪些文档和这个词关联
        //单纯返回一个整数List不太好，因为词和文档之间存在一定的相关性，应该返回相关性
        public List<Weight> getInverted(String term){
            return invertedIndex.get(term);
        }
        //3.往索引中新增一个文档
        public void addDoc(String title,String url,String content){
            //正排和倒排都要加
            DocInfo docInfo = new buildForward(title,url,content);
            builderInverted(docInfo);
        }

        private void builderInverted(DocInfo docInfo) {
            class WordCnt {
                //表示这个词在标题中出现的次数
                public int titleCount;
                //表示这个此在正文中出现的次数
                public int contentCount;
            }
            //统计词频
            HashMap<String,WordCnt> wordCntHashMap = new HashMap<>();
            //1.针对文档标题进行分词
            List<Term> terms = ToAnalysis.parse(docInfo.getTitle()).getTerms();
            //2.遍历分词结果，统计每个词出现的次数
            for(Term term:terms){
                String word = term.getName();
                WordCnt wordCnt = wordCntHashMap.get(word);
                if(wordCnt == null){
                    WordCnt newWordCnt = new WordCnt();
                    newWordCnt.titleCount= 1;
                    newWordCnt.contentCount = 0;
                    wordCntHashMap.put(word,newWordCnt);
                }else {
                    wordCnt.titleCount+=1;
                }
            }
            //3.针对正文进行分词
            terms = ToAnalysis.parse(docInfo.getContent()).getTerms();
            //4.遍历分词结果，统计每个词出现的次数
            for(Term term:terms){
                String word = term.getName();
                WordCnt wordCnt = wordCntHashMap.get(word);
                if(wordCnt == null){
                    WordCnt newWordCnt = new WordCnt();
                    newWordCnt.titleCount= 0;
                    newWordCnt.contentCount = 1;
                    wordCntHashMap.put(word,newWordCnt);
                }else {
                    wordCnt.contentCount+=1;
                }
                //5.把上面的结果汇总到一个HashMap里面
                //最终文档的权重，就设定成标题中出现的次数*10+正文中出现的次数
                //6.遍历刚才这个HashMap，依次来更新倒排索引中的结构了。
            }

            private DocInfo buildForward(String title,String url,String content){
                DocInfo docInfo = new DocInfo();
                docInfo.setDocId(forwardIndex.size());
                docInfo.setTitle(title);
                docInfo.setUrl(url);
                docInfo.setContent(content);
                forwardIndex.add(docInfo);
                forwardIndex.add(docInfo);
                return docInfo;
            }

            //4.把内存中的索引结构保存到磁盘中
            public void save(){

            }
            //5.把磁盘中的索引数据加载到内存中
            public void load(){

            }
        }

    }
