package com.issac.OnsiteSearchEngine.searcher;

import com.fasterxml.jackson.core.exc.StreamReadException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DatabindException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.nlpcn.commons.lang.util.WordAlert;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class Index {
    // Directory where the serialized index files are written to / loaded from.
    private static final String INDEX_FILE_PATH = "D:/ISSAC/Code/JavaProject/jdk-17.0.14_doc-all/docs/api/";

    // Jackson mapper used to (de)serialize the index files; never reassigned.
    private final ObjectMapper objectMapper = new ObjectMapper();

    /**
     * Index builds and holds the search index in memory.
     *
     * Fields:
     *   forwardIndex  - forward index: docId -> document details
     *   invertedIndex - inverted index: term -> documents related to that term
     *
     * Final invertedIndex structure:
     * {
     * "Java":    [Weight(docId=0, weight=13), Weight(docId=1, weight=7), ...],
     * "search":  [Weight(docId=1, weight=12)],
     * "api":     [Weight(docId=0, weight=10)]
     * }
     * <p>
     * This class provides:
     * 1. Given a docId, look up the document's details in the forward index.
     * 2. Given a term, look up which documents contain that term in the inverted index.
     * 3. Add a new document to the index.
     * 4. Save the in-memory index structures to disk.
     * 5. Load the index data from disk back into memory.
     */

    // The list position (array subscript) is the docId.
    // Not final: reassigned wholesale by loadIndexToMemory().
    private ArrayList<DocInfo> forwardIndex = new ArrayList<>();

    // Inverted index. key: term, value: documents related to that term.
    // Not final: reassigned wholesale by loadIndexToMemory().
    private HashMap<String, ArrayList<Weight>> invertedIndex = new HashMap<>();

    // Two separate locks so forward-index and inverted-index updates
    // from concurrent document-add threads do not contend with each other.
    private final Object locker1 = new Object();
    private final Object locker2 = new Object();

    /**
     * 1. Given a docId, look up the document's details in the forward index.
     *
     * @param docId the document id (its position in the forward index)
     * @return the document's details
     * @throws IndexOutOfBoundsException if docId is not a valid index
     */
    public DocInfo getDocInfoByDocId(int docId) {
        return forwardIndex.get(docId);
    }

    /**
     * 2. Given a term, look up which documents are related to it (contain it).
     *
     * NOTE(review): this read is not synchronized against buildInvertedIndex's
     * writers — it assumes queries only run after index construction finishes.
     *
     * @param term a single tokenized word
     * @return the list of (docId, weight) entries for that term,
     *         or {@code null} if no document contains it
     */
    public List<Weight> getDocsFromInvertedIndex(String term) {
        return invertedIndex.get(term);
    }

    /**
     * 3. Add one document to the index (both forward and inverted parts).
     *
     * @param title   document title
     * @param url     document URL
     * @param content document body text
     */
    public void addDocToIndex(String title, String url, String content) {
        // Build the forward-index entry (assigns the docId).
        DocInfo docInfo = buildForwardIndex(title, url, content);
        // Merge this document's term statistics into the inverted index.
        buildInvertedIndex(docInfo);
    }

    /**
     * Appends a new DocInfo to the forward index.
     * The docId is the entry's position in the list, so size-read and add
     * must happen atomically under locker1.
     */
    private DocInfo buildForwardIndex(String title, String url, String content) {
        DocInfo docInfo = new DocInfo();
        docInfo.setTitle(title);
        docInfo.setUrl(url);
        docInfo.setContent(content);
        synchronized (locker1) {
            docInfo.setDocId(forwardIndex.size());
            forwardIndex.add(docInfo);
        }
        return docInfo;
    }

    /**
     * Tokenizes one document's title and content, counts per-term frequencies,
     * and merges the resulting weights into the inverted index.
     */
    private void buildInvertedIndex(DocInfo docInfo) {
        // Per-document term-frequency record.
        class WordCount {
            public int countInTitle;   // occurrences of the word in the title
            public int countInContent; // occurrences of the word in the body
        }

        // Term-frequency table for the current document only.
        HashMap<String, WordCount> wordCntHashMap = new HashMap<>();

        // 1./2. Tokenize the title and count each term's occurrences.
        // computeIfAbsent yields a zero-initialized WordCount on first sight,
        // so create-then-increment is equivalent to "create with count 1".
        for (Term term : ToAnalysis.parse(docInfo.getTitle()).getTerms()) {
            WordCount wordCount = wordCntHashMap.computeIfAbsent(term.getName(), k -> new WordCount());
            wordCount.countInTitle += 1;
        }

        // 3./4. Tokenize the content and count each term's occurrences.
        for (Term term : ToAnalysis.parse(docInfo.getContent()).getTerms()) {
            WordCount wordCount = wordCntHashMap.computeIfAbsent(term.getName(), k -> new WordCount());
            wordCount.countInContent += 1;
        }

        // 5./6. Merge each term's statistics into the inverted index.
        // weight = 10 * titleCount + contentCount (title hits weigh 10x).
        for (Map.Entry<String, WordCount> entry : wordCntHashMap.entrySet()) {
            // Build the Weight outside the lock; only the map mutation
            // needs to be protected against concurrent document-add threads.
            Weight weight = new Weight();
            weight.setDocId(docInfo.getDocId());
            weight.setWeight(entry.getValue().countInTitle * 10 + entry.getValue().countInContent);
            synchronized (locker2) {
                invertedIndex
                        .computeIfAbsent(entry.getKey(), k -> new ArrayList<>())
                        .add(weight);
            }
        }
    }

    /**
     * 4. Save the in-memory index structures to disk.
     * Uses two files: one for the forward index, one for the inverted index.
     */
    public void saveIndexToDisk() {
        long beginTime = System.currentTimeMillis();
        System.out.println("开始保存索引...");

        // 1. Create the target directory if it does not exist yet.
        File indexFilePath = new File(INDEX_FILE_PATH);
        if (!indexFilePath.exists()) {
            indexFilePath.mkdirs();
        }

        // 2. Target files (JSON content despite the .txt suffix).
        File forwardIndexFile = new File(INDEX_FILE_PATH + "ForwardIndex.txt");
        File invertedIndexFile = new File(INDEX_FILE_PATH + "InvertedIndex.txt");

        try {
            objectMapper.writeValue(forwardIndexFile, forwardIndex);
            objectMapper.writeValue(invertedIndexFile, invertedIndex);
        } catch (IOException e) {
            // Best-effort persistence: report the failure, keep running.
            e.printStackTrace();
        }
        long endTime = System.currentTimeMillis();
        System.out.println("索引保存结束！耗时： " + (endTime - beginTime) + "ms");
    }

    /**
     * 5. Load the index data from disk back into memory.
     * Replaces both in-memory structures wholesale on success.
     */
    public void loadIndexToMemory() {
        long beginTime = System.currentTimeMillis();
        System.out.println("开始加载索引...");
        // 1. Source files written by saveIndexToDisk().
        File forwardIndexFile = new File(INDEX_FILE_PATH + "ForwardIndex.txt");
        File invertedIndexFile = new File(INDEX_FILE_PATH + "InvertedIndex.txt");
        try {
            // 2. Deserialize; TypeReference preserves the generic element types.
            forwardIndex = objectMapper.readValue(forwardIndexFile, new TypeReference<ArrayList<DocInfo>>() {});
            invertedIndex = objectMapper.readValue(invertedIndexFile, new TypeReference<HashMap<String, ArrayList<Weight>>>() {});
        } catch (IOException e) {
            // Best-effort load: report the failure, keep whatever was in memory.
            e.printStackTrace();
        }
        long endTime = System.currentTimeMillis();
        System.out.println("加载索引结束! 耗时: " + (endTime - beginTime) + " ms");
    }

    // Manual smoke test: load the persisted index from disk.
    public static void main(String[] args) {
        Index index = new Index();
        index.loadIndexToMemory();
        System.out.println("索引加载完成！");
    }
}
