package com.mxj.indexer.core;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.mxj.indexer.WeightDO;
import com.mxj.indexer.mapper.DatabaseMapper;
import com.mxj.indexer.model.Document;
import com.mxj.indexer.properties.IndexerProperties;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Service;

import java.io.File;
import java.util.*;
import java.util.stream.Collectors;

/**
 * @author yuisama
 * @date 2023/04/11 21:34
 **/
@Slf4j
@Service
@Lazy
public class IndexManager {
    // Forward index: list position (docId) -> IndexDocument.
    private final List<IndexDocument> forwardIndex = new ArrayList<>();
    // Inverted index: word -> list of (docId, weight) entries.
    private final Map<String, List<DocWeight>> invertedIndex = new HashMap<>();
    private final ObjectMapper objectMapper;
    private final IndexerProperties properties;
    private final DatabaseMapper databaseMapper;

    @Autowired
    public IndexManager(IndexerProperties properties, ObjectMapper objectMapper, DatabaseMapper databaseMapper) {
        this.properties = properties;
        this.objectMapper = objectMapper;
        this.databaseMapper = databaseMapper;
    }

    /**
     * Adds a document to both indexes. The docId is the document's current
     * position in the forward index.
     *
     * <p>NOTE(review): not thread-safe — both indexes are plain collections;
     * confirm this bean is only ever used from a single thread.
     *
     * @param document the crawled document to index
     */
    public void add(Document document) {
        // Forward index: docId is simply the next slot in the list.
        int docId = forwardIndex.size();
        IndexDocument indexDocument = new IndexDocument(docId, document);
        forwardIndex.add(indexDocument);

        // Inverted index: word -> [{docId, weight}, ...]
        buildInvertedIndex(indexDocument);
    }

    /** Prints every forward-index entry to stdout (debugging aid). */
    public void printForwardIndex() {
        for (int docId = 0; docId < forwardIndex.size(); docId++) {
            System.out.printf("docId: %d -> IndexDocument: %s\n", docId, forwardIndex.get(docId));
        }
    }

    /** Prints every inverted-index entry to stdout (debugging aid). */
    public void printInvertedIndex() {
        for (Map.Entry<String, List<DocWeight>> entry : invertedIndex.entrySet()) {
            System.out.printf("%s: \n", entry.getKey());
            for (DocWeight docWeight : entry.getValue()) {
                System.out.printf("    docWeight: %s\n", docWeight);
            }
        }
    }

    /** Segments the given text with the ansj analyzer and returns the token list. */
    private static List<String> tokenize(String text) {
        return ToAnalysis.parse(text)
                .getTerms()
                .stream()
                .map(Term::getName)
                .collect(Collectors.toList());
    }

    /** Counts how many times each word occurs in the given token list. */
    private static Map<String, Integer> countOccurrences(List<String> words) {
        Map<String, Integer> counts = new HashMap<>();
        for (String word : words) {
            counts.merge(word, 1, Integer::sum);
        }
        return counts;
    }

    /**
     * Builds inverted-index entries for one document: segments the title and
     * the content, counts per-field term frequencies, and records one
     * DocWeight per distinct word.
     */
    private void buildInvertedIndex(IndexDocument indexDocument) {
        int docId = indexDocument.getDocId();
        Document document = indexDocument.getDocument();

        // Per-field term frequencies (title weighs differently from content).
        Map<String, Integer> wordInTitleMap = countOccurrences(tokenize(document.getTitle()));
        Map<String, Integer> wordInContentMap = countOccurrences(tokenize(document.getContent()));

        // Union of every word seen in either the title or the content.
        Set<String> allWordSet = new HashSet<>(wordInTitleMap.keySet());
        allWordSet.addAll(wordInContentMap.keySet());

        for (String word : allWordSet) {
            // DocWeight derives the ranking weight from the two frequency maps.
            DocWeight docWeight = new DocWeight(word, docId, wordInTitleMap, wordInContentMap);
            // First occurrence of the word lazily creates its posting list.
            invertedIndex.computeIfAbsent(word, k -> new ArrayList<>()).add(docWeight);
        }
    }

    /**
     * Serializes both indexes as JSON files (forward.json / inverted.json)
     * under the configured index root path.
     *
     * <p>Fix: the original assumed the index root directory already existed
     * (the comments demanded it but nothing enforced it); it is now created
     * on demand, and the write fails fast with a clear error otherwise.
     */
    @SneakyThrows
    public void save1() {
        File indexRoot = new File(properties.getIndexRootPath());
        // Ensure the index root exists and is a directory before writing.
        if (!indexRoot.isDirectory() && !indexRoot.mkdirs()) {
            throw new IllegalStateException("Cannot create index directory: " + indexRoot.getCanonicalPath());
        }

        File forwardFile = new File(indexRoot, "forward.json");
        objectMapper.writeValue(forwardFile, forwardIndex);
        log.debug("正排索引已保存:{}", forwardFile.getCanonicalPath());

        File invertedFile = new File(indexRoot, "inverted.json");
        objectMapper.writeValue(invertedFile, invertedIndex);
        log.debug("倒排索引已保存:{}", invertedFile.getCanonicalPath());
    }

    /**
     * Persists both indexes to the database.
     *
     * <p>NOTE(review): inserts row by row with no batching or transaction —
     * slow for large indexes (the original comment acknowledged this).
     */
    public void save() {
        log.debug("将索引保存到数据库中。");
        // Forward index: one row per document.
        for (IndexDocument indexDocument : forwardIndex) {
            Document document = indexDocument.getDocument();
            databaseMapper.insertDocument(
                    indexDocument.getDocId(),
                    document.getTitle(),
                    document.getUrl(),
                    document.getContent()
            );
        }
        log.debug("正排索引保存成功。");

        // Inverted index: insert each weight row first to obtain its generated
        // wid, then link it to the word in the word_to_weights table.
        for (Map.Entry<String, List<DocWeight>> entry : invertedIndex.entrySet()) {
            String word = entry.getKey();
            for (DocWeight docWeight : entry.getValue()) {
                WeightDO weightDO = new WeightDO(docWeight);
                databaseMapper.insertWeight(weightDO);
                databaseMapper.insertWordToWeight(word, weightDO.getWid());
            }
        }
        log.debug("倒排索引保存成功。");
    }
}

