package com.chenshu.doc_searchengine.service;

import com.chenshu.doc_searchengine.common.Config;
import com.chenshu.doc_searchengine.model.DocInfo;
import com.chenshu.doc_searchengine.model.Result;
import com.chenshu.doc_searchengine.utils.Index;
import com.chenshu.doc_searchengine.utils.Weight;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.springframework.stereotype.Service;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Created with IntelliJ IDEA
 * Description
 * User：chenshu
 * Date：2024-05-03
 * Time：23:16
 */
@Service
public class SearchService {
    // Number of search results shown per page; shared by searchResults and getTotalPage.
    private static final int PAGE_SIZE = 6;
    // Path to the stop-word file (one word per line), chosen by deployment environment.
    private static final String STOP_WORD_PATH;
    static {
        if (Config.isOnline) {
            STOP_WORD_PATH = "/root/project/document-search-engine/doc_search_index/stop_word.txt";
        } else {
            STOP_WORD_PATH = "/Users/chenshu/Code/project/document-search-engine/doc_search_index/stop_word.txt";
        }
    }
    // Stop words filtered out of every query before searching.
    private final HashSet<String> stopWords = new HashSet<>();
    private final Index index = new Index();

    public SearchService() {
        index.load();
        loadStopWords();
    }

    /**
     * Loads the stop-word file into {@link #stopWords}, one word per line.
     * Reads as UTF-8 explicitly — the previous FileReader used the platform
     * default charset, which corrupts non-ASCII stop words on some systems.
     *
     * @throws RuntimeException wrapping the IOException if the file cannot be read
     */
    private void loadStopWords() {
        try (BufferedReader bufferedReader = Files.newBufferedReader(
                Paths.get(STOP_WORD_PATH), StandardCharsets.UTF_8)) {
            String line;
            while ((line = bufferedReader.readLine()) != null) {
                stopWords.add(line);
            }
        } catch (IOException e) {
            throw new RuntimeException("Failed to load stop words from " + STOP_WORD_PATH, e);
        }
    }

    /**
     * Builds a short description snippet of {@code content} centered on the first
     * occurrence of any query term, with every term occurrence wrapped in
     * {@code <i>} tags for front-end highlighting.
     *
     * @param content full document body text
     * @param terms   query terms (already stop-word filtered, lowercase from ansj)
     * @return snippet of roughly 200 chars ("..." appended when truncated), or the
     *         leading 300 chars when no term occurs in the content at all
     */
    private String genDesc(String content, List<Term> terms) {
        // Locate the first whole-word occurrence of any term. Matching against a
        // lowercased copy keeps offsets valid for substring() on the original.
        int firstPos = -1;
        String lowerContent = content.toLowerCase();
        for (Term term : terms) {
            // (?<![a-zA-Z]) / (?![a-zA-Z]) enforce whole-word matching;
            // Pattern.quote guards against regex metacharacters in the term.
            Pattern pattern = Pattern.compile(
                    "(?<![a-zA-Z])" + Pattern.quote(term.getName()) + "(?![a-zA-Z])");
            Matcher matcher = pattern.matcher(lowerContent);
            if (matcher.find()) {
                firstPos = matcher.start();
                break;
            }
        }
        if (firstPos == -1) {
            // Edge case: no query term appears in the body; fall back to the head.
            if (content.length() < 300) {
                return content;
            }
            return content.substring(0, 300) + "...";
        }
        // Start the snippet up to 100 chars before the first match and cap it at 200 chars.
        String desc;
        int descBegin = firstPos < 100 ? 0 : firstPos - 100;
        if (descBegin + 200 > content.length()) {
            desc = content.substring(descBegin);
        } else {
            desc = content.substring(descBegin, descBegin + 200) + "...";
        }

        // Highlight every term occurrence. Pattern.quote fixes the regex-injection
        // bug (the raw word used to be interpolated into the pattern), and "$0"
        // re-emits the matched text so its original case is preserved instead of
        // being replaced by the lowercase term.
        for (Term term : terms) {
            Pattern pattern = Pattern.compile(
                    "(?i)(?<![a-zA-Z])" + Pattern.quote(term.getName()) + "(?![a-zA-Z])");
            desc = pattern.matcher(desc).replaceAll("<i>$0</i>");
        }
        return desc;
    }

    /**
     * Collects the inverted-index postings for every term and, when the query has
     * two or more terms, merges entries for the same document by summing weights.
     *
     * @param terms stop-word-filtered query terms
     * @return postings list, deduplicated by docId with summed weights for multi-term queries
     */
    private List<Weight> getAllTermResult(List<Term> terms) {
        List<Weight> allTermResult = new ArrayList<>();
        for (Term term : terms) {
            List<Weight> invertedList = index.getInverted(term.getName());
            if (invertedList == null) {
                continue;
            }
            allTermResult.addAll(invertedList);
        }

        // A document matching several terms appears in several postings lists;
        // merge duplicates by docId, accumulating the weight (docId -> total weight).
        if (terms.size() >= 2) {
            HashMap<Integer, Integer> weightByDocId = new HashMap<>();
            for (Weight weight : allTermResult) {
                weightByDocId.merge(weight.getDocId(), weight.getWeight(), Integer::sum);
            }
            List<Weight> merged = new ArrayList<>(weightByDocId.size());
            weightByDocId.forEach((docId, totalWeight) -> {
                Weight weight = new Weight();
                weight.setDocId(docId);
                weight.setWeight(totalWeight);
                merged.add(weight);
            });
            return merged;
        }
        return allTermResult;
    }

    /**
     * Tokenizes the query and removes stop words.
     *
     * @param query raw user query
     * @return segmented terms with stop words filtered out
     */
    private List<Term> getAllTerm(String query) {
        List<Term> originTerms = ToAnalysis.parse(query).getTerms();
        List<Term> terms = new ArrayList<>();
        for (Term term : originTerms) {
            if (stopWords.contains(term.getName())) {
                continue;
            }
            terms.add(term);
        }
        return terms;
    }

    /**
     * Searches for documents matching {@code query} and returns one page of results.
     *
     * @param query  raw user query
     * @param pIndex 1-based page number
     * @return at most {@link #PAGE_SIZE} results for the requested page, or
     *         {@code null} when the page is out of range (kept for caller compatibility)
     */
    public List<Result> searchResults(String query, Integer pIndex) {
        // 1. Tokenize the query and drop stop words.
        List<Term> terms = getAllTerm(query);

        // 2. Fetch and merge the postings for every term.
        List<Weight> allTermResult = getAllTermResult(terms);

        // 3. Sort by weight, highest first. Integer.compare avoids the overflow
        //    that plain subtraction can produce on extreme weights.
        allTermResult.sort((o1, o2) -> Integer.compare(o2.getWeight(), o1.getWeight()));

        // 4. Page the results. The lower-bound check also guards pIndex <= 0,
        //    which previously caused an IndexOutOfBoundsException.
        int resultSize = allTermResult.size();
        int begin = (pIndex - 1) * PAGE_SIZE;
        if (begin < 0 || begin >= resultSize) {
            // Out of range: preserve the original null contract.
            return null;
        }
        int end = Math.min(begin + PAGE_SIZE, resultSize);
        List<Result> results = new ArrayList<>(end - begin);
        for (int i = begin; i < end; i++) {
            Weight weight = allTermResult.get(i);
            DocInfo docInfo = index.getDocInfo(weight.getDocId());
            Result result = new Result();
            result.setTitle(docInfo.getTitle());
            result.setUrl(docInfo.getUrl());
            result.setDesc(genDesc(docInfo.getContent(), terms));
            results.add(result);
        }
        return results;
    }

    /**
     * Computes the total number of result pages for {@code query}.
     *
     * @param query raw user query
     * @return page count, rounding up so a partially filled last page counts
     */
    public Integer getTotalPage(String query) {
        int resultSize = getAllTermResult(getAllTerm(query)).size();
        return (int) Math.ceil((double) resultSize / PAGE_SIZE);
    }
}
