package com.wjd.parser;

import com.wjd.config.Constants;
import com.wjd.utils.StrUtils;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;
import java.util.stream.Stream;

/**
 * 中文分词器
 */
/**
 * Chinese word segmenter based on bidirectional maximum matching.
 * <p>
 * A word-frequency dictionary and a stop-word list are loaded once at
 * class-load time. Input text is split into clauses on spaces and Chinese
 * punctuation, each clause is segmented with both forward and backward
 * maximum matching, and the result with fewer (i.e. longer) words wins.
 */
public class ChineseParser {

    // Path of the word-frequency dictionary file.
    public static String wordFile = Constants.CHINESE_TOKENS_PATH;
    // Path of the stop-word list file.
    public static String stopWordFile = Constants.STOP_WORDS_PATH;

    // Delimiters used to split the input into clauses (space plus Chinese punctuation).
    private static final String TOKEN_DELIMITER = " 。，？！";
    // Dictionary: word -> frequency.
    private static final Map<String, Integer> wordTable;
    // Stop-word set.
    private static final Set<String> stopWordTable;
    // Length (in chars) of the longest dictionary word; used as the matching window size.
    private static int maxWordLength = 1;

    static {
        wordTable = new HashMap<>();
        stopWordTable = new HashSet<>();
        loadWordFile(wordFile);
        loadStopWordFile(stopWordFile);
    }

    /**
     * Loads the word-frequency dictionary from the given file.
     * <p>
     * Each line is expected to hold a word followed by its frequency,
     * separated by whitespace. Blank or malformed lines are skipped
     * instead of aborting class initialization.
     *
     * @param filename dictionary file path
     * @throws RuntimeException if the file cannot be read
     */
    private static void loadWordFile(String filename) {
        try (Stream<String> lines = Files.lines(Paths.get(filename), StandardCharsets.UTF_8)) {
            lines.forEach(line -> {
                StringTokenizer analyse = new StringTokenizer(line);
                // Guard against blank or truncated lines, which previously
                // threw NoSuchElementException from nextToken().
                if (analyse.countTokens() < 2) {
                    return;
                }
                String word = analyse.nextToken();
                int freq;
                try {
                    freq = Integer.parseInt(analyse.nextToken());
                } catch (NumberFormatException e) {
                    return; // non-numeric frequency — skip the line
                }
                maxWordLength = Math.max(word.length(), maxWordLength);
                wordTable.put(word, freq);
            });
        } catch (IOException e) {
            throw new RuntimeException("Failed to load word file: " + filename, e);
        }
    }

    /**
     * Loads the stop-word list from the given file (one word per line).
     *
     * @param filename stop-word file path
     * @throws RuntimeException if the file cannot be read
     */
    private static void loadStopWordFile(String filename) {
        try (Stream<String> lines = Files.lines(Paths.get(filename), StandardCharsets.UTF_8)) {
            // Ignore empty lines so they never end up in the stop-word set.
            lines.filter(line -> !line.isEmpty()).forEach(stopWordTable::add);
        } catch (IOException e) {
            throw new RuntimeException("Failed to load stop word file: " + filename, e);
        }
    }

    /**
     * Segments the string and returns each word with its term frequency.
     *
     * @param str input text
     * @return map of word -> term frequency
     */
    public Map<String, Double> getTF(String str) {
        return parseWords(str);
    }

    /**
     * Returns the keywords of the string joined by spaces.
     * <p>
     * Note: the result keeps the historical format of a leading space
     * before the first keyword. Keyword order follows the iteration
     * order of the underlying HashMap and is therefore unspecified.
     *
     * @param str input text
     * @return space-separated keyword string (with a leading space)
     */
    public String getKeyWords(String str) {
        Map<String, Double> tf = parseWords(str);
        StringBuilder s = new StringBuilder();
        for (String word : tf.keySet()) {
            s.append(" ").append(word);
        }
        return s.toString();
    }

    /**
     * Segments the string with bidirectional maximum matching and records
     * term frequencies.
     * <p>
     * For each clause both backward and forward matching are run; the
     * segmentation with fewer words (coarser granularity) is kept. On a
     * tie the forward result wins, matching the original behavior.
     *
     * @param str input text
     * @return map of word -> term frequency
     */
    private Map<String, Double> parseWords(String str) {
        // Segment each clause independently.
        List<String> wordlist = new ArrayList<>();
        StringTokenizer tokenizer = new StringTokenizer(str, TOKEN_DELIMITER);
        while (tokenizer.hasMoreTokens()) {
            String clause = tokenizer.nextToken();
            List<String> backward = backwardParse(clause);
            List<String> forward = forwardParse(clause);
            // Fewer words means larger-granularity segmentation — prefer it.
            wordlist.addAll(backward.size() < forward.size() ? backward : forward);
        }

        // Record each word's term frequency.
        Map<String, Double> tf = new HashMap<>();
        int size = wordlist.size();
        if (size > 0) {
            // Drop stop words before counting.
            // NOTE: size is the token count BEFORE stop-word removal, so the
            // frequencies need not sum to 1 — kept for backward compatibility.
            wordlist.removeIf(stopWordTable::contains);
            for (String word : wordlist) {
                tf.merge(word, 1.0 / size, Double::sum);
            }
        }
        return tf;
    }

    /**
     * Backward (right-to-left) maximum matching.
     * <p>
     * Slides a window of up to {@code maxWordLength} characters from the
     * end of the clause toward the start, emitting the longest dictionary
     * word found at each position. Numeric strings and English words are
     * skipped (not emitted).
     *
     * @param str clause to segment
     * @return words in original (left-to-right) order
     */
    private List<String> backwardParse(String str) {
        List<String> words = new ArrayList<>();
        int to = str.length();
        int from = Math.max(to - maxWordLength, 0);
        while (to > 0) {
            String word = str.substring(from, to);
            if (wordTable.getOrDefault(word, -1) > 0) {
                // Dictionary hit: emit the word and move the window left.
                words.add(word);
                to = from;
                from = Math.max(to - maxWordLength, 0);
            } else if (StrUtils.isNumber(word) || StrUtils.isEnglish(word)) {
                // Skip numeric strings and English words.
                to = from;
                from = Math.max(to - maxWordLength, 0);
            } else if (from < to - 1) {
                // No dictionary match: shrink the candidate from the left.
                from++;
            } else {
                // Single character not in the dictionary: emit it as-is.
                words.add(word);
                to--;
                from = Math.max(to - maxWordLength, 0);
            }
        }
        Collections.reverse(words); // restore left-to-right order
        return words;
    }

    /**
     * Forward (left-to-right) maximum matching.
     * <p>
     * Slides a window of up to {@code maxWordLength} characters from the
     * start of the clause toward the end, emitting the longest dictionary
     * word found at each position. Numeric strings and English words are
     * skipped (not emitted).
     *
     * @param str clause to segment
     * @return words in left-to-right order
     */
    private List<String> forwardParse(String str) {
        List<String> words = new ArrayList<>();
        int length = str.length();
        int from = 0;
        int to = Math.min(maxWordLength, length);
        while (from < length) {
            String word = str.substring(from, to);
            if (wordTable.getOrDefault(word, -1) > 0) {
                // Dictionary hit: emit the word and move the window right.
                words.add(word);
                from = to;
                to = Math.min(from + maxWordLength, length);
            } else if (StrUtils.isNumber(word) || StrUtils.isEnglish(word)) {
                // Skip numeric strings and English words.
                from = to;
                to = Math.min(from + maxWordLength, length);
            } else if (from + 1 < to) {
                // No dictionary match: shrink the candidate from the right.
                to--;
            } else {
                // Single character not in the dictionary: emit it as-is.
                words.add(word);
                from++;
                to = Math.min(from + maxWordLength, length);
            }
        }
        return words;
    }

}
