package com.helei.utils;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.io.ClassPathResource;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.stereotype.Component;

/**
 * Sensitive-word filter backed by a DFA (character trie) built from word-list files on the
 * classpath.
 *
 * <p>All files under {@code sensitiveWord/} are read once at startup and merged into a single
 * trie; scanning a text is then O(text length). Special characters and whitespace are skipped
 * during both tree construction and matching, so e.g. "军 刀" still matches the word "军刀".
 *
 * <p>Thread-safety: the trie is built once in {@link #initSensitiveWords()} and only read
 * afterwards; all matching state is method-local, so the public methods are safe for
 * concurrent use.
 */
@Component
public class SensitiveWordCheckUtils {
	private static final Logger logger = LoggerFactory.getLogger(SensitiveWordCheckUtils.class);

	/**
	 * Minimum-match rule: stop at the first (shortest) complete word found.
	 */
	private static final int MIN_MATCH_TYPE = 0;
	/**
	 * Maximum-match rule: keep extending to the longest complete word found.
	 */
	private static final int MAX_MATCH_TYPE = -1;
	/**
	 * Key under which every DFA node stores its end-of-word marker.
	 */
	private static final String IS_END = "isEnd";
	/**
	 * Marker value: this node is NOT the last character of a sensitive word.
	 */
	private static final String END_FALSE = "0";
	/**
	 * Marker value: this node IS the last character of a sensitive word.
	 */
	private static final String END_TRUE = "1";
	/**
	 * Root of the merged DFA tree. Keys are {@code Character} (child nodes, value = nested Map)
	 * plus the String key {@link #IS_END} (value = {@link #END_TRUE}/{@link #END_FALSE}).
	 * Populated once by {@link #initSensitiveWords()} and read-only afterwards.
	 */
	private Map<Object, Object> sensitiveWordMap = null;
	/**
	 * Classpath directory holding the sensitive-word files.
	 */
	private static final String SENSITIVE_WORD_FILE_PATH = "sensitiveWord/";
	/**
	 * Special characters (ASCII and full-width punctuation) and whitespace that are ignored
	 * during matching. '[' and ']' inside the character class are escaped as {@code \[ \]};
	 * the previous quadruple-backslash form put a bare ']' into the class, terminating it
	 * early and silently dropping all the full-width punctuation from the ignore set.
	 */
	private static final String IGNORE_SPECIAL_CHAR_REGEX = "[`~!@#$%^&*()+=|{}':;',\\[\\].<>/?~！@#￥%……&*（）——+|{}【】‘；：”“’。，、？]|\\s*";
	/**
	 * Precompiled ignore pattern. {@link Pattern} is immutable and thread-safe; a fresh
	 * {@link Matcher} is created per check because Matcher instances are NOT thread-safe
	 * (the previous shared-Matcher-with-reset approach corrupted state under concurrent calls).
	 */
	private static final Pattern IGNORE_PATTERN = Pattern.compile(IGNORE_SPECIAL_CHAR_REGEX);

	/**
	 * Builds the DFA tree from every word file under {@link #SENSITIVE_WORD_FILE_PATH}.
	 * Runs once after bean construction. Each resource is read through its own
	 * {@link InputStream}, which also works when the application is packaged as a jar
	 * (a {@code File}-based lookup would fail there).
	 *
	 * @throws RuntimeException if the sensitive-word directory cannot be resolved
	 */
	@PostConstruct
	private void initSensitiveWords() {
		sensitiveWordMap = new ConcurrentHashMap<>();
		Resource[] resources;
		try {
			resources = new PathMatchingResourcePatternResolver().getResources(SENSITIVE_WORD_FILE_PATH + "*.*");
		} catch (IOException e) {
			// Preserve the cause; without word files the filter would silently pass everything.
			throw new RuntimeException(e);
		}
		for (Resource resource : resources) {
			String filename = resource.getFilename();
			// Read straight from the resolved resource instead of rewrapping the name in a
			// ClassPathResource — equivalent for classpath files and more robust otherwise.
			try (InputStream inputStream = resource.getInputStream()) {
				createDFATree(readSensitiveWordFileToSet(inputStream, filename));
			} catch (IOException e) {
				logger.error("读取敏感词文件失败{{}}", filename, e);
				continue;
			}
			logger.info("将敏感词文件加载到DFA树列表成功{{}}", filename);
		}
		logger.info("总共构建{}棵DFA敏感词树", sensitiveWordMap.size());
	}

	/**
	 * Reads one sensitive word per line from the given file (UTF-8, trimmed, blanks skipped).
	 * Kept for callers that load word files from the filesystem; classpath loading goes
	 * through {@link #readSensitiveWordFileToSet(InputStream, String)}.
	 *
	 * @param file word-list file, one word per line
	 * @return de-duplicated words; empty if the file does not exist or cannot be read
	 */
	private Set<String> readSensitiveWordFileToSet(File file) {
		Set<String> words = new HashSet<>();
		if (file.exists()) {
			try (BufferedReader reader = new BufferedReader(
					new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
				String line;
				while ((line = reader.readLine()) != null) {
					String word = line.trim();
					// Skip blank lines so they do not inflate the word count.
					if (!word.isEmpty()) {
						words.add(word);
					}
				}
			} catch (Exception e) {
				logger.error("读取敏感词文件失败{{}}", file, e);
			}
		}
		logger.info("从文件{{}}读取到{{}}个敏感词", file, words.size());
		return words;
	}

	/**
	 * Reads one sensitive word per line from the given stream (UTF-8, trimmed, blanks skipped).
	 * The stream is NOT closed here; the caller owns it.
	 *
	 * @param inputStream open stream over a word-list file
	 * @param filename    file name, used for logging only
	 * @return de-duplicated words; empty if the stream cannot be read
	 */
	private Set<String> readSensitiveWordFileToSet(InputStream inputStream, String filename) {
		Set<String> words = new HashSet<>();
		try {
			BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
			String line;
			while ((line = reader.readLine()) != null) {
				String word = line.trim();
				// Skip blank lines so they do not inflate the word count.
				if (!word.isEmpty()) {
					words.add(word);
				}
			}
		} catch (Exception e) {
			logger.error("读取敏感词文件失败{{}}", filename, e);
		}
		logger.info("从文件{{}}读取到{{}}个敏感词", filename, words.size());
		return words;
	}

	/**
	 * Finds sensitive words in the text using the minimum-match rule (with words
	 * [出售, 出售军刀] the text "出售军刀" yields [出售]).
	 *
	 * @param content text to scan
	 * @return set of sensitive words found; empty if none
	 */
	public Set<String> getSensitiveWordMinMatch(String content) {
		return getSensitiveWord(content, MIN_MATCH_TYPE);
	}

	/**
	 * Finds sensitive words in the text using the maximum-match rule (with words
	 * [出售, 出售军刀] the text "出售军刀" yields [出售军刀]).
	 *
	 * @param content text to scan
	 * @return set of sensitive words found; empty if none
	 */
	public Set<String> getSensitiveWordMaxMatch(String content) {
		return getSensitiveWord(content, MAX_MATCH_TYPE);
	}

	/**
	 * Finds sensitive words in the text under the given match rule.
	 *
	 * @param content   text to scan (null/empty yields an empty set)
	 * @param matchType {@link #MIN_MATCH_TYPE} or {@link #MAX_MATCH_TYPE}
	 * @return set of sensitive words found; empty if none
	 */
	private Set<String> getSensitiveWord(String content, int matchType) {
		Set<String> sensitiveWordList = new HashSet<>();
		if (content == null || content.isEmpty()) {
			return sensitiveWordList;
		}
		for (int i = 0; i < content.length(); i++) {
			// A returned length of 0 means no sensitive word starts at index i.
			int length = checkSensitiveWord(content, i, matchType);
			if (length > 0) {
				sensitiveWordList.add(content.substring(i, i + length));
				// Resume scanning after the matched word (loop increment adds the final +1).
				i = i + length - 1;
			}
		}
		return sensitiveWordList;
	}

	/**
	 * Merges the given words into the DFA tree rooted at {@link #sensitiveWordMap}.
	 * For the words [出售手刀, 出售军刀] the tree looks like:
	 * <pre>
	 * {出={isEnd=0, 售={isEnd=0, 手={isEnd=0, 刀={isEnd=1}},
	 *                           军={isEnd=0, 刀={isEnd=1}}}}}
	 * </pre>
	 * Ignored characters (see {@link #isIgnore(char)}) are skipped, so "军 刀" and "军刀"
	 * share the same path. The node reached by a word's LAST significant character is marked
	 * isEnd=1 after the loop — the previous in-loop {@code i == length-1} check missed words
	 * that end in an ignored character (e.g. "傻？"), leaving them unmatchable.
	 *
	 * @param sensitiveWords words to insert
	 */
	private void createDFATree(Set<String> sensitiveWords) {
		for (String word : sensitiveWords) {
			Map<Object, Object> currentMap = sensitiveWordMap;
			for (int i = 0; i < word.length(); i++) {
				char key = word.charAt(i);
				if (isIgnore(key)) {
					continue;
				}
				Object child = currentMap.get(key);
				if (child == null) {
					// No subtree for this character yet — create it, initially not a word end.
					Map<Object, Object> newNode = new ConcurrentHashMap<>();
					newNode.put(IS_END, END_FALSE);
					currentMap.put(key, newNode);
					currentMap = newNode;
				} else {
					// Character keys only ever map to node Maps (IS_END is a String key).
					@SuppressWarnings("unchecked")
					Map<Object, Object> node = (Map<Object, Object>) child;
					currentMap = node;
				}
			}
			if (currentMap != sensitiveWordMap) {
				// Mark the last significant character; a guard on the root handles
				// empty or all-ignored words.
				currentMap.put(IS_END, END_TRUE);
			}
		}
	}

	/**
	 * Checks whether a sensitive word starts at the given index of the text.
	 *
	 * @param content    text to scan
	 * @param beginIndex index to start matching from
	 * @param matchType  {@link #MIN_MATCH_TYPE} or {@link #MAX_MATCH_TYPE}
	 * @return length in {@code content} characters (including skipped special characters)
	 *         of the matched word, or 0 if no sensitive word starts at beginIndex
	 */
	@SuppressWarnings("unchecked")
	private int checkSensitiveWord(String content, int beginIndex, int matchType) {
		// Characters consumed so far, including skipped special characters.
		int consumed = 0;
		// Length at the last COMPLETE word boundary seen; stays 0 if none is reached.
		// Returning this instead of the running length fixes max-match over-extension:
		// previously "出售军x" (words [出售, 出售军刀]) extracted "出售军" instead of "出售".
		int matchedWordLength = 0;
		Map<Object, Object> node = sensitiveWordMap;
		for (int i = beginIndex; i < content.length(); i++) {
			char key = content.charAt(i);
			// Skip special characters so padded words ("军 刀") are still detected.
			// NOTE(review): skipped characters before/inside the word are counted into the
			// returned length, so the extracted substring may carry ignored characters.
			if (isIgnore(key)) {
				consumed++;
				continue;
			}
			// Descend into this character's subtree; null means no word continues here.
			node = (Map<Object, Object>) node.get(key);
			if (node == null) {
				break;
			}
			consumed++;
			if (END_TRUE.equals(node.get(IS_END))) {
				matchedWordLength = consumed;
				// Under the minimum rule, stop at the first complete word
				// (e.g. return "出售" rather than continuing toward "出售军刀").
				if (matchType == MIN_MATCH_TYPE) {
					break;
				}
			}
		}
		return matchedWordLength;
	}

	/**
	 * Whether the character is skipped during matching (whitespace plus ASCII and
	 * full-width punctuation).
	 *
	 * @param specificChar character to test
	 * @return true if the character should be ignored
	 */
	private boolean isIgnore(char specificChar) {
		// A fresh Matcher per call keeps this safe for concurrent use.
		return IGNORE_PATTERN.matcher(String.valueOf(specificChar)).matches();
	}
}
