package com.gframework.fenci.tree;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.Set;

/**
 * Default implementation of the word-segmentation (fenci) data structure,
 * backed by a character trie with optional confusable-character ("error-prone
 * word") substitution during lookup.
 * <p>
 * Instances are created through the static factory methods and are effectively
 * immutable after construction: {@link #root} and {@link #errorProneWordMap}
 * are only mutated while loading, so subsequent reads are thread-safe.
 *
 * @author Ghwolf
 */
@SuppressWarnings("serial")
class FenciTrieTreeBuilder implements FenciTrieTree, Serializable {

	/**
	 * Trie root: maps the first character of every known word to its node.
	 * <p>
	 * Mutated only during construction; read-only afterwards.
	 */
	private final Map<Character, Node> root = new HashMap<>();
	/**
	 * Confusable-character table.
	 * <p>
	 * Key is a character that may be confused with others; value holds the
	 * group(s) of characters it may be substituted with.
	 * <p>
	 * Mutated only during construction; read-only afterwards.
	 */
	private final Map<Character, ErrorProneWord> errorProneWordMap = new HashMap<>();

	/**
	 * Total number of non-blank vocabulary lines loaded.
	 */
	private long wordSize;

	/**
	 * Number of confusable-character groups (lines) loaded.
	 */
	private long errorProneWordSize;

	/**
	 * Shared default configuration used by the no-config overloads.
	 */
	private static final Config DEFAULT_CONFIG = new Config();

	private FenciTrieTreeBuilder() {
	}

	/**
	 * Builds an instance from vocabulary files, one word per line.
	 * 
	 * @param wordsFile vocabulary data file
	 * @param errorProneWordFile confusable-character table file
	 * @param charset file encoding
	 * @return a new tree instance
	 * @throws FileNotFoundException if either file cannot be opened
	 */
	public static FenciTrieTree buildByFile(File wordsFile, File errorProneWordFile, String charset)
			throws FileNotFoundException {
		FileInputStream wordsIn = new FileInputStream(wordsFile);
		FileInputStream errorProneIn;
		try {
			errorProneIn = new FileInputStream(errorProneWordFile);
		} catch (FileNotFoundException e) {
			// Don't leak the first stream when the second file is missing.
			try {
				wordsIn.close();
			} catch (IOException ignored) {
				// Best effort only: the original failure is the one to report.
			}
			throw e;
		}
		return buildByInputStream(wordsIn, errorProneIn, charset);
	}

	/**
	 * Builds an instance from vocabulary streams, one word per line.
	 * Both streams are consumed and closed before this method returns.
	 * 
	 * @param wordsInputStream vocabulary data stream
	 * @param errorProneWordInputStream confusable-character table stream
	 * @param charset stream encoding
	 * @throws FileNotFoundException never thrown here; declared for
	 *         compatibility with existing callers
	 * @return a new tree instance
	 */
	public static FenciTrieTree buildByInputStream(InputStream wordsInputStream, InputStream errorProneWordInputStream, String charset)
			throws FileNotFoundException {
		FenciTrieTreeBuilder tree = new FenciTrieTreeBuilder();
		try (Scanner scan = new Scanner(wordsInputStream, charset)) {
			while (scan.hasNextLine()) {
				String word = scan.nextLine().trim();
				if (word.isEmpty()) {
					// Blank lines previously inflated wordSize even though
					// add() ignored them; skip them entirely.
					continue;
				}
				tree.add(word);
				tree.wordSize ++;
			}
		}
		try (Scanner scan = new Scanner(errorProneWordInputStream, charset)) {
			while (scan.hasNextLine()) {
				String str = scan.nextLine().trim();
				if (str.isEmpty()) {
					continue;
				}
				// Split on whitespace runs so accidental double spaces in the
				// data file do not yield empty tokens (which would throw below).
				String[] wordArr = str.split("\\s+");
				Character[] charArr = new Character[wordArr.length];
				int foot = 0;
				for (String w : wordArr) {
					if (w.length() != 1) {
						throw new UnsupportedOperationException("混淆字库必须是以字为单位，空格分隔，每行都是相关联的字，不允许出现两个以上的汉字相邻出现！");
					}
					charArr[foot ++] = w.charAt(0);
				}

				// Register the whole group under each of its characters so a
				// lookup on any member finds all of its substitutes.
				for (Character c : charArr) {
					tree.errorProneWordMap.computeIfAbsent(c, ErrorProneWord::new).add(charArr);
				}

				tree.errorProneWordSize ++;
			}
		}
		return tree;
	}

	/**
	 * Inserts one word into the trie, marking its final node as a word end.
	 * Empty strings are ignored.
	 * 
	 * @param str the word to insert
	 */
	private void add(String str) {
		if (str.length() == 0) return;

		Map<Character, Node> map = root;
		Node lastN = null;
		for (char c : str.toCharArray()) {
			lastN = map.computeIfAbsent(c, k -> new Node());
			map = lastN.getLeaf();
		}
		// str is non-empty here, so the loop ran at least once.
		lastN.setWordEnd(true);
	}

	/**
	 * Returns the total number of vocabulary words loaded.
	 * 
	 * @return the word count
	 */
	@Override
	public long wordSize() {
		return this.wordSize;
	}

	/**
	 * Returns the number of confusable-character groups loaded.
	 * 
	 * @return the confusable group count
	 */
	@Override
	public long errorProneWordSize() {
		return this.errorProneWordSize;
	}

	@Override
	public Set<String> fenci(String str) {
		return this.fenci(str, DEFAULT_CONFIG);
	}

	@Override
	public List<String> fenciToList(String str) {
		return this.fenciToList(str, DEFAULT_CONFIG);
	}

	@Override
	public List<String> fenciToList(String str, Config config) {
		if (str == null || str.length() == 0) return Collections.emptyList();
		List<String> result = new ArrayList<>();
		doFenci(str, config, result);
		return result;
	}

	@Override
	public Set<String> fenci(String str, Config config) {
		if (str == null || str.length() == 0) return Collections.emptySet();
		Set<String> result = new HashSet<>();
		doFenci(str, config, result);
		return result;
	}

	/**
	 * Shared segmentation driver for {@link #fenci} and {@link #fenciToList}:
	 * splits the input on separators and segments each piece, collecting the
	 * extracted words into {@code result}.
	 * 
	 * @param str the non-empty input text
	 * @param config segmentation options
	 * @param result destination collection (a Set for fenci, a List for
	 *        fenciToList)
	 */
	private void doFenci(String str, Config config, Collection<String> result) {
		FindVo vo = new FindVo(result);
		if (config.isContainOriginal()) {
			vo.codes.add(str);
		}
		char[][] strSplits = fastSplit(str);
		for (char[] s : strSplits) {
			if (config.isContainOriginal()) {
				vo.codes.add(new String(s));
			}
			vo.charArray = s;
			fenci0(vo, config);
		}
	}

	/**
	 * Segments one separator-free piece of text: English letter runs are
	 * extracted as whole words, every other position starts a trie search.
	 */
	private void fenci0(FindVo vo, Config config) {
		char[] chArr = vo.charArray;
		for (int x = 0; x < chArr.length;) {
			// Extract a run of consecutive English letters as one word.
			if (this.isEnglishWord(chArr[x])) {
				int y = x + 1;
				for (; y < chArr.length; y ++) {
					if (!this.isEnglishWord(chArr[y])) {
						break;
					}
				}
				// Single letters are not treated as words.
				if (y - x > 1) {
					vo.codes.add(new String(chArr, x, y - x));
				}
				x = y;
			} else {
				vo.startIndex = x;
				vo.currentIndex = x;
				find(vo, config);
				x ++;
			}
		}
	}

	/**
	 * Runs one trie search starting at {@code vo.startIndex}, then decides
	 * whether the leading character may stand alone as a single-character word.
	 */
	private void find(FindVo vo, Config config) {
		int sourceSize = vo.codes.size();
		vo.firstChar = '\0';
		vo.leaf = this.root;
		doFind(vo, config);
		// A single leading character only counts as a word when no longer word
		// starting at this position was found; otherwise it would distort the
		// semantics of the longer match.
		if (vo.firstChar != '\0' && sourceSize == vo.codes.size()) {
			vo.codes.add(Character.toString(vo.firstChar));
		}
		vo.longestCodeIndex = vo.maxCodeIndex;
	}

	/**
	 * Recursive trie walk with backtracking. At each position it tries the
	 * literal character plus (when error correction is enabled) all of its
	 * confusable substitutes, temporarily writing the substitute into the char
	 * array and restoring it afterwards.
	 */
	private void doFind(FindVo vo, Config config) {
		char[] chArr = vo.charArray;
		Collection<String> codes = vo.codes;

		if (vo.currentIndex >= chArr.length) {
			return;
		}
		char c = chArr[vo.currentIndex];
		Iterable<Character> epw;
		if (config.isErrorCorrection()) {
			epw = errorProneWordMap.get(c);
			if (epw == null) {
				epw = ErrorProneWord.createSimpleIterable(c);
			}
		} else {
			epw = ErrorProneWord.createSimpleIterable(c);
		}
		for (Character ch : epw) {
			char oldChar = chArr[vo.currentIndex];
			chArr[vo.currentIndex] = ch;
			Node n = vo.leaf.get(ch);
			if (n != null) {
				// First character of the candidate word.
				if (vo.currentIndex == vo.startIndex) {
					if (n.isWordEnd() && vo.currentIndex > vo.longestCodeIndex) {
						vo.firstChar = ch;
					}
				} else if (n.isWordEnd()) {
					vo.maxCodeIndex = Math.max(vo.maxCodeIndex, vo.currentIndex);
					codes.add(new String(chArr, vo.startIndex, vo.currentIndex + 1 - vo.startIndex));
				}
				if (!n.getLeaf().isEmpty()) {
					Map<Character, Node> oldLeaf = vo.leaf;
					vo.leaf = n.getLeaf();
					vo.currentIndex ++;
					doFind(vo, config);
					vo.leaf = oldLeaf;
					vo.currentIndex --;
				}
			}

			chArr[vo.currentIndex] = oldChar;
		}
	}

	/**
	 * Mutable state carrier for one segmentation run. Static: it does not use
	 * the enclosing instance.
	 */
	private static class FindVo {

		/** Char array of the piece currently being processed. */
		char[] charArray;
		/** Highest index reached by any matched word so far. */
		int maxCodeIndex = -1;
		/** Index of the character currently being read. */
		int currentIndex;
		/** Start index of the current doFind pass. */
		int startIndex;
		/**
		 * Highest index covered by earlier matches; a character that is part
		 * of a preceding word (index &lt;= this value) must not also be
		 * emitted as a single-character word.
		 */
		int longestCodeIndex = -1;
		/** Candidate single-character word recorded by doFind. */
		char firstChar;
		/** Destination collection for extracted words. */
		final Collection<String> codes;
		/** Current trie level being searched. */
		Map<Character, Node> leaf;

		FindVo(Collection<String> codes) {
			this.codes = codes;
		}
	}

	/**
	 * Trie node. Static so the serialized form does not drag in a hidden
	 * reference to the enclosing tree.
	 */
	private static class Node implements Serializable {

		/**
		 * Whether this node terminates a complete word.
		 */
		private boolean wordEnd;

		/**
		 * Child nodes.
		 * <p>
		 * Populated only during construction, so no thread-safety concerns.
		 */
		private final Map<Character, Node> leaf = new HashMap<>();

		public final Map<Character, Node> getLeaf() {
			return this.leaf;
		}

		void setWordEnd(boolean wordEnd) {
			this.wordEnd = wordEnd;
		}

		/**
		 * Whether this node terminates a complete word.
		 * 
		 * @return true if it does, false otherwise
		 */
		final boolean isWordEnd() {
			return this.wordEnd;
		}
	}

	/**
	 * Tests whether a character is an ASCII English letter.
	 */
	private boolean isEnglishWord(char c) {
		return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
	}

	/**
	 * Splits the input on punctuation and whitespace separators, returning the
	 * separator-free pieces as char arrays. Empty pieces are dropped.
	 */
	private char[][] fastSplit(final String str) {
		List<char[]> list = new ArrayList<>();

		char[] strCharArray = str.toCharArray();

		int start = -1;
		for (int x = 0; x < str.length(); x ++) {
			boolean isSeparator = false;
			char c = str.charAt(x);
			switch (c) {
				case ',':
				case '.':
				case '·':
				case '@':
				case ';':
				case '!':
				case '?':
				case ':':
				case '|':
				case '<':
				case '>':
				case '-':
				case '_':
				case '。':
				case '，':
				case '！':
				case '？':
				case '#':
				case '；':
				case '~':
				case '《':
				case '》':
					isSeparator = true;
					break;
				default: {
					if (Character.isWhitespace(c)) {
						isSeparator = true;
					}
				}
			}
			if (isSeparator) {
				if (start != -1) {
					list.add(Arrays.copyOfRange(strCharArray, start, x));
					start = -1;
				}
			} else if (start == -1) {
				start = x;
			}
		}
		if (start != -1) {
			list.add(Arrays.copyOfRange(strCharArray, start, strCharArray.length));
		}
		return list.toArray(new char[list.size()][]);
	}

}