package com;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/* 易任我 中文分词：Easyword Chinese word segmenter
 * 特点：1.功能单一，只有中文分词功能，没有词性标注及分析等功能
 * 	   2.分词速度快（词库实现了分块查询，比一般的分词系统上快3-4倍）
 *     3.分词准确率高（实现了管道输出输入，分词过程被划分为多个单独处理的阶段，前一阶段的输出可以作为下一阶段的输入）
 *     4.一键部署，容易使用（可以作为java包直接调用，也可以二次开发，代码开源）
 *     5.在算法层及代码层实现了优化处理
 * 作者：章森
 * 2022.10.31
*/

public class Easyword {
	// System lexicon: 108783 entries in total; longest entry used by the
	// segmenter: 4 Chinese characters (see MaxLenWord below).
	// Entry format of each CnCikuN.txt file: index  word  frequency
	// CnCiku1.txt: entries of 1 Chinese character, 3833 words
	private String cnciku1 = "CnCiku1.txt"; 
	// CnCiku2.txt: entries of 2 Chinese characters, 55785 words
	private String cnciku2 = "CnCiku2.txt"; 
	// CnCiku3.txt: entries of 3 Chinese characters, 33986 words
	private String cnciku3 = "CnCiku3.txt"; 
	// CnCiku4.txt: entries of 4 Chinese characters, 15146 words
	private String cnciku4 = "CnCiku4.txt"; 
	// CnCiku5.txt: entries of 5 Chinese characters, 60 words
	private String cnciku5 = "CnCiku5.txt"; 
	// CnCiku6.txt: entries of 6 Chinese characters, 45 words
	private String cnciku6 = "CnCiku6.txt"; 
	// CnCiku7.txt: entries of 7 Chinese characters, 27 words
	private String cnciku7 = "CnCiku7.txt"; 
	// Stop-word list, 507 entries; one stop word per line
	private String stopword = "cnStopwords.txt"; 
	// Modern Chinese prefix words; one prefix per line
	private String prefix = "cnPrefix.txt"; 
	// Modern Chinese suffix words; one suffix per line
	private String suffix = "cnSuffix.txt"; 
	// Names of well-known contemporary Chinese people, 260 entries (extensible);
	// longest name: 5 Chinese characters; one name per line
	private String mingren = "cnMingren.txt"; 
	// User-defined words; one word per line
	private String userdic = "cnUserDic.txt"; 
	// Words of length 1..7 characters are kept in separate maps to speed up
	// lookup. The 1-character map is also consulted for single-char frequencies.
	private HashMap<String, Double> mapWord1Freq = new HashMap<String, Double>();
	private HashMap<String, Double> mapWord2Freq = new HashMap<String, Double>();
	private HashMap<String, Double> mapWord3Freq = new HashMap<String, Double>();
	private HashMap<String, Double> mapWord4Freq = new HashMap<String, Double>();
	private HashMap<String, Double> mapWord5Freq = new HashMap<String, Double>();
	private HashMap<String, Double> mapWord6Freq = new HashMap<String, Double>();
	private HashMap<String, Double> mapWord7Freq = new HashMap<String, Double>();
	// Index i holds the map for words of i characters (index 0 is a null
	// placeholder so mapWordFreq.get(len) can be addressed by word length).
	private List<HashMap<String, Double>> mapWordFreq = new ArrayList<HashMap<String, Double>>(
			Arrays.asList(null, mapWord1Freq, mapWord2Freq, mapWord3Freq, mapWord4Freq, mapWord5Freq, mapWord6Freq, mapWord7Freq));
	// Word sets loaded from the resource files above.
	private Set<String> setStopwords = new HashSet<String>();
	private Set<String> setPrefix = new HashSet<String>();
	private Set<String> setSuffix = new HashSet<String>();
	private Set<String> setMingren = new HashSet<String>();
	private Set<String> setUserdic = new HashSet<String>();
	// Chinese punctuation marks
	private Set<String> setFuhao = new HashSet<String>(Arrays.asList(
			"、","，","。","？","！","；","：", "《", "》", "【", "】", "（", "）", "“", "”", "——"));

	// Whether this segmenter is usable; loading the base lexicon may fail,
	// in which case the segmenter is disabled.
	private boolean isAvailable = false;
	// Maximum lexicon entry length (in Chinese characters) used while matching
	private final int MaxLenWord = 4;
	// Maximum sentence length; longer input is automatically cut into shorter
	// sentences before segmentation.
	private final int MaxLenText = 100;
	// Longest person name in the name dictionary
	private final int MaxNameLen = 5;
	
	// -------------------------------------------------
	// Ad-hoc unit-test entry point.
	public static void main(String[] args) {
		Easyword segmenter = new Easyword();
		// Build a small word list and merge its consecutive single characters.
		List<String> tokens = new ArrayList<String>(Arrays.asList(
				"原子", "齐", "奥", "塞", "斯", "库", "再上"));
		tokens = segmenter.forSingleWords(tokens);
		for (int k = 0; k < tokens.size(); k++) {
			System.out.println(tokens.get(k));
		}
		System.out.println("----- Easyword End ------");
	}

	// Constructor: loads every dictionary and flags availability.
	public Easyword() {
		super();
		// The base lexicon decides isAvailable; the auxiliary lists report
		// their own load failures but do not disable the segmenter.
		initBasicdic();
		initStopwords();
		initPrefix();
		initSuffix();
		initUserdic();
		initMingren();
		if (!isAvailable) {
			System.out.println("Error: Easyword not available!");
		}
	}

	// Segments a sentence or passage and optionally runs the frequency-based
	// disambiguation pass on the result.
	public List<String> segmenter(String text, boolean isDisambiguity) {
		List<String> words = segmenter(text);
		return isDisambiguity ? forAmbiguityWords(words) : words;
	}
	
	// Basic segmenter: accepts a Chinese sentence or a whole passage (several
	// sentences) and returns the segmented words.
	public List<String> segmenter(String text) {
		List<String> words = new ArrayList<String>();
		if (!isAvailable) {
			System.out.println("Error: Easyword not available!");
			words.add(text);
			return words;
		}
		// Very short input is returned unchanged.
		if (text.length() < 3) {
			words.add(text);
			return words;
		}
		// Input longer than MaxLenText=100 chars is first cut into sentences.
		List<String> sents = (text.length() > MaxLenText) ? cutText2Sentence(text) : null;
		if (sents != null && sents.size() > 1) {
			// Segment each sentence separately and concatenate the results.
			for (String sentence : sents) {
				words.addAll(segmentSentence(sentence));
			}
		} else {
			// Normal-length input: segment it directly (entries up to
			// MaxLenWord=4 chars).
			words.addAll(segmentSentence(text));
		}
		return words;
	}

	// Segments one sentence using forward maximum matching against the lexicon.
	// Runs of non-Chinese characters (e.g. ASCII/English) are emitted as single
	// tokens; runs of Chinese characters are matched against the lexicons,
	// longest entry (MaxLenWord=4) first.
	public List<String> segmentSentence(String sent) {
		List<String> ws = new ArrayList<String>();
		if(!isAvailable) {
			System.out.println("Error: Easyword not available!");
			ws.add(sent);
			return ws;
		}
		int cnLen = sent.length();
		// Very short sentences are returned unchanged.
		if (cnLen < 3) {
			ws.add(sent);
			return ws;
		}
		// Normal sentence: maximum matching with entries up to 4 characters.
		String subw = ""; 
		int wLen = 0;	// length of the word just cut
		int wCon = 0; 	// length of the current run of same-kind characters
		int lastp = 0;	// clamped end index, guards against overruns
		boolean bt = false;
		// Forward maximum-matching loop
		for(int i=0; i<cnLen; ) {
			wCon = 0;
			
			// First, extract a leading run of non-Chinese characters as one token.
			for(int j=i; j<cnLen; j++) {
				subw = sent.substring(j, j+1);
				if(!isChineseUtf8(subw)) {
					wCon = wCon + 1;
					continue;
				}
				else {
					// sent[j] is Chinese: flush the non-Chinese run collected so far.
					if (wCon > 0) {
						lastp = Math.min(cnLen, i + wCon);
						subw = sent.substring(i, lastp);
						ws.add(subw);
					}
					break;
				}
			}
			// Reached the end of the sentence without meeting a Chinese character.
			if (i + wCon >= cnLen) {
				if (wCon > 0) {
					subw = sent.substring(i, cnLen);
					ws.add(subw);
				}
				break;
			}
			// Skip past the non-Chinese run.
			i = i + wCon;
			// sent[i] is Chinese: count how many consecutive characters starting
			// at i are Chinese (probe at most MaxLenWord=4 of them).
			bt = false;
			wCon = 0;
			for(int j=i+1; j<cnLen; j++) {
				// Already MaxLenWord consecutive Chinese chars; stop probing.
				if (wCon >= MaxLenWord) {
					break;
				}
				subw = sent.substring(i, j);
				if (isChineseUtf8(subw)) {
					wCon = wCon + 1;
					continue;
				}
				// Character sent[j-1] is not Chinese.
				else {
					// A short Chinese run (1 or 2 chars) is taken as one word.
					if (wCon > 0 && wCon < 3) {
						subw = sent.substring(i, i + wCon);
						ws.add(subw);
						bt = true;
						i = i + wCon;
					}
					else {
						// 3 or more consecutive Chinese characters: fall through
						// to the dictionary matching below.
					}
					break;
				}
			}
			// End of sentence reached without meeting a non-Chinese character.
			if (i + wCon >= cnLen) {
				if (wCon > 0 && wCon < 3) {
					subw = sent.substring(i, cnLen);
					ws.add(subw);
					break;
				}
			}
			// A non-Chinese character follows a short Chinese run: restart the
			// outer loop at the new position.
			if (bt) {			
				continue;
			}			
			
			// Enough consecutive Chinese characters: try the longest candidate
			// first, shrinking by one character per iteration.
			wLen = 0;
			bt = false;
			for(int j=MaxLenWord; j>0; j--) {
				lastp = Math.min(cnLen, i+j);
				subw = sent.substring(i, lastp);
				// Is this candidate in the lexicon of its length?
				wLen = subw.length();
				if (wLen < 1) {
					// Should not happen; bail out defensively.
					break;
				}
				if (mapWordFreq.get(wLen).containsKey(subw)) {
					bt = true;
					wLen = j;
					ws.add(subw);
					break;
				}
			}// for j
			// No lexicon entry matched: emit a single character.
			if (!bt) {
				wLen = 1;
				ws.add(subw);
			}
			i = i + wLen;
		} // for i
		return ws;
	}

	// Loads the seven length-partitioned lexicon files (CnCiku1..CnCiku7) and
	// records overall availability.
	private void initBasicdic() {
		String[] files = { cnciku1, cnciku2, cnciku3, cnciku4, cnciku5, cnciku6, cnciku7 };
		List<HashMap<String, Double>> maps = Arrays.asList(
				mapWord1Freq, mapWord2Freq, mapWord3Freq, mapWord4Freq,
				mapWord5Freq, mapWord6Freq, mapWord7Freq);
		boolean allOk = true;
		for (int k = 0; k < files.length; k++) {
			// Always attempt every file so each failure gets reported.
			boolean ok = initBasicdic(files[k], maps.get(k), "utf8");
			allOk = allOk && ok;
		}
		isAvailable = allOk;
	}
	
	// Loads one lexicon file into the given map using the given charset.
	// Line format: index  word  frequency(integer). Lines starting with '#' or
	// "//" are comments. Returns false when the file is missing or unreadable.
	private boolean initBasicdic(String cikux, HashMap<String, Double> mapwf, String code) {
		mapwf.clear();
		String file = System.getProperty("user.dir") + "/resource/" + cikux;
		File fh = new File(file);
		if (!fh.exists()) {
			isAvailable = false;
			System.out.println("The file not exist: " + cikux);
			return false;
		}
		// try-with-resources closes the reader on every path (the old explicit
		// close() calls in finally could NPE when the stream failed to open).
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(file), code))) {
			String line;
			boolean first = true;
			while ((line = br.readLine()) != null) {
				// Skip comment lines. (Bug fix: the original tested
				// startsWith("#|//"), which matched neither '#' nor "//".)
				if (line.startsWith("#") || line.startsWith("//")) continue;
				// ws[]: index  word  frequency(integer)
				String[] ws = line.split("\\s+");
				if (ws.length < 3) continue;
				if (mapwf.containsKey(ws[1])) {
					System.out.println(cikux + "词条重复：" + ws[1]);
				} else {
					double v = 1.0;	// fallback frequency when parsing fails
					try {
						v = Double.parseDouble(ws[2]);
					} catch (NumberFormatException e) {
						System.out.println(cikux + "词频问题：" + ws[2]);
					}
					mapwf.put(ws[1], v);
				}
				// Echo the first entry once, as a load confirmation.
				if (first) {
					first = false;
					System.out.println(cikux + "词条：" + line);
				}
			}
		} catch (Exception ex) {
			// Bug fix: a read error previously fell through and returned true.
			ex.printStackTrace();
			return false;
		}
		return true;
	}

	// Loads the stop-word list (cnStopwords.txt, gb2312 encoded) into setStopwords.
	// Lines starting with '#' or "//" are comments; blank lines are ignored.
	// Returns false when the file is missing or unreadable.
	private boolean initStopwords() {
		String file = System.getProperty("user.dir") + "/resource/" + stopword;
		File fh = new File(file);
		if (!fh.exists()) {
			System.out.println("The file not exist: " + stopword);
			return false;
		}
		// try-with-resources closes the reader on every path (the old explicit
		// close() calls in finally could NPE when the stream failed to open).
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(file), "gb2312"))) {
			String line;
			boolean first = true;
			while ((line = br.readLine()) != null) {
				// Skip comment lines. (Bug fix: the original tested
				// startsWith("#|//"), which matched neither '#' nor "//".)
				if (line.startsWith("#") || line.startsWith("//")) continue;
				line = line.trim();
				if (line.length() < 1) continue;
				if (setStopwords.contains(line)) {
					System.out.println("停用词词条重复：" + line);
				} else {
					setStopwords.add(line);
				}
				// Echo the first entry once, as a load confirmation.
				if (first) {
					first = false;
					System.out.println("停用词词条：" + line);
				}
			}
		} catch (Exception ex) {
			// Bug fix: a read error previously fell through and returned true.
			ex.printStackTrace();
			return false;
		}
		return true;
	}

	// Loads the prefix-word list (cnPrefix.txt, utf-8 encoded) into setPrefix.
	// Lines starting with '#' or "//" are comments; blank lines are ignored.
	// Returns false when the file is missing or unreadable.
	private boolean initPrefix() {
		String file = System.getProperty("user.dir") + "/resource/" + prefix;
		File fh = new File(file);
		if (!fh.exists()) {
			System.out.println("The file not exist: " + prefix);
			return false;
		}
		// Bug fix: the file is documented as utf-8 but was read with the
		// platform default charset, which corrupts it on non-UTF-8 systems.
		// try-with-resources also closes the reader on every path.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(file), "utf-8"))) {
			String line;
			boolean first = true;
			while ((line = br.readLine()) != null) {
				// Skip comment lines. (Bug fix: the original tested
				// startsWith("#|//"), which matched neither '#' nor "//".)
				if (line.startsWith("#") || line.startsWith("//")) continue;
				line = line.trim();
				if (line.length() < 1) continue;
				if (setPrefix.contains(line)) {
					System.out.println("前缀词条重复：" + line);
				} else {
					setPrefix.add(line);
				}
				// Echo the first entry once, as a load confirmation.
				if (first) {
					first = false;
					System.out.println("前缀词条：" + line);
				}
			}
		} catch (Exception ex) {
			// Bug fix: a read error previously fell through and returned true.
			ex.printStackTrace();
			return false;
		}
		return true;
	}

	// Loads the suffix-word list (cnSuffix.txt, utf-8 encoded) into setSuffix.
	// Lines starting with '#' or "//" are comments; blank lines are ignored.
	// Returns false when the file is missing or unreadable.
	private boolean initSuffix() {
		String file = System.getProperty("user.dir") + "/resource/" + suffix;
		File fh = new File(file);
		if (!fh.exists()) {
			System.out.println("The file not exist: " + suffix);
			return false;
		}
		// Bug fix: the file is documented as utf-8 but was read with the
		// platform default charset, which corrupts it on non-UTF-8 systems.
		// try-with-resources also closes the reader on every path.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(file), "utf-8"))) {
			String line;
			boolean first = true;
			while ((line = br.readLine()) != null) {
				// Skip comment lines. (Bug fix: the original tested
				// startsWith("#|//"), which matched neither '#' nor "//".)
				if (line.startsWith("#") || line.startsWith("//")) continue;
				line = line.trim();
				if (line.length() < 1) continue;
				if (setSuffix.contains(line)) {
					System.out.println("后缀词条重复：" + line);
				} else {
					setSuffix.add(line);
				}
				// Echo the first entry once, as a load confirmation.
				if (first) {
					first = false;
					System.out.println("后缀词条：" + line);
				}
			}
		} catch (Exception ex) {
			// Bug fix: a read error previously fell through and returned true.
			ex.printStackTrace();
			return false;
		}
		return true;
	}

	// Loads the person-name list (cnMingren.txt, utf-8 encoded) into setMingren.
	// Lines starting with '#' or "//" are comments; blank lines are ignored.
	// Returns false when the file is missing or unreadable.
	private boolean initMingren() {
		String file = System.getProperty("user.dir") + "/resource/" + mingren;
		File fh = new File(file);
		if (!fh.exists()) {
			System.out.println("The file not exist: " + mingren);
			return false;
		}
		// Bug fix: the file is documented as utf-8 but was read with the
		// platform default charset, which corrupts it on non-UTF-8 systems.
		// try-with-resources also closes the reader on every path.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(file), "utf-8"))) {
			String line;
			boolean first = true;
			while ((line = br.readLine()) != null) {
				// Skip comment lines. (Bug fix: the original tested
				// startsWith("#|//"), which matched neither '#' nor "//".)
				if (line.startsWith("#") || line.startsWith("//")) continue;
				line = line.trim();
				if (line.length() < 1) continue;
				if (setMingren.contains(line)) {
					System.out.println("名人词条重复：" + line);
				} else {
					setMingren.add(line);
				}
				// Echo the first entry once, as a load confirmation.
				if (first) {
					first = false;
					System.out.println("名人词条：" + line);
				}
			}
		} catch (Exception ex) {
			// Bug fix: a read error previously fell through and returned true.
			ex.printStackTrace();
			return false;
		}
		return true;
	}

	// Loads the user dictionary (cnUserDic.txt, utf-8 encoded) into setUserdic.
	// Lines starting with '#' or "//" are comments; blank lines are ignored.
	// Returns false when the file is missing or unreadable.
	private boolean initUserdic() {
		String file = System.getProperty("user.dir") + "/resource/" + userdic;
		File fh = new File(file);
		if (!fh.exists()) {
			System.out.println("The file not exist: " + userdic);
			return false;
		}
		// Bug fix: the file is documented as utf-8 but was read with the
		// platform default charset, which corrupts it on non-UTF-8 systems.
		// try-with-resources also closes the reader on every path.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(file), "utf-8"))) {
			String line;
			boolean first = true;
			while ((line = br.readLine()) != null) {
				// Skip comment lines. (Bug fix: the original tested
				// startsWith("#|//"), which matched neither '#' nor "//".)
				if (line.startsWith("#") || line.startsWith("//")) continue;
				line = line.trim();
				if (line.length() < 1) continue;
				if (setUserdic.contains(line)) {
					System.out.println("用户词条重复：" + line);
				} else {
					setUserdic.add(line);
				}
				// Echo the first entry once, as a load confirmation.
				if (first) {
					first = false;
					System.out.println("用户词条：" + line);
				}
			}
		} catch (Exception ex) {
			// Bug fix: a read error previously fell through and returned true.
			ex.printStackTrace();
			return false;
		}
		return true;
	}

	// Converts a byte array to its lowercase hexadecimal string, two digits per
	// byte. Example: {0, (byte)134, 0, 61} -> "0086003d".
	private String byteArrayToHexString(byte[] ba) {
		final char[] digits = "0123456789abcdef".toCharArray();
		StringBuilder hex = new StringBuilder(ba.length * 2);
		for (int k = 0; k < ba.length; k++) {
			int v = ba[k] & 0xff;
			hex.append(digits[v >>> 4]);
			hex.append(digits[v & 0x0f]);
		}
		return hex.toString();
	}
	
	// Matches one or more characters in [\u0391, \uFFE5] — a broad range that
	// covers CJK ideographs and fullwidth forms (it also admits some non-CJK
	// symbols such as Greek letters). Compiled once: this predicate is called
	// per character inside the segmentation loops, and Pattern.compile on
	// every call was pure overhead.
	private static final Pattern PATTERN_CHINESE = Pattern.compile("[\u0391-\uFFE5]+$");

	// Returns true iff every character of str is in the range above;
	// returns false for the empty string.
	private boolean isChineseUtf8(String str) {
		return PATTERN_CHINESE.matcher(str).matches();
	}

	// Returns true iff every character of str is a Chinese punctuation mark
	// listed in setFuhao; returns false as soon as any other character is seen
	// (true for the empty string).
	private boolean isChineseFuhaoUtf8(String str) {
		for (char c : str.toCharArray()) {
			if (!setFuhao.contains(String.valueOf(c))) {
				return false;
			}
		}
		return true;
	}
	
	// The shortest run of characters ending in one of the listed sentence
	// delimiters. Compiled once instead of on every call (perf fix).
	private static final Pattern PATTERN_SENTENCE = Pattern.compile(".*?[，。、？！；：;!?\r\n]");

	// Splits a passage into sentences at Chinese/ASCII punctuation and line breaks.
	private List<String> cutText2Sentence(String text) {
		List<String> sents = new ArrayList<String>();
		int textLen = text.length();
		// Very short input is returned unchanged.
		if (textLen < 3) {
			sents.add(text);
			return sents;
		}
		Matcher m = PATTERN_SENTENCE.matcher(text);
		String st;
		// Total length consumed by matches so far.
		int mLen = 0;
		while (m.find()) {
			st = m.group();
			// NOTE(review): the original author warned that "\r\n" sequences may
			// throw the position tracking off — verify before relying on offsets.
			mLen = mLen + st.length();
			// Drop matches that are a lone line break.
			if (st.length() == 1 && (st.startsWith("\r") || st.startsWith("\n"))) {
				continue;
			}
			sents.add(st);
		}
		// Keep any trailing text after the last delimiter.
		if (textLen > mLen) {
			st = text.substring(mLen);
			sents.add(st);
		}
		return sents;
	}
	
	// Merges runs of consecutive single-character words into one word, using the
	// stop-word, prefix, and suffix lists: stop words pass through, a prefix
	// word absorbs the word that follows it, and a stop/suffix word terminates
	// a merge run.
	// NOTE(review): the original header said "3 or more" consecutive single
	// characters, but the code merges any run of length >= 1 — confirm intent.
	private List<String> forSingleWords(List<String> ws) {
		List<String> words = new ArrayList<String>();
		int ns = 0;	// number of consecutive single-character words consumed
		for(int i=0; i<ws.size(); ) {
			String w = ws.get(i);
			// Stop words pass through untouched.
			if (setStopwords.contains(w)) {
				words.add(w);
				i++;
				continue;
			}
			// A prefix word is merged with the word that follows it.
			if (setPrefix.contains(w)) {
				if (i+1 < ws.size()) {
					w = w + ws.get(i+1);
				}
				words.add(w);
				i = i + 2;
				continue;
			}
			// Merge a run of single-character words into ww.
			String ww = "";
			ns = 0;
			boolean isStopword = false;			
			while(w.length() == 1) {
				isStopword = false;
				ww = ww + w;
				ns++;
				if (i+ns >= ws.size())
					break;
				w = ws.get(i+ns);
				// A stop word or suffix word ends the run; emit both parts.
				if (setStopwords.contains(w) || setSuffix.contains(w)) {
					words.add(ww);
					words.add(w);
					isStopword = true;
					break;
				}

			}
			// The run was terminated by a stop/suffix word (already emitted).
			if (isStopword) {
				i = i + ns + 1;
				continue;
			}
			// The run ended normally: emit the merged word.
			if (ns > 0) {
				words.add(ww);
				i = i + ns;
			}
			// Not a single-character word: emit it as-is.
			else {
				words.add(w);
				i++;
			}
		}
		
		return words;
	}
	
	// Resolves 2+1 and 3+2+1 segmentation ambiguities by comparing word
	// frequencies: "AB|C" vs "A|BC", and "ABC|DE|F" vs "AB|CD|EF"
	// (e.g. 发展中 国家 庭 -> 发展 中国 家庭).
	private List<String> forAmbiguityWords(List<String> ws) {
		List<String> words = new ArrayList<String>();
		int nLen = ws.size();
		for(int i=0; i<nLen; i++) {
			// Last word: nothing to pair with.
			if (i == nLen-1) {
				words.add(ws.get(i));
				continue;
			}
			String w1old = ws.get(i);
			// AB+C pattern: try re-cutting as A+BC.
			if (w1old.length() == 2) {
				String w2old = ws.get(i+1);
				// Chinese punctuation never joins a word: keep the split.
				if (isChineseFuhaoUtf8(w2old)) {
					words.add(w1old);
					words.add(w2old);
					i = i + 1;
					continue;
				}
				if (w2old.length() == 1) {
					String w1new = w1old.substring(0, 1);
					String w2new = w1old.substring(1) + w2old;
					double fnew1 = freqOf(1, w1new);
					double fnew2 = freqOf(2, w2new);
					double fold1 = freqOf(2, w1old);
					double fold2 = freqOf(1, w2old);
					// When a single character involved is a stop word the raw
					// frequency product is distorted: compare singles only.
					if (setStopwords.contains(w2old) || setStopwords.contains(w1new)) {
						if (fnew1 > fold2) {
							words.add(w1new);
							words.add(w2new);
						}
						else {
							words.add(w1old);
							words.add(w2old);
						}
					}
					else {
						// Otherwise pick the split with the larger joint frequency.
						if (fnew1*fnew2 > fold1*fold2) {
							words.add(w1new);
							words.add(w2new);
						}
						else {
							words.add(w1old);
							words.add(w2old);
						}
					}
					// Both original words were consumed.
					i = i + 1;
				} // w2old.length() == 1
				else {
					words.add(w1old);
				}
				continue;
			}
			// ABC+DE+F pattern: try re-cutting as AB+CD+EF.
			if (w1old.length() == 3) {
				if (i == nLen-2) {
					words.add(ws.get(i));
					continue;
				}
				String w2old = ws.get(i+1);
				String w3old = ws.get(i+2);
				if (w2old.length() != 2 || w3old.length() != 1) {
					words.add(ws.get(i));
					continue;
				}
				// Chinese punctuation never joins a word: keep the split.
				if (isChineseFuhaoUtf8(w3old)) {
					words.add(w1old);
					words.add(w2old);
					words.add(w3old);
					i = i + 2;
					continue;
				}
				String w1new = w1old.substring(0, 2);
				String w2new = w1old.substring(2) + w2old.substring(0, 1);
				String w3new = w2old.substring(1) + w3old;
				double fold1 = freqOf(3, w1old);
				double fold2 = freqOf(2, w2old);
				double fold3 = freqOf(1, w3old);
				double fnew1 = freqOf(2, w1new);
				double fnew2 = freqOf(2, w2new);
				double fnew3 = freqOf(2, w3new);
				// Re-cut only when the new split is strictly more probable.
				if (fnew1*fnew2*fnew3 > fold1*fold2*fold3) {
					words.add(w1new);
					words.add(w2new);
					words.add(w3new);
					// All three original words were consumed.
					i = i + 2;
				}
				else {
					words.add(w1old);
				}
				continue;
			} // if 3
			// Any other length: keep the word as segmented.
			words.add(w1old);
		} // for i
		
		return words;
	}

	// Frequency of word w in the lexicon for the given word length; 1.0 when
	// absent. (Extracted to replace ten copies of the containsKey/get pattern;
	// the leftover debug printlns that dumped every frequency lookup to stdout
	// were removed — they fired on each disambiguation decision.)
	private double freqOf(int len, String w) {
		Double f = mapWordFreq.get(len).get(w);
		return (f == null) ? 1.0 : f;
	}
	
	// Re-cuts already-segmented words so that known person names (setMingren)
	// come out as single tokens; a name may span several of the original words,
	// up to MaxNameLen=5 characters.
	private List<String> forMingrenWords(List<String> ws) {
		List<String> words = new ArrayList<String>();
		int nLen = ws.size();
		// Lengths of the original words merged into the candidate string w
		// (MaxNameLen=5, so only a few slots are ever used).
		int[] ci5 = new int[] {0, 0, 0, 0, 0, 0, 0};
		// Scan the segmented words, detect names, and re-split around them.
		for(int i=0; i<nLen;) {
			String w = ws.get(i);
			// The current word already is a full name: keep it.
			if (setMingren.contains(w)) {
				words.add(w);
				i++;
				continue;
			}
			ci5[0] = w.length();
			// The current word may be the leading part (one or more chars) of a name.
			String mr = isPartMingren(w);
			// |mr| > |w| when a candidate exists
			int ns = 0;
			if (mr != null) {
				// Merge following words until MaxNameLen=5 chars are available.
				while(w.length() < MaxNameLen) {
					if (i+ns+1 >= nLen) {
						break;
					}
					ns++;
					w = w + ws.get(i+ns);
					ci5[ns] = ws.get(i+ns).length();
				}
				// Does w start with a known name? Longest candidate first
				// (several may match, e.g. 刘强, 刘强东, 刘强东方).
				boolean bm = false;	// a name was found
				boolean bn = false;	// index i was repositioned
				for(int k=w.length(); k>0; k--) {
					mr = w.substring(0, k);	
					bm = false;
					bn = false;
					if (setMingren.contains(mr)) {
						words.add(mr);
						int wk = w.length() - k;
						if (wk == 0) {
							// The merged words are consumed exactly.
							// NOTE(review): the author marked this line "//?";
							// together with the i++ below it skips ns+1 words —
							// verify against names spanning multiple words.
							i = i + ns;	//?
							bn = true;
							break;
						}
						int eLen = 0;
						for(int j=0; j<=ns; j++) {
							eLen = eLen + ci5[j];
							// The name used up the first j merged words.
							if (eLen >= k) {
								if (eLen > k) {
									// Emit the leftover tail of word j.
									mr = w.substring(k, eLen);
									words.add(mr);
								}
								i = i + j;
								bn = true;
								break;
							}
						}// for j
						bm = true;
						break;
					} // if
				}// for k
				// A name was found and i repositioned: advance past it.
				if (bn) {
					i++;
					continue;
				}
				// No name in the merged string: keep the original word.
				if(!bm) {
					words.add(ws.get(i));
					i++;
					continue;
				}
			}
			else {
				// Not even a partial name: keep the original word.
				words.add(ws.get(i));
				i++;
				continue;
			} // 
		}// for i
		
		return words;
	}

	// Returns some name from setMingren that starts with w (i.e. w could be the
	// leading part of a known person name), or null when none exists.
	private String isPartMingren(String w) {
		String match = null;
		for (String name : setMingren) {
			if (name.startsWith(w)) {
				match = name;
				break;
			}
		}
		return match;
	}
	
	// Hard-coded fixes for ambiguities that word frequencies cannot resolve,
	// e.g. 发展中国家庭养猪 -> 发展 中国 家庭 养猪.
	private List<String> forSpecialCaseWords(List<String> ws) {
		List<String> words = new ArrayList<String>();
		int nLen = ws.size();
		for(int i=0; i<nLen; i++) {
			// Last word: nothing to combine with.
			if (i == nLen-1) {
				words.add(ws.get(i));
				continue;
			}
			String w1old = ws.get(i);
			String w2old = ws.get(i+1);
			// 结合 成分 子 -> ... 成 分子
			if (w1old.equals("成分") && w2old.equals("子")) {
				words.add("成");
				words.add("分子");
				i++;
				continue;
			}
			// 北京大学 生前 来 -> 北京 大学生 前来
			if (w1old.equals("北京大学") && w2old.equals("生前")) {
				if (i == nLen-2) {
					words.add("北京大学");
					words.add("生前");
					i++;
					continue;
				}
				String w3old = ws.get(i+2);
				if (w3old.equals("来")) {
					words.add("北京");
					words.add("大学生");
					words.add("前来");
					i = i+2;
					continue;
				}
				else {
					words.add("北京大学");
					continue;
				}
			}
			// Reduplicated words, e.g. 好好的, 轻轻地
			if (w1old.equals(w2old)) {
				if (i == nLen-2) {
					words.add(w1old+w2old);
					i++;
					continue;
				}
				String w3old = ws.get(i+2);
				if (w3old.length() == 1 && (w3old.equals("的") || w3old.equals("地"))) {
					words.add(w1old+w2old+w3old);
					i = i+2;
				}
				else {
					words.add(w1old+w2old);
					i++;
				}
				continue;
			}			
			// "A一A" words, e.g. 跳一跳, 打一打
			if (w1old.length()==1 && w2old.equals("一")) {
				if (i == nLen-2) {
					words.add(w1old);
					words.add(w2old);
					i++;
					continue;
				}
				String w3old = ws.get(i+2);
				if (w3old.length() == 1 && w3old.equals(w1old)) {
					words.add(w1old+w2old+w3old);
				}
				else {
					// NOTE(review): this branch also consumes w3old (i advances
					// by 2 below) even though no merge happened — confirm intent.
					words.add(w1old);
					words.add(w2old);
					words.add(w3old);
				}
				i = i+2;
				continue;
			}			
			//
			
			
			
			
			// No special case applies: keep the word.
			words.add(w1old);
		}		
		
		return words;
	}

    // Reads a dictionary-style file line by line, keeps only the lines whose
    // second whitespace-separated field is exactly wLen characters long, and
    // writes them to wName. Both files live under <user.dir>/resource/.
    // Uses the platform default charset (so it can read gb2312 or utf-8 files
    // depending on the platform, as the original did).
    public void rwTextFile(String rName, String wName, int wLen)   {
		List<String> lines = new ArrayList<String>();
    	try {
	        if (rName == null || rName.length() == 0) {
	        	System.out.println("File Error: " + rName);
	            return;
	        }
	        String path = System.getProperty("user.dir") + "/resource/";
	        File file = new File(path + rName);
	        if (!file.exists()) {
	        	System.out.println("File Error: " + rName);
	            return;
	        }
	        // Bug fix: the original opened new FileInputStream(path + file),
	        // which prepends the resource path twice (File's string form is
	        // already the full path). try-with-resources also guarantees the
	        // streams are closed on error.
	        try (BufferedReader br = new BufferedReader(
	        		new InputStreamReader(new FileInputStream(file)))) {
		        String line;
		        while ((line = br.readLine()) != null) {
			        String[] ws = line.split("\\s+");
			        // Expect at least: index word ...
			        if (ws.length < 2) {
			        	continue;
			        }
			        // Keep only words of exactly wLen characters.
			        if (ws[1].length() != wLen) {
			        	continue;
			        }
		            lines.add(line);
		            System.out.println(line);
		        }
	        }

	        // Write the filtered lines back out.
	        String fileOut = path + wName;
	        try (BufferedWriter bw = new BufferedWriter(
	        		new OutputStreamWriter(new FileOutputStream(fileOut)))) {
				for (String ln : lines) {
					bw.write(ln);
					// Bug fix: the original wrote lines with no separator,
					// producing one long concatenated line in the output file.
					bw.newLine();
					System.out.println("Out: " + ln);
				}
	        }
    	}
    	catch(Exception e) {
    		e.printStackTrace();
    	}
    }

}
