/**
 * AbstractTokenizer.java
 *
 * @author ZhuJiahui1991
 * @date 2018年1月31日
 */
package com.zhujiahui.nlp.core.tokenizer;

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.StringUtils;

import com.hankcs.hanlp.seg.common.Term;
import com.zhujiahui.nlp.core.domain.IndexToken;
import com.zhujiahui.nlp.core.domain.Token;

/**
 * Abstract base class for tokenizers: declares the segmentation interface
 * and provides shared post-processing of HanLP terms (punctuation filtering
 * and merging of letter/digit/hyphen sequences).
 * 
 * @author ZhuJiahui1991
 * @date 2018年1月31日
 * 
 */
public abstract class AbstractTokenizer {

	// Whether runs of letter/digit/hyphen terms should be merged into a single token
	public boolean isSymbolMerged = false;

	// Whether punctuation terms should be filtered out of the result
	public boolean isPunctuationFiltered = false;

	// Matches words consisting only of ASCII letters, digits and hyphens
	public String regex = "^[a-z0-9A-Z\\-]+$";

	/**
	 * Default constructor.
	 */
	public AbstractTokenizer() {

	}

	/**
	 * Segments a sentence into tokens.
	 *
	 * @param sentence the raw input sentence
	 * @return the resulting token list
	 */
	public abstract List<Token> segment(String sentence);

	/**
	 * Segments a sentence into tokens carrying index information.
	 *
	 * @param sentence the raw input sentence
	 * @return the resulting index-token list
	 */
	public abstract List<IndexToken> segmentWithIndex(String sentence);

	/**
	 * Post-processes a segmentation result without symbol merging.
	 * When {@link #isPunctuationFiltered} is set, whitespace-only words and
	 * punctuation terms (nature {@code "w"}) are dropped; otherwise every
	 * term is converted to a {@link Token} as-is.
	 *
	 * @param termList HanLP termList
	 * @return List<Token>
	 */
	public List<Token> postSegment(List<Term> termList) {

		Token modelToken = new Token(Token.DEFAULT_WORD, Token.DEFAULT_POS);
		List<Token> segmentList = new ArrayList<Token>();

		for (Term term : termList) {
			String thisWord = term.word;
			String thisPos = term.nature.toString();

			// Skip whitespace-only words and punctuation ("w") terms when
			// filtering is enabled (negation of the original keep-condition).
			if (isPunctuationFiltered
					&& (StringUtils.trim(thisWord).length() == 0
							|| thisPos.equals("w"))) {
				continue;
			}

			Token thisToken = (Token) modelToken.clone();
			thisToken.setWord(thisWord);
			thisToken.setPos(thisPos);
			segmentList.add(thisToken);
		}

		return segmentList;
	}

	/**
	 * Post-processes a segmentation result with symbol merging: consecutive
	 * letter/digit/hyphen terms (and terms joined by {@code "-"}) are
	 * accumulated in a buffer and emitted as a single merged token with POS
	 * {@code "nz"}; all other terms flush the buffer and are emitted on their
	 * own.
	 *
	 * @param termList HanLP termList
	 * @return List<Token>
	 */
	public List<Token> postMergeSegment(List<Term> termList) {

		// Current word and its part of speech
		String thisWord = null;
		String thisPos = null;

		// Previous word and its part of speech
		String lastWord = null;
		String lastPos = null;

		// Whether the buffer currently holds a merged (multi-term) word
		boolean isMerged = false;
		int startIndex = 0;

		List<Token> segmentList = new ArrayList<Token>();
		Token modelToken = new Token(Token.DEFAULT_WORD, Token.DEFAULT_POS);
		StringBuilder sb = new StringBuilder();

		while (startIndex < termList.size()) {

			thisWord = termList.get(startIndex).word;
			thisPos = termList.get(startIndex).nature.toString();

			// First term: seed the buffer and advance.
			if (null == lastWord) {
				sb.append(thisWord);
				lastWord = thisWord;
				lastPos = thisPos;
				++startIndex;
				continue;
			}

			if (thisWord.equals("-")) {
				// A hyphen is always appended to the buffer; a non-empty
				// buffer means we are now in a merged word.
				if (sb.length() > 0) {
					isMerged = true;
				}
				sb.append(thisWord);
			} else if (thisWord.matches(regex)) {
				// Letter/digit/hyphen word
				if (sb.length() > 0) {
					// Buffer is not empty
					if (lastWord.matches(regex)) {
						// Previous word also matched: keep merging.
						sb.append(thisWord);
						isMerged = true;
					} else {
						// Previous word did not match: flush the buffer,
						// then start a new one with the current word.
						Token tempToken = this.mergeToken(modelToken, lastPos,
								sb, isMerged);
						segmentList.add(tempToken);
						sb.delete(0, sb.length());
						isMerged = false;
						sb.append(thisWord);
					}
				} else {
					// Buffer is empty
					sb.append(thisWord);
					isMerged = false;
				}
			} else if (StringUtils.trim(thisWord).length() < 1
					&& thisPos.equals("w")) {
				// Whitespace punctuation: flush the buffer and emit the
				// space itself unless punctuation filtering is enabled.
				if (sb.length() > 0) {
					Token tempToken = this.mergeToken(modelToken, lastPos, sb,
							isMerged);
					segmentList.add(tempToken);
					sb.delete(0, sb.length());
				}

				isMerged = false;

				if (!this.isPunctuationFiltered) {
					segmentList.add(new Token(thisWord, "w"));
				}
			} else {
				// Any other word (e.g. Chinese text).
				// NOTE(review): non-whitespace punctuation also lands here
				// and is kept even when isPunctuationFiltered is set —
				// confirm this asymmetry with postSegment is intentional.
				if (sb.length() > 0) {
					// Buffer is not empty
					if (sb.substring(sb.length() - 1).equals("-")) {
						// Buffer ends in "-": continue the merged word.
						sb.append(thisWord);
						isMerged = true;
					} else {
						// Buffer does not end in "-": flush it, then start
						// a new buffer with the current word.
						Token tempToken = this.mergeToken(modelToken, lastPos,
								sb, isMerged);
						segmentList.add(tempToken);
						sb.delete(0, sb.length());
						sb.append(thisWord);
						isMerged = false;
					}
				} else {
					// Buffer is empty
					sb.append(thisWord);
					isMerged = false;
				}
			}
			lastWord = thisWord;
			lastPos = thisPos;
			++startIndex;
		}

		// Flush whatever is left in the buffer after the last term.
		if (sb.length() > 0) {
			Token tempToken = this.mergeToken(modelToken, lastPos, sb,
					isMerged);
			segmentList.add(tempToken);
		}

		return segmentList;
	}

	/**
	 * Turns the buffer content into a new Token. A merged buffer gets the
	 * POS {@code "nz"}; an unmerged one keeps the previous word's POS.
	 *
	 * @param modelToken
	 *            prototype Token to clone
	 * @param lastPos
	 *            part of speech of the previous word
	 * @param sb
	 *            buffer holding the word text
	 * @param isMerged
	 *            whether the buffer holds a merged word
	 * @return the merged Token
	 */
	private Token mergeToken(Token modelToken, String lastPos, StringBuilder sb,
			boolean isMerged) {

		Token tempToken = (Token) modelToken.clone();
		tempToken.setWord(sb.toString());
		if (isMerged) {
			tempToken.setPos("nz");
		} else {
			tempToken.setPos(lastPos);
		}

		return tempToken;
	}

	/**
	 * @return the isSymbolMerged
	 */
	public boolean isSymbolMerged() {
		return isSymbolMerged;
	}

	/**
	 * @param isSymbolMerged
	 *            the isSymbolMerged to set
	 */
	public void setSymbolMerged(boolean isSymbolMerged) {
		this.isSymbolMerged = isSymbolMerged;
	}

	/**
	 * @return the isPunctuationFiltered
	 */
	public boolean isPunctuationFiltered() {
		return isPunctuationFiltered;
	}

	/**
	 * @param isPunctuationFiltered
	 *            the isPunctuationFiltered to set
	 */
	public void setPunctuationFiltered(boolean isPunctuationFiltered) {
		this.isPunctuationFiltered = isPunctuationFiltered;
	}

}
