package sharewithus.analyzer.seg;

import gnu.trove.list.linked.TLinkedList;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;


import sharewithus.analyzer.Bigram;
import sharewithus.analyzer.dic.DicWord;
import sharewithus.analyzer.utils.MatchRet;

/**
 * Bigram segmenter: finds the most probable segmentation of a text by
 * running a Viterbi-style dynamic program over dictionary matches,
 * scoring each candidate word with a linearly interpolated (smoothed)
 * mix of its unigram and bigram probabilities.
 *
 * @author terry
 *
 */
public class BigramSegmenter extends AbstractSegmenter implements SUSegmenter {

	// Sentinel meaning "no path found yet". NOTE(review): the original value,
	// Double.NEGATIVE_INFINITY / 2, still evaluates to -Infinity (the division
	// is a no-op on an IEEE infinity); a large finite negative matches the
	// apparent intent while keeping the same selection behavior.
	private static final double MIN_PROB = -Double.MAX_VALUE / 2;
	// Interpolation weights: LAMBDA1 * P(w1) + LAMBDA2 * P(w1 | w2).
	private static final double LAMBDA1 = 0.3;
	private static final double LAMBDA2 = 0.7;

	/**
	 * Segments {@code txt} with a first-order (bigram) Viterbi search.
	 * Node {@code i} is the position between characters {@code i-1} and
	 * {@code i}; an edge exists for every dictionary word ending at node
	 * {@code i}. Spans covered by no dictionary path are emitted as
	 * unknown-word tokens.
	 *
	 * @param txt text to segment; an empty string yields an empty token list
	 * @return tokens of the highest-probability segmentation, in text order
	 */
	@Override
	protected List<SUToken> doSeg(String txt) {
		List<SUToken> tokens = new ArrayList<SUToken>();
		Deque<Integer> deque = new ArrayDeque<Integer>();
		int len = txt.length() + 1;
		int[] prevNode = new int[len]; // best predecessor node per node
		double[] probs = new double[len]; // best log-probability per node
		DicWord[] prevWords = new DicWord[len]; // word ending at each node on the best path
		prevWords[0] = DicWord.START_WORD;
		probs[0] = 0; // node 0 has probability 1; log(1) = 0

		List<MatchRet<DicWord>> ret = new TLinkedList<MatchRet<DicWord>>();
		for (int i = 1; i < len; i++) {
			double maxProb = MIN_PROB;
			int maxPrev = -1;
			DicWord bestWord = null;
			// Query the dictionary anchored at index i-1 (presumably matches
			// whose term ends at node i — TODO confirm against dic.matchAll).
			dic.matchAll(txt, i - 1, ret);
			for (MatchRet<DicWord> m1 : ret) {
				DicWord w1 = m1.getData();
				if (w1.getTerm() == null) {
					// Cache the surface form on the (shared) dictionary entry.
					w1.setTerm(m1.getTerm());
				}

				int start = i - w1.getTerm().length();
				DicWord w2 = prevWords[start];
				int f2 = 0;
				int bigramFreq = getBigramFreq(w2, w1); // frequency of (w2, w1)
				if (w2 != null)
					f2 = w2.getCost();
				// Smoothed word probability. BUGFIX: the original computed
				// (bigramFreq / f2) in int arithmetic, which truncates the
				// conditional probability to 0 whenever bigramFreq < f2 and
				// silently disabled the bigram term; cast before dividing.
				double wordProb = LAMBDA1 * (w1.getCost() == 0 ? 1 : w1.getCost())
						/ dic.totalFreq() + LAMBDA2
						* (f2 == 0 ? 0 : ((double) bigramFreq / f2));
				// Path score in log space: log P(path to start) + log P(w1).
				double nodeProb = probs[start] + Math.log(wordProb);

				if (nodeProb > maxProb) { // keep the most probable predecessor
					maxPrev = start;
					maxProb = nodeProb;
					bestWord = w1;
				}
			}
			probs[i] = maxProb; // best log-probability reaching node i
			prevNode[i] = maxPrev; // best predecessor of node i (-1 if none)
			prevWords[i] = bestWord; // word ending at node i on the best path
		}

		// Backtrack the best-predecessor chain; push() prepends, so iterating
		// the deque afterwards visits token end positions in ascending order.
		for (int i = prevNode.length - 1; i > 0; i = prevNode[i])
			deque.push(i);
		int start = 0;
		ret.clear();
		for (Integer end : deque) {
			String sub = txt.substring(start, end);
			DicWord word = prevWords[end];
			if (word == null) { // no dictionary path covered this span
				word = DicWord.getUnknowWord();
				word.setTerm(sub);
			}
			tokens.add(new SUToken(word.getTerm(), word.getPosInfs(), start,
					end));
			start = end;
		}
		return tokens;
	}

	/**
	 * Looks up the frequency of the bigram {@code prev} followed by
	 * {@code next} in the bigram dictionary.
	 *
	 * @return the stored frequency, or 0 when either word is {@code null}
	 */
	private int getBigramFreq(DicWord prev, DicWord next) {
		if (prev == null || next == null)
			return 0;
		return dic.getBigramFreq(Bigram.getKey(prev.getId(), next.getId()));
	}

	/** Small demo driver: segments a few ambiguous sample sentences. */
	public static void main(String[] args) {
		SUSegmenter seg = new BigramSegmenter();
		String txt[] = {
				"总统有意见他",
				"他不了解手的作用",
				"张三说的确实在理",
				"小便当汤,大便当饭,我爱便当"
		};
		for (String s : txt) {
			List<SUToken> ret = seg.segment(s);
			System.out.println("原文:" + s);
			System.out.println("---------------------------------");
			for (SUToken token : ret) {
				System.out.println(token.getStart() + "-" + token.getEnd() + " : "
						+ token.getTerm() + " : " + token.getTaggedPOS());
			}
			System.out.println();
		}
	}
}
