package com.nlp.trie.posTagger;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.StringTokenizer;

/**
 * Part-of-speech tagger: assigns the most likely POS tag sequence to a
 * tokenized sentence using a hidden Markov model and the Viterbi algorithm.
 * @author ygsong.abcft
 *
 */
public class Tagger {
	
	// Lazily-created singleton instance.
	private static Tagger dic = null;
	
	// Log-probability floor used to initialise Viterbi cells: effectively "impossible".
	static final double minValue = -1000000000.0 / 2.0;
	
	/**
	 * Returns the shared {@code Tagger} instance, creating it (and loading the
	 * corpus statistics) on first use.
	 * <p>Synchronized so that two threads racing on the first call cannot each
	 * construct an instance.
	 */
	public static synchronized Tagger getInstance() {
		if (dic == null) {
			dic = new Tagger();
		}
		return dic;
	}
	
	// Transition frequencies from the corpus: transFreq[prev][next].
	private int[][] transFreq = new int[PartOfSpeech.names.length][PartOfSpeech.names.length];
	// Per-tag frequency counts.
	private int[] typeFreq = new int[PartOfSpeech.names.length];
	// Total frequency over all tags.
	private int totalFreq;
	
	/**
	 * Log transition probability {@code P(toTranState | curState)}, linearly
	 * interpolated: 0.9 &times; bigram maximum-likelihood estimate plus
	 * 0.1 &times; unigram back-off.
	 *
	 * @param curState    POS tag of the previous word
	 * @param toTranState POS tag of the following word
	 * @return natural log of the smoothed transition probability
	 */
	public double getTransProb(byte curState, byte toTranState) {
		// Guard the bigram term: an unseen previous state has a zero count and
		// would otherwise yield 0.9 * 0 / 0 = NaN.
		double bigram = typeFreq[curState] == 0
				? 0.0
				: 0.9 * transFreq[curState][toTranState] / typeFreq[curState];
		// Back-off term: unigram probability of the TARGET tag. The original
		// code used typeFreq[curState] here, which is the same constant for
		// every candidate next tag and therefore contributed no smoothing.
		double unigram = totalFreq == 0
				? 0.0
				: 0.1 * typeFreq[toTranState] / totalFreq;
		return Math.log(bigram + unigram);
	}
	
	/**
	 * Loads the corpus transition statistics from the classpath resource
	 * {@code /POSTransFreq.txt}. Each line has the form
	 * {@code prevTag:nextTag:frequency}.
	 */
	public Tagger() {
		// getResourceAsStream works both from a directory classpath and from
		// inside a jar; the original Class.getClass()/URI/File round-trip
		// failed as soon as the resource was packaged in a jar.
		try (BufferedReader read = new BufferedReader(new InputStreamReader(
				Tagger.class.getResourceAsStream("/POSTransFreq.txt"), "UTF-8"))) {
			String line;
			while ((line = read.readLine()) != null) {
				StringTokenizer st = new StringTokenizer(line, ":");
				
				int pre = PartOfSpeech.values.get(st.nextToken());
				int next = PartOfSpeech.values.get(st.nextToken());
				int frq = Integer.parseInt(st.nextToken());
				transFreq[pre][next] = frq;
				// NOTE(review): the tag frequency is accumulated on the
				// *following* tag, while getTransProb divides the bigram count
				// by typeFreq of the *previous* tag — confirm this is the
				// intended normalisation.
				typeFreq[next] += frq;
				totalFreq += frq;
			}
		} catch (Exception e) {
			// Best-effort load: a missing or corrupt corpus leaves all counts
			// at zero instead of aborting the application.
			e.printStackTrace();
		}
	}
	
	/**
	 * Tags a token sequence with its most likely part-of-speech sequence using
	 * the Viterbi algorithm over a hidden Markov model.
	 *
	 * @param observations tokens to tag; temporarily framed with synthetic
	 *                     Start/End tokens, which are removed again before
	 *                     returning, so the list is restored to its input state
	 * @return the estimated POS tag for each input token, in order
	 */
	public byte[] viterbi(ArrayList<WordTokenInf> observations) {
		// Frame the sequence with artificial start and end nodes so the first
		// and last real tokens have well-defined neighbours.
		WordTypes startType = new WordTypes(1);
		startType.insert(0, PartOfSpeech.start, 1);
		observations.add(0, new WordTokenInf(-1, 0, "Start", startType));
		
		WordTypes endType = new WordTypes(1);
		endType.insert(0, PartOfSpeech.end, 100);
		observations.add(new WordTokenInf(-1, 0, "End", endType));
		
		// prob[i][t]: best log-probability of any path reaching hidden state
		// (tag) t at position i — a stageLength x |tags| lattice, initialised
		// to the "impossible" floor.
		int stageLength = observations.size();
		double[][] prob = new double[stageLength][PartOfSpeech.names.length];
		for (double[] row : prob) {
			Arrays.fill(row, minValue);
		}
		
		// bestPre[i][t]: the predecessor tag on the best path into (i, t).
		byte[][] bestPre = new byte[stageLength][PartOfSpeech.names.length];
		for (byte[] row : bestPre) {
			Arrays.fill(row, PartOfSpeech.unknow);
		}
		
		// The artificial start state is the only possible starting point.
		prob[0][PartOfSpeech.start] = 0;
		
		// Forward pass: for every position and every candidate tag, record the
		// best predecessor tag.
		for (int stage = 1; stage < stageLength; stage++) {
			WordTokenInf nextInf = observations.get(stage);
			if (nextInf.data == null) {
				continue;
			}
			// Loop-invariant for the candidate-tag loop below; hoisted out.
			WordTokenInf preInf = observations.get(stage - 1);
			if (preInf.data == null) {
				continue;
			}
			
			// Iterate over each candidate tag of the current observation.
			for (int nextIndex = 0; nextIndex < nextInf.data.keys.length; ++nextIndex) {
				byte nextPOS = nextInf.data.keys[nextIndex].pos;
				
				// Emission probability: how often this word carries this tag,
				// relative to the tag's overall corpus frequency (NOT relative
				// to the word's own frequency).
				double weight = nextInf.data.vals[nextIndex];
				if (weight == 0d) {
					weight = 0.00001; // avoid log(0) for unseen word/tag pairs
				}
				double emiProb = Math.log(weight / typeFreq[nextPOS]);
				
				// Try every tag the previous observation could carry.
				for (int prevIndex = 0; prevIndex < preInf.data.keys.length; ++prevIndex) {
					byte prevPOS = preInf.data.keys[prevIndex].pos;
					double transProb = this.getTransProb(prevPOS, nextPOS);
					// Log-space product: log(best path to predecessor)
					// + log(transition) + log(emission).
					double currentProb = prob[stage - 1][prevPOS] + transProb + emiProb;
					
					if (prob[stage][nextPOS] < currentProb) {
						prob[stage][nextPOS] = currentProb;
						bestPre[stage][nextPOS] = prevPOS;
					}
				}
			}
		}
		
		// Backward pass: follow best-predecessor links from the end state.
		byte currentTag = PartOfSpeech.end;
		byte[] bestTag = new byte[stageLength];
		for (int i = (stageLength - 1); i > 1; i--) {
			bestTag[i - 1] = bestPre[i][currentTag];
			currentTag = bestTag[i - 1];
		}
		
		// Drop the synthetic Start/End positions from the result...
		byte[] resultTag = new byte[stageLength - 2];
		System.arraycopy(bestTag, 1, resultTag, 0, resultTag.length);
		
		// ...and restore the caller's list by removing the framing tokens.
		observations.remove(stageLength - 1);
		observations.remove(0);
		return resultTag;
	}

}
