package runnable;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.Random;

import model.ConvolutionLayer;
import model.LinearLayer;
import model.LookupTableLayer;
import model.MaxFuncLayer;
import model.NonlinearLayer;

import org.jblas.DoubleMatrix;

import util.Params;
import util.RandomMatrix;
import util.SentenceIterator;
import adt.PredPos;
import adt.Sentence;

/**
 * The main entrance of the program.
 * 
 * 程序主入口。
 * 
 * @author Tianyu Xu
 * 
 */
public class Main {
	// global components shared across the training pipeline
	public static Hashtable<Character, DoubleMatrix> wordDict = null;
	public static Hashtable<Integer, DoubleMatrix> posDict = null;
	private static SentenceIterator iterator = null;

	// Neural network layers
	/**
	 * Lookup Table Layer
	 */
	private static LookupTableLayer layer1 = null;
	/**
	 * Convolution Layer (input -> convolution, weightIC)
	 */
	private static ConvolutionLayer layer2 = null;
	/**
	 * Max Function Layer
	 */
	private static MaxFuncLayer layer3 = null;
	/**
	 * Linear Layer1 (convolution -> linear, combineOutput, weightCH)
	 */
	private static LinearLayer layer4 = null;
	/**
	 * Nonlinear Layer (linear -> nonlinear, nonLinearOutput)
	 */
	private static NonlinearLayer layer5 = null;
	/**
	 * Linear Layer2 (nonlinear -> linear, weightHO, biasOutput)
	 */
	private static LinearLayer layer6 = null;

	// accuracy statistics (total and per-iteration character counts)
	private static double totalCorrectCnt = 0;
	private static double totalCnt = 0;
	private static double iterationCorrectCnt = 0;
	private static double iterationCnt = 1;

	// Viterbi algorithm parameters: transition matrix A and initial vector Pi
	private static DoubleMatrix A = null;
	private static DoubleMatrix Pi = null;

	/**
	 * Initialize all the global components
	 * 
	 * 初始化各全局对象
	 * 
	 * @param args
	 *            命令行中读入的参数列表
	 */
	private static void initialize(String[] args) {
		// DEBUG 重定向标准输出
		try {
			PrintStream out = new PrintStream(File.createTempFile("run", ".out", new File("./output")));
			System.setOut(out);
		} catch (IOException e1) {
			e1.printStackTrace();
		}

		// read parameters from console
		// 从命令行读取输入相关的参数
		if (args.length < 2) {
			System.exit(1);
		}
		if (args.length == 3) {
			RandomMatrix.setSeed(Long.parseLong(args[2]));
		} else {
			RandomMatrix.setSeed(new Random().nextLong());
		}
		Params.CHAR_LIST_PATH = args[0];
		Params.TRAINING_FILE_LIST_PATH = args[1];

		// initialize the word dictionary
		// 初始化字符字典
		wordDict = new Hashtable<Character, DoubleMatrix>();
		// read in all the characters
		// 从文件中读入字符集
		try {
			BufferedReader r = new BufferedReader(new InputStreamReader(new FileInputStream(Params.CHAR_LIST_PATH), "UTF8"));
			String s = r.readLine();
			while (s != null && !s.equals("")) {
				// generate random feature vectors
				// 生成随机特征向量
				wordDict.put(s.charAt(0), RandomMatrix.rand(Params.WORD_FEATURE_SIZE));
				s = r.readLine();
			}
			r.close();
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}

		// initialize the POS-tag dictionary
		// 初始化POS-tag字典
		posDict = new Hashtable<Integer, DoubleMatrix>();
		for (int i = 0; i < Params.POS_TAG_NUM; i++) {
			posDict.put(i, RandomMatrix.rand(Params.POS_FEATURE_SIZE));
		}

		// initialize the sentence iterator
		// 初始化训练句子迭代器
		iterator = new SentenceIterator(Params.TRAINING_FILE_LIST_PATH);

		// initialize the network layers
		// 初始化各层神经网络
		layer1 = new LookupTableLayer();
		layer2 = new ConvolutionLayer();
		layer3 = new MaxFuncLayer();
		layer4 = new LinearLayer(Params.LINEAR_HIDDEN_LAYER_SIZE, Params.CONVOLITION_LAYER_SIZE);
		layer5 = new NonlinearLayer();
		layer6 = new LinearLayer(Params.SRL_TAG_SIZE, Params.LINEAR_HIDDEN_LAYER_SIZE);

		// initialize the Viterbi parameters
		// 初始化维特比算法参数
		A = RandomMatrix.rand(Params.SRL_TAG_SIZE, Params.SRL_TAG_SIZE);
		Pi = RandomMatrix.rand(Params.SRL_TAG_SIZE);

		// DEBUG 查看 Viterbi 算法初始参数
		// System.out.println(A);
		// System.out.println(Pi);
	}

	/**
	 * The main() method of the program.
	 * 
	 * 程序的主方法。
	 * 
	 * @param args
	 *            命令行中读入的参数列表
	 */
	public static void main(String[] args) {
		initialize(args);

		int iteration = 0;
		// 训练停止条件
		// while (iteration++ < Params.MAX_ITERATION) {
		while (iterationCorrectCnt / iterationCnt < 0.90) {
			iteration++;
			iterationCnt = 0;
			iterationCorrectCnt = 0;

			boolean debug = true;

			Sentence sentence;
			// TimeCounter timer = new TimeCounter(
			// "#### Iterating all the sentences ...");

			// for each sentence
			// 遍历每个句子
			while (iterator.hasMoreElements()) {
				sentence = iterator.nextElement();
				// TimeCounter sentenceTimer = new
				// TimeCounter(sentence.toString());
				int leftIndex = Params.PADDING_SIZE;
				int rightIndex = sentence.length() - Params.PADDING_SIZE;

				// if there is no predicate, continue
				// 如果没有谓词，跳过本句
				PredPos[] predPosList = sentence.getPredPosList();
				if (predPosList == null)
					continue;

				// for each predicate in the sentence
				// 对句子中的每个谓词
				for (int predIndex = 0; predIndex < predPosList.length; predIndex++) {
					// add total word count
					// 添加总字数统计
					int length = sentence.lengthWithoutPaddings();
					totalCnt += length;
					iterationCnt += length;

					// 得到谓词的首末位置
					PredPos predPos = predPosList[predIndex];

					// 重置最大层，开始记录取到最大值时的索引
					layer3.startNewSentence(length);

					// define a output matrix
					// 定义句子对于该谓词字符的特征矩阵
					DoubleMatrix output = new DoubleMatrix(Params.SRL_TAG_SIZE, length);
					// 训练时使用的保存神经网络各层输出的列表
					ArrayList<DoubleMatrix> convolutionOutputList = new ArrayList<DoubleMatrix>(length);
					ArrayList<DoubleMatrix> maxOutputList = new ArrayList<DoubleMatrix>(length);
					ArrayList<DoubleMatrix> hiddenOutputList = new ArrayList<DoubleMatrix>(length);
					ArrayList<DoubleMatrix> nonlinearOutputList = new ArrayList<DoubleMatrix>(length);

					// get the semi-finished feature matrix
					// 获得半成品特征矩阵
					DoubleMatrix inputMatrix = layer1.getSemiFinishedFeatureMatrix(sentence);

					// 设置句子中每个字关于谓词的位移
					setPredOffset(inputMatrix, predPos);

					// for each word construct network output matrix
					// 对于句子中的每个字，构造神经网络输出矩阵
					for (int pivot = leftIndex; pivot < rightIndex; pivot++) {
						int wordIndex = pivot - Params.PADDING_SIZE;

						// set the "involved bit"
						// 设置involved位
						int LAST_BIT = Params.FEATURE_VECTOR_SIZE - 1;
						for (int i = 0; i < sentence.length(); i++) {
							inputMatrix.put(LAST_BIT, i, i - pivot);
						}

						// feed forwards
						// 前馈传播
						DoubleMatrix convolutionOutput = layer2.performConvolutionTrans(inputMatrix);
						DoubleMatrix maxOutput = layer3.performMaxTrans(convolutionOutput, wordIndex);
						DoubleMatrix hiddenOutput = layer4.performLinearTrans(maxOutput);
						DoubleMatrix nonlinearOutput = layer5.performNonlinearTrans(hiddenOutput);
						DoubleMatrix finalOutput = layer6.performLinearTrans(nonlinearOutput);

						// fill the output matrix
						// 填入输出矩阵的第 wordIndex 列
						output.putColumn(wordIndex, finalOutput);

						// save the weights for training
						// 保存网络各层权重用于训练
						convolutionOutputList.add(convolutionOutput);
						maxOutputList.add(maxOutput);
						hiddenOutputList.add(hiddenOutput);
						nonlinearOutputList.add(nonlinearOutput);
					}

					// 使用维特比算法得到句子关于此谓词的输出标签
					int[] tags = getTagsByViterbi(sentence, output);
					int[] correctTags = getCorrectTags(sentence, predIndex);

					// 统计各项参数
					for (int i = 0; i < tags.length; i++) {
						if (tags[i] == correctTags[i]) {
							totalCorrectCnt++;
							iterationCorrectCnt++;
						}
					}

					// DEBUG 每50次迭代输出一次句子信息
					if (debug && (iteration < 10 || iteration % 50 == 0)) {
						debug = false;

						System.out.printf("\n%s\n", sentence.toStrWithoutPaddings());
						for (int i = 0; i < tags.length; i++)
							System.out.printf("%d\t", tags[i]);
						System.out.println();
						for (int i = 0; i < tags.length; i++)
							System.out.printf("%d\t", correctTags[i]);
						System.out.printf("\n\n");
						for (int r = 0; r < output.rows; r++) {
							for (int c = 0; c < output.columns; c++) {
								System.out.printf("%7.2f ", output.get(r, c));
							}
							System.out.println();
						}

						System.out.printf("input matrix: %s\n", inputMatrix);
						System.out.printf("convolution output 0: %s\n", convolutionOutputList.get(0));
						System.out.printf("convolution output 1: %s\n", convolutionOutputList.get(1));
						System.out.printf("max output 0: %s\n", maxOutputList.get(0));
						System.out.printf("max output 1: %s\n\n\n", maxOutputList.get(1));
					}

					// 训练Viterbi参数
					trainViterbi(tags, correctTags);

					// 训练神经网络
					trainNetwork(sentence, tags, correctTags, output, nonlinearOutputList, hiddenOutputList, maxOutputList, inputMatrix);
				}
				// sentenceTimer.stop("The sentence is processed in %s.\n");
			}
			// timer.stop("#### All the sentences processed in %s.\n");

			System.out.printf("ITERATION: %d\tTOTAL_CORRECT: %d\tTOTAL: %d\tCURR_CORRECT: %d\tCURR: %d\tCURR_RATE: %f\n", iteration, (int) totalCorrectCnt, (int) totalCnt,
					(int) iterationCorrectCnt, (int) iterationCnt, iterationCorrectCnt / iterationCnt);

			// DEBUG 查看新的 Viterbi 算法参数
			// System.out.println(Pi);
			// System.out.println(A);

			// reset the sentence iterator
			// 重置句子迭代器
			iterator.resetPointer();
		}
	}

	/**
	 * 设置输入矩阵中每个字关于某谓词的相对位置
	 * 
	 * @param inputMatrix
	 *            要设置的输入矩阵
	 * @param predPos
	 *            中心谓词起止位置
	 */
	private static void setPredOffset(DoubleMatrix inputMatrix, PredPos predPos) {
		for (int c = 0; c < inputMatrix.columns; c++) {
			if (c < predPos.start()) {
				inputMatrix.put(Params.FEATURE_VECTOR_SIZE - 2, c, c - predPos.start());
			} else if (c > predPos.end()) {
				inputMatrix.put(Params.FEATURE_VECTOR_SIZE - 2, c, c - predPos.end());
			}
		}
	}

	/**
	 * 获得某个句子中的所有字符关于第 predIndex 个谓词的语义标签
	 * 
	 * @param sentence
	 *            要标注的句子
	 * @param predIndex
	 *            句子的谓词编号
	 * @return 句子中的每个字符关于第 predIndex 个谓词的语义标签，以索引数组形式给出
	 */
	private static int[] getCorrectTags(Sentence sentence, int predIndex) {
		int length = sentence.lengthWithoutPaddings();
		int[] result = new int[length];

		for (int i = 0; i < length; i++) {
			result[i] = sentence.wordAt(i + Params.PADDING_SIZE).getSrlTagList().get(predIndex) - Params.POS_TAG_NUM;
		}
		return result;
	}

	/**
	 * 训练维特比算法
	 * 
	 * @param tags
	 *            算法给出的语义标签索引序列
	 * @param correctTags
	 *            正确的语义标签索引序列
	 */
	private static void trainViterbi(int[] tags, int[] correctTags) {
		// 调整初始矩阵
		if (tags[0] != correctTags[0]) {
			Pi.put(tags[0], Pi.get(tags[0]) - Params.VITERBI_LEARNING_RATE);
			Pi.put(correctTags[0], Pi.get(correctTags[0]) + Params.VITERBI_LEARNING_RATE);
		}
		// 调整转移矩阵
		for (int i = 1; i < tags.length; i++) {
			if (correctTags[i] != tags[i] || correctTags[i - 1] != tags[i - 1]) {
				A.put(tags[i - 1], tags[i], A.get(tags[i - 1], tags[i]) - Params.VITERBI_LEARNING_RATE);
				A.put(correctTags[i - 1], correctTags[i], A.get(correctTags[i - 1], correctTags[i]) + Params.VITERBI_LEARNING_RATE);
			}
		}
	}

	/**
	 * 训练神经网络
	 * 
	 * @param s
	 *            要训练的句子
	 * @param tags
	 *            算法给出的语义标签索引序列
	 * @param correctTags
	 *            正确的语义标签索引序列
	 * @param finalOutput
	 *            最终的网络输出
	 * @param nonlinearOutput
	 *            非线性层输出列表
	 * @param hiddenOutput
	 *            隐藏层输出列表
	 * @param maxOutput
	 *            最大层输出列表
	 * @param inputMatrix
	 *            输入矩阵
	 */
	@SuppressWarnings("unused")
	private static void trainNetwork(Sentence s, int[] tags, int[] correctTags, DoubleMatrix finalOutput, ArrayList<DoubleMatrix> nonlinearOutput,
			ArrayList<DoubleMatrix> hiddenOutput, ArrayList<DoubleMatrix> maxOutput, DoubleMatrix inputMatrix) {
		// 获取要调整的网络参数
		DoubleMatrix weightHO = layer6.getW();
		DoubleMatrix biasOutput = layer6.getBias();
		DoubleMatrix weightCH = layer4.getW();
		DoubleMatrix biasHidden = layer4.getBias();
		DoubleMatrix weightIC = layer2.getW();
		DoubleMatrix biasConvolution = layer2.getBias();

		// 获取句子长度（不含左右 paddings）
		int length = s.lengthWithoutPaddings();
		assert length == tags.length;

		// 判断是否有错误的标签
		boolean isWrong = false;
		for (int i = 0; i < length; i++) {
			if (tags[i] != correctTags[i]) {
				isWrong = true;
				break;
			}
		}

		// if the output tag is different from the correct tag
		// 如果网络的输出标签和正确标签有误，调整权重
		if (isWrong) {
			double[][] derivOutput = new double[length][Params.SRL_TAG_SIZE];

			// set the partial derivative of the output layer (finalOutput,
			// layer6)
			// 设置输出层偏导
			for (int pos = 0; pos < length; pos++) {
				if (tags[pos] != correctTags[pos]) {
					derivOutput[pos][tags[pos]] = 1;
					derivOutput[pos][correctTags[pos]] = -1;
				}
			}

			// compute the partial derivative of weight from nonlinear to output
			// (layer 6)
			// 计算非线性层到输出层权重矩阵偏导
			double[][] derivWeightHO = new double[Params.SRL_TAG_SIZE][Params.LINEAR_HIDDEN_LAYER_SIZE];
			for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
				for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
					for (int pos = 0; pos < length; pos++) {
						derivWeightHO[t][h] += derivOutput[pos][t] * nonlinearOutput.get(pos).get(h);
					}
				}
			}

			// compute the partial derivative of the bias of output layer
			// 计算输出层bias偏导
			double[] derivBiasOutput = new double[Params.SRL_TAG_SIZE];
			for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
				for (int pos = 0; pos < length; pos++) {
					derivBiasOutput[t] += derivOutput[pos][t];
				}
			}

			// update the weightHO matrix (layer6)
			// 更新隐藏层到输出层权重矩阵
			for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
				for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
					weightHO.put(t, h, getGoodValue(weightHO.get(t, h) - Params.LEARNING_RATE * derivWeightHO[t][h]));
				}
			}

			// update the output layer bias vector (layer 6)
			// 更新输出层bias
			for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
				biasOutput.put(t, getGoodValue(biasOutput.get(t) - Params.LEARNING_RATE * derivBiasOutput[t]));
			}

			// compute the partial derivative of the nonlinear layer
			// (nonLinearOutput, layer5)
			// 计算非线性层偏导
			double[][] derivNonlinear = new double[length][Params.LINEAR_HIDDEN_LAYER_SIZE];
			for (int pos = 0; pos < length; pos++) {
				for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
					for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
						derivNonlinear[pos][h] += derivOutput[pos][t] * weightHO.get(t, h);
					}
				}
			}

			// compute the partial derivative of the hidden layer (hiddenOutput,
			// layer4)
			// 计算隐藏层的偏导
			double[][] derivHidden = new double[length][Params.LINEAR_HIDDEN_LAYER_SIZE];
			for (int pos = 0; pos < length; pos++) {
				for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
					if (Params.NONLINEAR_FUNC_TYPE == Params.SIGMOID) {
						double fx = hiddenOutput.get(pos).get(h);
						derivHidden[pos][h] = derivNonlinear[pos][h] * NonlinearLayer.sigmoid(fx) * (1 - NonlinearLayer.sigmoid(fx));
					} else {
						// UNCHECK
						if ((derivNonlinear[pos][h] <= 1) && (derivNonlinear[pos][h] >= -1)) {
							derivHidden[pos][h] = derivNonlinear[pos][h];
						} else {
							derivHidden[pos][h] = 0.0d;
						}
					}
				}
			}

			// compute the partial derivative of weight from convolution to
			// hidden (layer4)
			// 计算卷积层到隐藏层权重偏导
			double[][] derivWeightCH = new double[Params.LINEAR_HIDDEN_LAYER_SIZE][Params.CONVOLITION_LAYER_SIZE];
			for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
				for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
					for (int pos = 0; pos < length; pos++) {
						derivWeightCH[h][c] = derivHidden[pos][h] * maxOutput.get(pos).get(c);
					}
				}
			}

			// compute the partial derivative of the bias of hidden layer
			// 计算隐藏层bias偏导
			double[] derivBiasHidden = new double[Params.LINEAR_HIDDEN_LAYER_SIZE];
			for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
				for (int pos = 0; pos < length; pos++) {
					derivBiasHidden[h] += derivHidden[pos][h];
				}
			}

			// update the weightCH matrix (layer4)
			// 更新卷积层到隐藏层权重矩阵
			for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
				for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
					weightCH.put(h, c, getGoodValue(weightCH.get(h, c) - Params.LEARNING_RATE * derivWeightCH[h][c]));
				}
			}

			// update the hidden layer bias vector (layer4)
			// 更新隐藏层bias
			for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
				biasHidden.put(h, getGoodValue(biasHidden.get(h) - Params.LEARNING_RATE * derivBiasHidden[h]));
			}

			// TODO UNCHECK
			// compute the partial derivative of the max layer (layer3)
			// 计算最大层偏导
			double[][] derivMax = new double[length][Params.CONVOLITION_LAYER_SIZE];
			for (int pos = 0; pos < length; pos++) {
				for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
					for (int h = 0; h < Params.LINEAR_HIDDEN_LAYER_SIZE; h++) {
						derivMax[pos][c] += derivHidden[pos][h] * weightCH.get(h, c);
					}
				}
			}

			// TODO UNCHECK
			// compute the partial derivative of the convolution layer
			// 计算卷积层偏导
			double[][] derivConvolution = new double[length][Params.CONVOLITION_LAYER_SIZE];
			for (int pos = 0; pos < length; pos++) {
				int[] maxIndexArr = MaxFuncLayer.getMaxIndex().get(pos);
				for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
					derivConvolution[maxIndexArr[c]][c] += derivMax[pos][c];
				}
			}

			// TODO UNCHECK
			// compute the partial derivative of weight from input to
			// convolution
			// 计算输入层到卷积层权重偏导
			double[][] derivWeightIC = new double[Params.CONVOLITION_LAYER_SIZE][Params.WINDOW_SIZE * Params.FEATURE_VECTOR_SIZE];
			for (int p = 0; p < length; p++) {
				for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
					for (int w = 0; w < Params.WINDOW_SIZE; w++) {
						for (int d = 0; d < Params.FEATURE_VECTOR_SIZE; d++) {
							derivWeightIC[c][d + w * Params.FEATURE_VECTOR_SIZE] += derivConvolution[p][c] * inputMatrix.get(d, p + w);
						}
					}
				}
			}

			// compute the partial derivative of the bias of convolution layer
			// 计算卷积层bias
			double[] derivBiasConvolution = new double[Params.CONVOLITION_LAYER_SIZE];
			for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
				for (int p = 0; p < length; p++) {
					derivBiasConvolution[c] += derivConvolution[p][c];
				}
			}

			// update the weightIC matrix
			// 更新输入层到卷积层权重偏导weightIC
			for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
				for (int w = 0; w < Params.WINDOW_SIZE; w++) {
					for (int d = 0; d < Params.FEATURE_VECTOR_SIZE; d++) {
						int index = d + w * Params.FEATURE_VECTOR_SIZE;
						weightIC.put(c, index, getGoodValue(weightIC.get(c, index) - Params.LEARNING_RATE * derivWeightIC[c][index]));
					}
				}
			}

			// update the convolution layer bias vector
			// 更新卷积层bias
			for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
				biasConvolution.put(c, getGoodValue(biasConvolution.get(c) - Params.LEARNING_RATE * derivBiasConvolution[c]));
			}

			// compute the partial derivative of the input layer
			// 计算输入层偏导
			double[][][] derivInput = new double[length][Params.WINDOW_SIZE][Params.TRAINABLE_VECTOR_SIZE];
			for (int p = 0; p < length; p++) {
				for (int c = 0; c < Params.CONVOLITION_LAYER_SIZE; c++) {
					for (int w = 0; w < Params.WINDOW_SIZE; w++) {
						for (int d = 0; d < Params.TRAINABLE_VECTOR_SIZE; d++) {
							derivInput[p][w][d] += derivConvolution[p][c] * weightIC.get(c, d + w * Params.FEATURE_VECTOR_SIZE);
						}
					}
				}
			}

			// update the feature matrix
			// 更新特征矩阵
			for (int p = 0; p < length; p++) {
				for (int w = 0; w < Params.WINDOW_SIZE; w++) {
					for (int d = 0; d < Params.TRAINABLE_VECTOR_SIZE; d++) {
						double newValue = getGoodValue(inputMatrix.get(d, p + w) - Params.LEARNING_RATE * derivInput[p][w][d]);
						// inputMatrix.put(d, p + w, newValue);
						// if should update the word feature lookup table
						// 如果需要更新字符特征表
						if (d < Params.WORD_FEATURE_SIZE) {
							wordDict.get(s.wordAt(w + p).getCh()).put(d, newValue);
						}
						// if should update the POS feature lookup table
						// 如果需要更新POS标签特征表
						else {
							posDict.get(s.wordAt(w + p).getPosTagNum()).put(d - Params.WORD_FEATURE_SIZE, newValue);
						}
					}
				}
			}
		}
	}

	/**
	 * 获取调整范围后的 double 值，防止出现无穷情况
	 * 
	 * @param v
	 *            要调整的 double 值
	 * @return 调整后的 double 值
	 */
	private static double getGoodValue(double v) {
		if (v >= Params.VALUE_MAX) {
			return Params.VALUE_MAX;
		} else if (v <= -Params.VALUE_MAX) {
			return -Params.VALUE_MAX;
		} else {
			return v;
		}
	}

	/**
	 * According to the network output matrix, print out the calculated tags of
	 * each characters in the given sentence.
	 * 
	 * 根据网络计算得到的输出矩阵，输出句子中每个字符的语义标签。
	 * 
	 * @param sen
	 *            与输出矩阵对应的句子对象
	 * @param output
	 *            神经网络的输出矩阵
	 */
	private static int[] getTagsByViterbi(Sentence sen, DoubleMatrix output) {
		int K = Params.SRL_TAG_SIZE;
		int T = output.columns;
		// 第T个字符是第K个标签的最大可能
		DoubleMatrix T1 = new DoubleMatrix(K, T);
		// 该最大可能对应的前一个标签是什么
		int[][] T2 = new int[K][T];
		// 输出结果
		int[] X = new int[T];

		for (int i = 0; i < Params.SRL_TAG_SIZE; i++) {
			T1.put(i, 0, Pi.get(i) + output.get(i, 0));
			T2[i][0] = 0;
		}

		// 对于句子的每个位置 i
		for (int i = 1; i < T; i++) {
			// 计算在该位置放置标签 j 后的最大可能性
			for (int j = 0; j < K; j++) {
				double max = -Double.MAX_VALUE;
				int argmax = -1;

				// 遍历查找最大的可能性
				for (int k = 0; k < K; k++) {
					double value = T1.get(k, i - 1) + A.get(k, j) + output.get(j, i);
					if (max < value) {
						max = value;
						argmax = k;
					}
				}
				T1.put(j, i, max);
				T2[j][i] = argmax;
			}
		}

		int ZT_1 = -1;
		double max = -Double.MAX_VALUE;

		for (int k = 0; k < K; k++) {
			if (max < T1.get(k, T - 1)) {
				max = T1.get(k, T - 1);
				ZT_1 = k;
			}
		}

		X[T - 1] = ZT_1;
		int Zi_1 = ZT_1;
		for (int i = T - 1; i >= 1; i--) {
			Zi_1 = T2[Zi_1][i];
			X[i - 1] = Zi_1;
		}

		return X;
	}
}
