package runnable;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.Random;

import model.LinearLayer;
import model.LookupTableLayer;
import model.NonlinearLayer;

import org.jblas.DoubleMatrix;

import util.Params;
import util.RandomMatrix;
import util.SentenceIterator;
import util.TimeCounter;
import adt.PredPos;
import adt.Sentence;

/**
 * The main entrance of the program.
 * 
 * 程序主入口。
 * 
 * @author Tianyu Xu
 * 
 */
public class Main {
	// global components
	/**
	 * Mapping from a character to its feature vector.
	 */
	public static Hashtable<Character, DoubleMatrix> wordDict = null;
	/**
	 * Mapping from a POS-tag index to its feature vector.
	 */
	public static Hashtable<Integer, DoubleMatrix> posDict = null;
	/**
	 * Iterator over the training-set sentences.
	 */
	private static SentenceIterator trainIterator = null;
	/**
	 * Iterator over the test-set sentences.
	 */
	private static SentenceIterator testIterator = null;

	// Neural network layers
	/**
	 * Lookup Table Layer
	 */
	private static LookupTableLayer lookupTableLayer = null;
	/**
	 * Hidden Layer
	 */
	private static LinearLayer hiddenLayer = null;
	/**
	 * Nonlinear Layer
	 */
	private static NonlinearLayer nonLinearLayer = null;
	/**
	 * Output Layer
	 */
	private static LinearLayer outputLayer = null;

	// statistics
	/**
	 * Number of correctly tagged characters over the whole training run.
	 */
	private static double trainCorrectCnt = 0;
	/**
	 * Total number of tagged characters over the whole training run.
	 */
	private static double trainCnt = 0;
	/**
	 * Number of correctly tagged characters within one training iteration.
	 */
	private static double iterationCorrectCnt = 0;
	/**
	 * Number of tagged characters within one training iteration.
	 * NOTE(review): initialized to 1, presumably so the correct/total ratio
	 * is well-defined before the first iteration resets it — confirm.
	 */
	private static double iterationCnt = 1;

	// Viterbi algorithm parameters
	/**
	 * Transition score matrix used by the Viterbi algorithm.
	 */
	private static DoubleMatrix A = null;
	/**
	 * Initial score vector used by the Viterbi algorithm.
	 */
	private static DoubleMatrix Pi = null;

	/**
	 * Initialize all the global components: the character and POS-tag
	 * feature dictionaries, the train/test sentence iterators, the network
	 * layers and the Viterbi parameters. Must run before anything else in
	 * {@link #main(String[])}.
	 * 
	 * @param args
	 *            the argument list read from the command line
	 */
	private static void initialize(String[] args) {
		// handle the command-line arguments (may redirect System.out and
		// seed the random generator, so it runs first)
		processArgs(args);

		// print the important parameters
		System.out.printf("#### Parameters:\nWINDOW_SIZE: %d\nHIDDEN_LAYER_SIZE: %d\nLEARNING_RATE: %f\nVITERBI_LEARNING_RATE: %f\nWORD_FEATURE_SIZE: %d\nTHRESHOLD:%f\n\n",
				Params.WINDOW_SIZE, Params.HIDDEN_LAYER_SIZE, Params.LEARNING_RATE, Params.VITERBI_LEARNING_RATE, Params.WORD_FEATURE_SIZE, Params.PRECISION_THRESHOLD);

		// initialize the word dictionary
		wordDict = new Hashtable<Character, DoubleMatrix>();
		readCharListFile();

		// initialize the POS-tag dictionary with random feature vectors
		posDict = new Hashtable<Integer, DoubleMatrix>();
		for (int i = 0; i < Params.POS_TAG_NUM; i++) {
			posDict.put(i, RandomMatrix.rand(Params.POS_FEATURE_SIZE));
		}

		// initialize the training sentence iterator
		trainIterator = new SentenceIterator(Params.TRAIN_FILE_LIST_PATH);

		// initialize the test sentence iterator
		testIterator = new SentenceIterator(Params.TEST_FILE_LIST_PATH);

		// initialize the network layers
		lookupTableLayer = new LookupTableLayer();
		hiddenLayer = new LinearLayer(Params.HIDDEN_LAYER_SIZE, Params.WINDOW_FEATURE_SIZE);
		nonLinearLayer = new NonlinearLayer();
		outputLayer = new LinearLayer(Params.SRL_TAG_SIZE, Params.HIDDEN_LAYER_SIZE);

		// initialize the Viterbi parameters randomly
		A = RandomMatrix.rand(Params.SRL_TAG_SIZE, Params.SRL_TAG_SIZE);
		Pi = RandomMatrix.rand(Params.SRL_TAG_SIZE);

		// DEBUG inspect the initial Viterbi parameters
		if (Params.PRINT_VITERBI_INIT_PARAM) {
			System.out.println(A);
			System.out.println(Pi);
		}
	}

	/**
	 * Processes the command-line arguments: validates the argument count,
	 * optionally redirects standard output to a temp file, stores the file
	 * paths into {@link Params} and seeds the random matrix generator.
	 * 
	 * @param args
	 *            command-line arguments, expected as
	 *            {@code <char_list_path> <train_list_path> <test_list_path> <file|screen> [seed]}
	 */
	private static void processArgs(String[] args) {
		// bail out if too few arguments were given
		if (args.length < 4) {
			System.err.println("Usage: Main <char_list_path> <train_list_path> <test_list_path> <file|screen> [seed]");
			System.exit(1);
		}

		// redirect standard output to a temp file if requested
		if (args[3].trim().equals("file")) {
			try {
				// ensure the output directory exists; createTempFile throws
				// IOException when its target directory is missing
				File outputDir = new File("./output");
				outputDir.mkdirs();
				// intentionally never closed: the stream replaces System.out
				// for the lifetime of the process
				PrintStream out = new PrintStream(File.createTempFile("run", ".out", outputDir));
				System.setOut(out);
			} catch (IOException e1) {
				e1.printStackTrace();
			}
		}

		// echo all arguments
		System.out.println("#### Arguments:");
		for (int i = 0; i < args.length; i++) {
			System.out.printf("args[%d]: %s\n", i, args[i]);
		}
		System.out.println();

		Params.CHAR_LIST_PATH = args[0];
		Params.TRAIN_FILE_LIST_PATH = args[1];
		Params.TEST_FILE_LIST_PATH = args[2];

		// use the explicit random seed if one was supplied
		// (was "args.length == 5", which silently ignored the seed whenever
		// extra trailing arguments were present)
		if (args.length >= 5) {
			RandomMatrix.setSeed(Long.parseLong(args[4]));
		} else {
			RandomMatrix.setSeed(new Random().nextLong());
		}
	}

	/**
	 * Reads the character set from {@code Params.CHAR_LIST_PATH} (one
	 * character per line, UTF-8 encoded) and assigns each character a random
	 * feature vector in {@link #wordDict}. Reading stops at the first empty
	 * line or at end of file.
	 */
	private static void readCharListFile() {
		// try-with-resources closes the reader even when readLine throws;
		// the original leaked the stream on error
		try (BufferedReader r = new BufferedReader(new InputStreamReader(new FileInputStream(Params.CHAR_LIST_PATH), StandardCharsets.UTF_8))) {
			String s = r.readLine();
			while (s != null && !s.equals("")) {
				// generate a random feature vector for the line's first character
				wordDict.put(s.charAt(0), RandomMatrix.rand(Params.WORD_FEATURE_SIZE));
				s = r.readLine();
			}
		} catch (IOException e) {
			// FileNotFoundException is an IOException, so one catch suffices
			e.printStackTrace();
		}
	}

	/**
	 * The main() method of the program: trains the network and the Viterbi
	 * parameters for {@code Params.MAX_ITERATION} passes over the training
	 * set, then evaluates on the test set and prints precision, recall and
	 * F-measure (computed over non-"O" tags).
	 * 
	 * @param args
	 *            the argument list read from the command line
	 */
	public static void main(String[] args) {
		initialize(args);

		int iteration = 0;

		/* begin training phase */

		// training stop condition
		while ((iteration < Params.MAX_ITERATION)) {
			iteration++;
			iterationCnt = 0;
			iterationCorrectCnt = 0;

			// DEBUG
			boolean printSentDetail = false;

			Sentence sentence;
			TimeCounter timer = new TimeCounter();

			// for each sentence
			while (trainIterator.hasMoreElements()) {
				sentence = trainIterator.nextElement();
				int leftIndex = Params.PADDING_SIZE;
				int rightIndex = sentence.length() - Params.PADDING_SIZE;

				// if there is no predicate, continue
				PredPos[] predPosList = sentence.getPredPosList();
				if (predPosList == null)
					continue;

				// for each predicate in the sentence
				for (int predIndex = 0; predIndex < predPosList.length; predIndex++) {
					// get the start/end positions of the predicate
					PredPos predPos = predPosList[predIndex];
					// skip a semantic column that has no predicate information
					if (predPos == null)
						continue;

					// add total word count
					int length = sentence.lengthWithoutPaddings();
					trainCnt += length;
					iterationCnt += length;

					// define an output matrix: one column of SRL-tag scores
					// per character of this sentence w.r.t. this predicate
					DoubleMatrix output = new DoubleMatrix(Params.SRL_TAG_SIZE, length);

					// per-position layer outputs, saved for back-propagation
					ArrayList<DoubleMatrix> hiddenOutputList = new ArrayList<DoubleMatrix>(length);
					ArrayList<DoubleMatrix> nonlinearOutputList = new ArrayList<DoubleMatrix>(length);

					// get the semi-finished feature matrix
					DoubleMatrix inputMatrix = lookupTableLayer.getSemiFinishedFeatureMatrix(sentence);

					// set each character's offset relative to the predicate
					setPredOffset(inputMatrix, predPos);

					// for each word construct network output matrix
					for (int pivot = leftIndex; pivot < rightIndex; pivot++) {
						int wordIndex = pivot - Params.PADDING_SIZE;

						// set the "involved bit": each character's offset
						// relative to the current pivot character
						final int LAST_BIT = Params.FEATURE_VECTOR_SIZE - 1;
						for (int i = 0; i < sentence.length(); i++) {
							inputMatrix.put(LAST_BIT, i, i - pivot);
						}

						// build the window vector by stacking the feature
						// columns around the pivot
						DoubleMatrix windows = new DoubleMatrix(0, 1);
						for (int c = pivot - Params.PADDING_SIZE; c <= pivot + Params.PADDING_SIZE; c++) {
							windows = DoubleMatrix.concatVertically(windows, inputMatrix.getColumn(c));
						}

						// feed forwards
						DoubleMatrix hiddenOutput = hiddenLayer.performLinearTrans(windows);
						DoubleMatrix nonlinearOutput = nonLinearLayer.performNonlinearTrans(hiddenOutput);
						DoubleMatrix finalOutput = outputLayer.performLinearTrans(nonlinearOutput);

						// fill column wordIndex of the output matrix
						output.putColumn(wordIndex, finalOutput);

						// save the layer outputs for training
						hiddenOutputList.add(hiddenOutput);
						nonlinearOutputList.add(nonlinearOutput);
					}

					// decode the tag sequence for this predicate via Viterbi
					int[] tags = getTagsByViterbi(sentence, output);
					int[] correctTags = getCorrectTags(sentence, predIndex);

					// update the statistics
					for (int i = 0; i < tags.length; i++) {
						if (tags[i] == correctTags[i]) {
							trainCorrectCnt++;
							iterationCorrectCnt++;
						}
					}

					// DEBUG print sentence details every 50 iterations
					// (currently disabled: printSentDetail is never true)
					if (printSentDetail && (iteration < 10 || iteration % 50 == 0)) {
						printSentDetail = false;

						System.out.printf("\n%s\n", sentence.toStrWithoutPaddings());
						for (int i = 0; i < tags.length; i++)
							System.out.printf("%d\t", tags[i]);
						System.out.println();
						for (int i = 0; i < tags.length; i++)
							System.out.printf("%d\t", correctTags[i]);
						System.out.printf("\n\n");
						for (int r = 0; r < output.rows; r++) {
							for (int c = 0; c < output.columns; c++) {
								System.out.printf("%7.2f ", output.get(r, c));
							}
							System.out.println();
						}
					}

					// train the Viterbi parameters
					trainViterbi(tags, correctTags);

					// train the neural network
					trainNetwork(sentence, tags, correctTags, output, nonlinearOutputList, hiddenOutputList, inputMatrix);
				}
			}
			System.out.printf("ITERATION: %d\tTOTAL_CORRECT: %d\tTOTAL: %d\tCURR_CORRECT: %d\tCURR: %d\tCURR_RATE: %f\tTIME: %s\n", iteration, (int) trainCorrectCnt,
					(int) trainCnt, (int) iterationCorrectCnt, (int) iterationCnt, iterationCorrectCnt / iterationCnt, timer.stop());

			// DEBUG inspect the updated Viterbi parameters
			if (Params.PRINT_VITERBI_PARAM) {
				System.out.println(Pi);
				System.out.println(A);
			}

			// reset the sentence iterator
			trainIterator.resetPointer();
		}

		/* end training phase */

		/* begin test phase */

		TimeCounter timer = new TimeCounter();
		Sentence sentence;
		iterationCnt = 0;
		iterationCorrectCnt = 0;
		double correctNonO = 0;
		double nonO = 0;
		double taggedNonO = 0;

		// for each sentence
		while (testIterator.hasMoreElements()) {
			sentence = testIterator.nextElement();
			int leftIndex = Params.PADDING_SIZE;
			int rightIndex = sentence.length() - Params.PADDING_SIZE;

			// if there is no predicate, continue
			// TODO for now the predicate positions are assumed to be known;
			// later the network should predict the likely predicate positions
			PredPos[] predPosList = sentence.getPredPosList();
			if (predPosList == null)
				continue;

			// for each predicate in the sentence
			for (int predIndex = 0; predIndex < predPosList.length; predIndex++) {
				// get the start/end positions of the predicate
				PredPos predPos = predPosList[predIndex];
				// skip a semantic column that has no predicate information
				if (predPos == null)
					continue;

				// add total word count
				int length = sentence.lengthWithoutPaddings();
				iterationCnt += length;

				// define an output matrix for this sentence/predicate pair
				DoubleMatrix output = new DoubleMatrix(Params.SRL_TAG_SIZE, length);

				// get the semi-finished feature matrix
				DoubleMatrix inputMatrix = lookupTableLayer.getSemiFinishedFeatureMatrix(sentence);

				// set each character's offset relative to the predicate
				setPredOffset(inputMatrix, predPos);

				// for each word construct network output matrix
				for (int pivot = leftIndex; pivot < rightIndex; pivot++) {
					int wordIndex = pivot - Params.PADDING_SIZE;

					// set the "involved bit"
					final int LAST_BIT = Params.FEATURE_VECTOR_SIZE - 1;
					for (int i = 0; i < sentence.length(); i++) {
						inputMatrix.put(LAST_BIT, i, i - pivot);
					}

					// build the window vector
					DoubleMatrix windows = new DoubleMatrix(0, 1);
					for (int c = pivot - Params.PADDING_SIZE; c <= pivot + Params.PADDING_SIZE; c++) {
						windows = DoubleMatrix.concatVertically(windows, inputMatrix.getColumn(c));
					}

					// feed forwards
					DoubleMatrix hiddenOutput = hiddenLayer.performLinearTrans(windows);
					DoubleMatrix nonlinearOutput = nonLinearLayer.performNonlinearTrans(hiddenOutput);
					DoubleMatrix finalOutput = outputLayer.performLinearTrans(nonlinearOutput);

					// fill column wordIndex of the output matrix
					output.putColumn(wordIndex, finalOutput);
				}

				// decode the tag sequence for this predicate via Viterbi
				int[] tags = getTagsByViterbi(sentence, output);
				int[] correctTags = getCorrectTags(sentence, predIndex);

				// update the statistics
				for (int i = 0; i < tags.length; i++) {
					// 6 is for tag "O"
					boolean isNonO = correctTags[i] != 6;
					boolean isTaggedNonO = tags[i] != 6;
					if (isTaggedNonO)
						taggedNonO++;
					if (isNonO)
						nonO++;
					// if the tag is correct
					if (tags[i] == correctTags[i]) {
						iterationCorrectCnt++;
						if (isNonO)
							correctNonO++;
					}
				}
			}
		}

		double precision = correctNonO / taggedNonO;
		double recall = correctNonO / nonO;
		double F_Measure = 2 * precision * recall / (precision + recall);

		System.out.printf("NON_O_CORRECT: %d\tNON_O: %d\tTAGGED_NON_O: %d\tPRECISION: %f\tRECALL: %f\tF_MEASURE: %f\tTOTAL_CORRECT: %d\tTOTAL: %d\tRATE: %f\tTIME: %s\n",
				(int) correctNonO, (int) nonO, (int) taggedNonO, precision, recall, F_Measure, (int) iterationCorrectCnt, (int) iterationCnt, iterationCorrectCnt / iterationCnt,
				timer.stop());

		/* end test phase */
	}

	/**
	 * Writes, for every column of the input matrix, the offset of that
	 * character relative to the predicate span into the second-to-last
	 * feature row. Columns falling inside the span itself are left unchanged.
	 * 
	 * @param inputMatrix
	 *            the feature matrix to update in place
	 * @param predPos
	 *            the start/end positions of the central predicate
	 */
	private static void setPredOffset(DoubleMatrix inputMatrix, PredPos predPos) {
		final int offsetRow = Params.FEATURE_VECTOR_SIZE - 2;
		final int start = predPos.start();
		final int end = predPos.end();
		for (int col = 0; col < inputMatrix.columns; col++) {
			if (col < start) {
				inputMatrix.put(offsetRow, col, col - start);
			} else if (col > end) {
				inputMatrix.put(offsetRow, col, col - end);
			}
		}
	}

	/**
	 * Collects the gold semantic tag of every character in the sentence with
	 * respect to predicate number {@code predIndex}.
	 * 
	 * @param sentence
	 *            the sentence to read the tags from
	 * @param predIndex
	 *            the predicate number within the sentence
	 * @return the gold tag index of each character (paddings excluded)
	 */
	private static int[] getCorrectTags(Sentence sentence, int predIndex) {
		final int length = sentence.lengthWithoutPaddings();
		final int[] goldTags = new int[length];

		for (int pos = 0; pos < length; pos++) {
			// raw tag ids are offset by the POS-tag count; shift them back
			int rawTag = sentence.wordAt(pos + Params.PADDING_SIZE).getSrlTagList().get(predIndex);
			goldTags[pos] = rawTag - Params.POS_TAG_NUM;
		}
		return goldTags;
	}

	/**
	 * Updates the Viterbi parameters from one decoded sentence: wherever the
	 * decoded sequence disagrees with the gold sequence, the score of the
	 * wrong choice is decreased and the score of the gold choice increased
	 * by {@code Params.VITERBI_LEARNING_RATE}.
	 * 
	 * @param tags
	 *            the tag index sequence produced by the decoder
	 * @param correctTags
	 *            the gold tag index sequence
	 */
	private static void trainViterbi(int[] tags, int[] correctTags) {
		// adjust the initial score vector on a first-tag mismatch
		final int predictedFirst = tags[0];
		final int goldFirst = correctTags[0];
		if (predictedFirst != goldFirst) {
			Pi.put(predictedFirst, Pi.get(predictedFirst) - Params.VITERBI_LEARNING_RATE);
			Pi.put(goldFirst, Pi.get(goldFirst) + Params.VITERBI_LEARNING_RATE);
		}
		// adjust the transition matrix wherever the predicted bigram differs
		// from the gold bigram
		for (int t = 1; t < tags.length; t++) {
			boolean bigramMatches = correctTags[t] == tags[t] && correctTags[t - 1] == tags[t - 1];
			if (!bigramMatches) {
				A.put(tags[t - 1], tags[t], A.get(tags[t - 1], tags[t]) - Params.VITERBI_LEARNING_RATE);
				A.put(correctTags[t - 1], correctTags[t], A.get(correctTags[t - 1], correctTags[t]) + Params.VITERBI_LEARNING_RATE);
			}
		}
	}

	/**
	 * Trains the neural network on one decoded sentence. Wherever the
	 * decoded tag differs from the gold tag, the output score of the wrong
	 * tag is pushed down and that of the gold tag pushed up, and this error
	 * is back-propagated through the output layer, the nonlinear layer, the
	 * hidden layer and finally into the character/POS feature vectors.
	 * 
	 * @param s
	 *            the sentence to train on
	 * @param tags
	 *            the tag index sequence produced by the decoder
	 * @param correctTags
	 *            the gold tag index sequence
	 * @param finalOutput
	 *            the final network output matrix
	 * @param nonlinearOutput
	 *            the list of nonlinear-layer outputs, one per position
	 * @param hiddenOutput
	 *            the list of hidden-layer outputs, one per position
	 * @param inputMatrix
	 *            the input feature matrix
	 */
	@SuppressWarnings("unused")
	private static void trainNetwork(Sentence s, int[] tags, int[] correctTags, DoubleMatrix finalOutput, ArrayList<DoubleMatrix> nonlinearOutput,
			ArrayList<DoubleMatrix> hiddenOutput, DoubleMatrix inputMatrix) {
		// the network parameters to adjust
		DoubleMatrix weightHO = outputLayer.getW();
		DoubleMatrix biasOutput = outputLayer.getBias();
		DoubleMatrix weightIH = hiddenLayer.getW();
		DoubleMatrix biasHidden = hiddenLayer.getBias();

		// sentence length (without left/right paddings)
		int length = s.lengthWithoutPaddings();
		assert length == tags.length;

		// check whether any decoded tag is wrong
		boolean isWrong = false;
		for (int i = 0; i < length; i++) {
			if (tags[i] != correctTags[i]) {
				isWrong = true;
				break;
			}
		}

		// if the output tag is different from the correct tag, adjust weights
		if (isWrong) {
			double[][] derivOutput = new double[length][Params.SRL_TAG_SIZE];

			// set the partial derivative of the output layer (finalOutput,
			// layer6): +1 for the wrongly chosen tag, -1 for the gold tag
			for (int pos = 0; pos < length; pos++) {
				if (tags[pos] != correctTags[pos]) {
					derivOutput[pos][tags[pos]] = 1;
					derivOutput[pos][correctTags[pos]] = -1;
				}
			}

			// compute the partial derivative of weight from nonlinear to output
			// (layer 6)
			double[][] derivWeightHO = new double[Params.SRL_TAG_SIZE][Params.HIDDEN_LAYER_SIZE];
			for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
				for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
					for (int pos = 0; pos < length; pos++) {
						derivWeightHO[t][h] += derivOutput[pos][t] * nonlinearOutput.get(pos).get(h);
					}
				}
			}

			// compute the partial derivative of the bias of output layer
			double[] derivBiasOutput = new double[Params.SRL_TAG_SIZE];
			for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
				for (int pos = 0; pos < length; pos++) {
					derivBiasOutput[t] += derivOutput[pos][t];
				}
			}

			// update the weightHO matrix (layer6)
			for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
				for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
					weightHO.put(t, h, getGoodValue(weightHO.get(t, h) - Params.LEARNING_RATE * derivWeightHO[t][h]));
				}
			}

			// update the output layer bias vector (layer 6)
			for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
				biasOutput.put(t, getGoodValue(biasOutput.get(t) - Params.LEARNING_RATE * derivBiasOutput[t]));
			}

			// compute the partial derivative of the nonlinear layer
			double[][] derivNonlinear = new double[length][Params.HIDDEN_LAYER_SIZE];
			for (int pos = 0; pos < length; pos++) {
				for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
					for (int t = 0; t < Params.SRL_TAG_SIZE; t++) {
						derivNonlinear[pos][h] += derivOutput[pos][t] * weightHO.get(t, h);
					}
				}
			}

			// compute the partial derivative of the hidden layer
			double[][] derivHidden = new double[length][Params.HIDDEN_LAYER_SIZE];
			for (int pos = 0; pos < length; pos++) {
				for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
					if (Params.NONLINEAR_FUNC_TYPE == Params.SIGMOID) {
						double fx = hiddenOutput.get(pos).get(h);
						derivHidden[pos][h] = derivNonlinear[pos][h] * NonlinearLayer.sigmoid(fx) * (1 - NonlinearLayer.sigmoid(fx));
					} else {
						// UNCHECK: hard-tanh style gradient clipping
						if ((derivNonlinear[pos][h] <= 1) && (derivNonlinear[pos][h] >= -1)) {
							derivHidden[pos][h] = derivNonlinear[pos][h];
						} else {
							derivHidden[pos][h] = 0.0d;
						}
					}
				}
			}

			// compute the partial derivative of weight from input to hidden
			double[][] derivWeightIH = new double[Params.HIDDEN_LAYER_SIZE][Params.WINDOW_FEATURE_SIZE];
			for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
				for (int pos = 0; pos < length; pos++) {
					for (int w = 0; w < Params.WINDOW_SIZE; w++) {
						int windowOffset = w * Params.FEATURE_VECTOR_SIZE;
						for (int f = 0; f < Params.FEATURE_VECTOR_SIZE; f++) {
							// BUGFIX: accumulate with "+=" over positions; the
							// original used "=", which overwrote the sum and
							// kept only the last position's contribution —
							// every sibling gradient here accumulates with "+="
							derivWeightIH[h][windowOffset + f] += derivHidden[pos][h] * inputMatrix.get(f, pos + w);
						}
					}
				}
			}

			// compute the partial derivative of the bias of hidden layer
			double[] derivBiasHidden = new double[Params.HIDDEN_LAYER_SIZE];
			for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
				for (int pos = 0; pos < length; pos++) {
					derivBiasHidden[h] += derivHidden[pos][h];
				}
			}

			// update the weightIH matrix
			for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
				for (int w = 0; w < Params.WINDOW_FEATURE_SIZE; w++) {
					weightIH.put(h, w, getGoodValue(weightIH.get(h, w) - Params.LEARNING_RATE * derivWeightIH[h][w]));
				}
			}

			// update the hidden layer bias vector (layer4)
			for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
				biasHidden.put(h, getGoodValue(biasHidden.get(h) - Params.LEARNING_RATE * derivBiasHidden[h]));
			}

			// compute the partial derivative of the input layer
			// NOTE(review): weightIH was already updated above, so this uses
			// the post-update weights rather than the ones that produced the
			// forward pass — preserved as-is, but verify it is intentional
			double[][][] derivInput = new double[length][Params.WINDOW_SIZE][Params.TRAINABLE_VECTOR_SIZE];
			for (int p = 0; p < length; p++) {
				for (int h = 0; h < Params.HIDDEN_LAYER_SIZE; h++) {
					for (int w = 0; w < Params.WINDOW_SIZE; w++) {
						int windowOffset = w * Params.FEATURE_VECTOR_SIZE;
						for (int d = 0; d < Params.TRAINABLE_VECTOR_SIZE; d++) {
							derivInput[p][w][d] += derivHidden[p][h] * weightIH.get(h, d + windowOffset);
						}
					}
				}
			}

			// update the feature lookup tables
			for (int p = 0; p < length; p++) {
				for (int w = 0; w < Params.WINDOW_SIZE; w++) {
					for (int d = 0; d < Params.TRAINABLE_VECTOR_SIZE; d++) {
						double newValue = getGoodValue(inputMatrix.get(d, p + w) - Params.LEARNING_RATE * derivInput[p][w][d]);
						// if should update the word feature lookup table
						if (d < Params.WORD_FEATURE_SIZE) {
							wordDict.get(s.wordAt(w + p).getCh()).put(d, newValue);
						}
						// if should update the POS feature lookup table
						else {
							posDict.get(s.wordAt(w + p).getPosTagNum()).put(d - Params.WORD_FEATURE_SIZE, newValue);
						}
					}
				}
			}
		}
	}

	/**
	 * Clamps a double value into the range
	 * {@code [-Params.VALUE_MAX, Params.VALUE_MAX]} so that the weights
	 * never drift toward infinity.
	 * 
	 * @param v
	 *            the value to clamp
	 * @return the clamped value
	 */
	private static double getGoodValue(double v) {
		// equivalent to the original if/else-if chain, including the
		// NaN-passthrough behavior of Math.min/Math.max
		return Math.max(-Params.VALUE_MAX, Math.min(Params.VALUE_MAX, v));
	}

	/**
	 * According to the network output matrix, compute the semantic tag of
	 * each character in the given sentence with the Viterbi algorithm.
	 * Scores from the initial vector {@code Pi}, the transition matrix
	 * {@code A} and the network outputs are combined additively.
	 * 
	 * @param sen
	 *            the sentence object corresponding to the output matrix
	 *            (currently unused by the computation)
	 * @param output
	 *            the output matrix of the neural network — SRL_TAG_SIZE
	 *            rows, one column per character
	 * @return the best-scoring tag index sequence, one entry per column
	 */
	private static int[] getTagsByViterbi(Sentence sen, DoubleMatrix output) {
		int K = Params.SRL_TAG_SIZE;
		int T = output.columns;
		// T1[k][t]: best score of any tag sequence ending with tag k at
		// position t
		DoubleMatrix T1 = new DoubleMatrix(K, T);
		// T2[k][t]: backpointer — the predecessor tag of that best sequence
		int[][] T2 = new int[K][T];
		// the decoded tag sequence
		int[] X = new int[T];

		// base case: initial score plus the network score of the first column
		for (int i = 0; i < Params.SRL_TAG_SIZE; i++) {
			T1.put(i, 0, Pi.get(i) + output.get(i, 0));
			T2[i][0] = 0;
		}

		// for each position i of the sentence
		for (int i = 1; i < T; i++) {
			// compute the best score of placing tag j at position i
			for (int j = 0; j < K; j++) {
				double max = -Double.MAX_VALUE;
				int argmax = -1;

				// scan all predecessor tags k for the highest score
				for (int k = 0; k < K; k++) {
					double value = T1.get(k, i - 1) + A.get(k, j) + output.get(j, i);
					if (max < value) {
						max = value;
						argmax = k;
					}
				}
				T1.put(j, i, max);
				T2[j][i] = argmax;
			}
		}

		// find the best-scoring tag at the last position
		int ZT_1 = -1;
		double max = -Double.MAX_VALUE;

		for (int k = 0; k < K; k++) {
			if (max < T1.get(k, T - 1)) {
				max = T1.get(k, T - 1);
				ZT_1 = k;
			}
		}

		// follow the backpointers to recover the whole sequence
		X[T - 1] = ZT_1;
		int Zi_1 = ZT_1;
		for (int i = T - 1; i >= 1; i--) {
			Zi_1 = T2[Zi_1][i];
			X[i - 1] = Zi_1;
		}

		return X;
	}
}
