import tensorflow as tf

class BiRNN(object):
	"""Multi-layer bidirectional GRU text classifier (TensorFlow 1.x, graph mode).

	The whole graph is built in ``__init__``: embedding lookup, a stacked
	bidirectional dynamic RNN, max-pooling over time, a linear softmax
	output layer, a sparse cross-entropy loss, and top-k precision metrics.

	Graph handles exposed as attributes:
		place_dropout_keep_prob -- float32 scalar; feed the dropout keep prob
		b_features              -- int32 [batch, sequence_length] token ids
		b_labels                -- int64 [batch] class indices
		logits, loss, metrics, top_10_indice

	Note: ``batch_size`` is accepted for backward compatibility but unused;
	the batch dimension is left as None so the (usually smaller) final
	batch of an epoch works without feeding a size.
	"""

	def __init__(self, sequence_length, num_classes, vocabulary_size, embedding_size, hidden_size, batch_size, layer_num):
		# Input placeholders. The first dimension is None because the last
		# batch of an epoch is generally smaller than batch_size.
		self.place_dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		self.b_features = tf.placeholder(dtype=tf.int32, shape=[None, sequence_length], name='place_features')
		self.b_labels = tf.placeholder(dtype=tf.int64, shape=[None], name='place_labels')

		with tf.name_scope('embedding'):
			# Trainable word-embedding table; after lookup, self.inputs is
			# [batch, sequence_length, embedding_size].
			embedding = tf.get_variable('embedding', [vocabulary_size, embedding_size], dtype=tf.float32)
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)

		def get_a_lstm_cell():
			# One GRU layer with input dropout. A fresh cell object must be
			# created per layer: reusing a single cell inside MultiRNNCell
			# triggers a variable-sharing error in TF1.
			cell = tf.nn.rnn_cell.GRUCell(hidden_size)
			cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.place_dropout_keep_prob)
			return cell

		# NOTE(review): stacking via MultiRNNCell before
		# bidirectional_dynamic_rnn concatenates the two directions only
		# after the last layer; per-layer concat would need
		# tf.contrib.rnn.stack_bidirectional_dynamic_rnn instead
		# (discussion: https://cloud.tencent.com/developer/article/1085072).
		lstm_fw_cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)], state_is_tuple=True)
		lstm_bw_cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)], state_is_tuple=True)

		# dtype=tf.float32 lets TF create zero initial states sized to the
		# actual (possibly partial) batch, so no explicit initial_state or
		# batch_size feed is needed.
		outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cells, lstm_bw_cells, self.inputs,
		                                             dtype=tf.float32)

		print('outputs: {}'.format(outputs))
		# Max-pool over the time axis: keep, per hidden unit, the strongest
		# activation across all time steps.
		fw_output = tf.layers.max_pooling1d(outputs[0], pool_size=sequence_length, strides=1)
		bw_output = tf.layers.max_pooling1d(outputs[1], pool_size=sequence_length, strides=1)
		print('fw_output: {}'.format(fw_output))
		print('bw_output: {}'.format(bw_output))

		# Fuse forward and backward outputs by element-wise addition
		# (rather than concatenation), keeping the feature width at
		# hidden_size so the output layer below stays [hidden_size, classes].
		bi_outputs = tf.reshape(tf.add(fw_output, bw_output), [-1, hidden_size], name='bi_fusion_outputs')

		print('bi_outputs: {}'.format(bi_outputs))

		with tf.name_scope('output'):
			# Linear projection of the fused features to class scores.
			w_softmax = tf.get_variable('w_softmax', shape=[hidden_size, num_classes])
			b_softmax = tf.get_variable('b_softmax', shape=[num_classes])
			self.logits = tf.matmul(bi_outputs, w_softmax) + b_softmax
			# Raw logits suffice for the top-k ranking below; tf.nn.top_k
			# ranks by score directly, so no argmax is required.
			predicted_classes = self.logits

		with tf.name_scope('loss'):
			# Sparse form: b_labels holds integer class indices, not one-hot
			# vectors; logits are [batch, num_classes].
			self.loss = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=self.logits)

		with tf.name_scope('accuracy'):
			_, top_1_indice = tf.nn.top_k(predicted_classes, k=1)
			_, top_5_indice = tf.nn.top_k(predicted_classes, k=5)
			_, self.top_10_indice = tf.nn.top_k(predicted_classes, k=10)

			# Each metric is the full (value, update_op) pair. Exporting only
			# the value element made the metric read as NaN outside this
			# class, so the whole tuple is kept in self.metrics.
			top_1 = tf.metrics.precision_at_top_k(labels=self.b_labels, predictions_idx=top_1_indice, k=1, name='top1')
			top_5 = tf.metrics.precision_at_top_k(labels=self.b_labels, predictions_idx=top_5_indice, k=5, name='top5')
			print(top_1)
			self.metrics = {
				'top_1': top_1,
				'top_5': top_5
			}