import tensorflow as tf
'''
Replace the BiRNN in the original network structure with a unidirectional
RNN, to compare whether the network performs better.
'''

class BiRNN(object):
	"""Unidirectional-RNN variant of the original BiRNN text classifier (TF1 graph code).

	Builds two GRU branches — a stacked GRU over the token sequence and a
	single-layer GRU over per-sample activity features — fuses their high-level
	features element-wise, and classifies with a linear softmax layer.
	Exposes placeholders (``b_features``, ``b_labels``, ``b_active_features``,
	``place_dropout_keep_prob``) plus ``loss``, ``acc_top_1``, ``acc_top_5``
	and ``metrics`` as attributes.

	Args:
		sequence_length: number of token time steps per sample.
		num_classes: number of output classes (developers).
		vocabulary_size: token vocabulary size (rows of the embedding table).
		developers_size: unused here; kept for interface compatibility.
		embedding_size: dimensionality of the token embeddings.
		hidden_size: GRU hidden-state size, shared by both branches.
		batch_size: unused here; the batch dimension is left dynamic (None).
		active_size: number of activity time steps per sample.
		layer_num: number of stacked GRU layers in the content branch.
	"""

	def __init__(self, sequence_length, num_classes, vocabulary_size, developers_size, embedding_size, hidden_size,
	             batch_size, active_size, layer_num):
		# Input placeholders.  The batch dimension is None because the last
		# batch of an epoch is usually smaller than batch_size.
		self.place_dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		self.b_features = tf.placeholder(dtype=tf.int32, shape=[None, sequence_length], name='place_features')
		self.b_labels = tf.placeholder(dtype=tf.int64, shape=[None], name='place_labels')
		# Activity features consumed by the activity-extraction branch.
		self.b_active_features = tf.placeholder(dtype=tf.float32, shape=[None, active_size],
		                                        name='batch_active_features')

		with tf.name_scope('embedding'):
			# Token embedding lookup: maps int token ids to dense vectors,
			# a lower-dimensional representation than one-hot.
			embedding = tf.get_variable('embedding', [vocabulary_size, embedding_size], dtype=tf.float32)
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是input的shape: {}'.format(self.inputs.shape))  # (?, sequence_length, embedding_size)

		with tf.name_scope('rnn_active'):
			print(self.b_active_features)
			# Expand [batch, active_size] -> [batch, active_size, 1] so each
			# scalar activity value becomes one time step of a 1-dim input.
			inputs_active = tf.expand_dims(self.b_active_features, -1)
			print(inputs_active)
			lstm_rnn_cell = tf.nn.rnn_cell.GRUCell(hidden_size)  # GRU cell extracting activity features
			lstm_rnn_cell = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_rnn_cell,
			                                              input_keep_prob=self.place_dropout_keep_prob)
			active_outputs, _ = tf.nn.dynamic_rnn(lstm_rnn_cell, inputs=inputs_active, dtype=tf.float32)
			print('active_outputs:{}'.format(active_outputs))  # (?, active_size, hidden_size)

		# Must build a fresh cell object per layer when used with
		# MultiRNNCell; sharing one cell instance across layers breaks.
		def get_a_lstm_cell():
			cell = tf.nn.rnn_cell.GRUCell(hidden_size)
			cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.place_dropout_keep_prob)
			return cell

		content_rnn_cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)], state_is_tuple=True)
		outputs, _ = tf.nn.dynamic_rnn(content_rnn_cells, inputs=self.inputs, dtype=tf.float32)

		# Max-pool over the time axis: keep, per feature, the largest value
		# across all time-step outputs of the content branch.
		fw_output = tf.layers.max_pooling1d(outputs, pool_size=sequence_length, strides=1)
		fw_output = tf.reshape(fw_output, [-1, hidden_size], name='bi_fusion_outputs')
		print('fw_output: {}'.format(fw_output))

		# Feature fusion: combine the two branches' outputs into one feature
		# vector that feeds the output layer.
		with tf.name_scope('fusion'):
			# For a unidirectional RNN we take only the last time step.
			# active_outputs has shape [batch, active_size, hidden_size];
			# active_size is the number of time steps, so index -1 is the last.
			active_outputs = active_outputs[:, -1, :]  # [batch_size, hidden_size]
			print('active_outputs: {}'.format(active_outputs))
			# Fuse the two branches' high-level features by element-wise product.
			fusion_outputs = tf.multiply(fw_output, active_outputs, name='fusion_outputs')  # [batch_size, hidden_size]
			print('fusion_outputs.shape={}'.format(fusion_outputs.shape))  # (?, hidden_size)

		# Output layer: linear projection to class logits.
		with tf.name_scope('output'):
			w_softmax = tf.get_variable('w_softmax', shape=[hidden_size, num_classes])
			b_softmax = tf.get_variable('b_softmax', shape=[num_classes])

			# Raw (unnormalized) logits; sparse_softmax_cross_entropy below
			# applies the softmax itself, and in_top_k ranks raw scores fine.
			logits = tf.matmul(fusion_outputs, w_softmax) + b_softmax
			print('logits.shape()={}'.format(logits.shape))  # (?, num_classes)
			predicted_classes = logits

		with tf.name_scope('loss'):
			# L2 penalty over every trainable variable in the graph.
			l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
			# b_labels are sparse class indices; logits are [batch, num_classes].
			self.loss = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=logits)
			self.loss = tf.reduce_mean(self.loss) + 0.001 * l2_loss

		with tf.name_scope('accuracy'):

			bool_top_1 = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=1, name='bool_top_1')
			bool_top_5 = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=5, name='bool_top_5')

			self.acc_top_1 = tf.reduce_mean(tf.cast(bool_top_1, dtype=tf.float32), name="acc_top_1")
			self.acc_top_5 = tf.reduce_mean(tf.cast(bool_top_5, dtype=tf.float32), name="acc_top_5")
			print(self.acc_top_1)
			# NOTE: tf.metrics.precision_at_top_k was tested separately — the
			# value it reports is not the true top-K rate; it must be
			# multiplied by K (which is why callers multiply top_5 by 5).
			self.metrics = {
				'top_1': self.acc_top_1,
				'top_5': self.acc_top_5
			}
