import tensorflow as tf


class BiRNN(object):
	"""Bi-directional GRU text model fused with a developer-activity GRU (TF1 static graph).

	The graph built in ``__init__`` has three parts:

	1. **Text branch** — word ids -> embedding lookup -> multi-layer
	   bidirectional GRU -> max-pool over time -> forward/backward outputs
	   added element-wise (``bi_outputs``, shape ``[batch, hidden_size]``).
	2. **Activity branch** — recent developer-activity ids -> one-hot
	   (depth ``developers_size``) -> multi-layer unidirectional GRU ->
	   last time step taken (``active_outputs``, shape ``[batch, hidden_size]``).
	3. **Fusion + output** — element-wise product of the two feature
	   vectors -> dense softmax layer -> probabilities re-weighted by
	   ``(1 - cost_matrix)`` and re-normalized -> cross-entropy loss with
	   L2 regularization over all trainable variables.

	Public graph handles created here: placeholders ``b_features``,
	``b_labels``, ``b_active_features``, ``b_sequence_lengths``,
	``b_active_actual_lengths``, ``place_dropout_keep_prob``; tensors
	``loss``, ``acc_top_1``, ``acc_top_5``, ``top_5_indices``; and the
	``metrics`` dict (``'top_1'``/``'top_5'``).
	"""

	def __init__(self, sequence_length, num_classes, vocabulary_size, developers_size, embedding_size, hidden_size,
	             batch_size, active_size, layer_num, cost_matrix):
		"""Build the full TF1 graph (no computation happens until session run).

		:param sequence_length: fixed (padded) word-sequence length per sample
		:param num_classes: width of the softmax output layer
			(NOTE(review): the loss one-hots labels with depth
			``developers_size``, so this appears to require
			``num_classes == developers_size`` — confirm)
		:param vocabulary_size: number of rows in the word-embedding table
		:param developers_size: number of developer classes / one-hot depth
		:param embedding_size: word-embedding dimensionality
		:param hidden_size: GRU hidden-state size (shared by both branches)
		:param batch_size: fixed batch size used for the RNN initial state
			(NOTE(review): ``zero_state(batch_size, ...)`` hard-codes the
			batch; a smaller final batch would need padding or dropping)
		:param active_size: number of activity time steps per sample
		:param layer_num: number of stacked GRU layers in each RNN
		:param cost_matrix: cost-sensitive weights, baked into the graph as a
			constant; probabilities are scaled by ``1 - cost_matrix``
			(presumably shape ``[developers_size, developers_size]`` since it
			is used in a matmul against ``[batch, developers_size]`` — TODO
			confirm against the caller)
		"""
		# Input placeholders first.
		self.place_dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		# self.place_batch_size = tf.placeholder(tf.int32, name='batch_size')       # dynamic batch size, for the final batch that is smaller than batch_size
		# self.place_windows = tf.placeholder(tf.int32, shape=[None], name='place_windows_bug_ids')
		# First dim is None because the batch size varies: the last batch is usually smaller than batch_size.
		self.b_features = tf.placeholder(dtype=tf.int32, shape=[None, sequence_length], name='place_features')
		self.b_labels = tf.placeholder(dtype=tf.int64, shape=[None], name='place_labels')
		# Features for the activity-extraction branch.
		self.b_active_features = tf.placeholder(dtype=tf.int32, shape=[None, active_size],
		                                        name='batch_active_features')
		self.b_sequence_lengths = tf.placeholder(dtype=tf.int32, shape=[None], name='place_sequence_lengths')
		self.b_active_actual_lengths = tf.placeholder(dtype=tf.int32, shape=[None], name='place_active_actual_lengths')
		self.cost_matrix = tf.constant(cost_matrix, dtype=tf.float32, name='cost_matrix')
		# self.cost_matrix = tf.placeholder(shape=[developers_size], dtype=tf.float32, name='cost_matrix')
		print(self.cost_matrix)
		l2_loss = tf.constant(0.0)

		# print(self.b_features)
		# print(self.b_labels)
		with tf.name_scope('embedding'):
			# Word embedding.
			# `embedding` is the learned lookup table mapping word ids to dense vectors;
			# compared with one-hot encoding this is a dimensionality reduction.

			embedding = tf.get_variable('embedding', [vocabulary_size, embedding_size], dtype=tf.float32)
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			# hint: reduce features length
			# print('inputs shape: {}'.format(self.inputs.shape))    # (?, 400, 200)
			# To save memory, words are only expanded at model-input time.
			# Even so, OOM still occurred with one-hot inputs, hence the embedding
			# lookup above and keeping all three dimensions as small as possible.
			# self.inputs = tf.one_hot(self.b_features, depth=vocabulary_size)
			print('这是input的shape: {}'.format(self.inputs.shape))  # (?, 400, 19704)

		# lstm_fw_cell = tf.nn.rnn_cell.GRUCell(hidden_size)
		# lstm_bw_cell = tf.nn.rnn_cell.GRUCell(hidden_size)

		# Each MultiRNNCell layer needs its own fresh cell instance; reusing one
		# cell object across layers would fail, hence this factory function.
		def get_a_lstm_cell():
			"""Create one dropout-wrapped GRU cell (a fresh instance per call)."""
			# cell = tf.nn.rnn_cell.GRUCell(hidden_size, bias_initializer=tf.orthogonal_initializer, kernel_initializer=tf.orthogonal_initializer)
			# hint: tf.nn.rnn_cell.LSTM()
			cell = tf.nn.rnn_cell.GRUCell(hidden_size)
			cellD = tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.place_dropout_keep_prob)
			return cellD

		def length(sequence):
			'''
			Compute the actual (unpadded) time-step count for each sample in a batch
			by counting time steps whose feature vector is not all-zero.

			NOTE(review): currently unused — sequence lengths are fed through the
			``b_sequence_lengths`` / ``b_active_actual_lengths`` placeholders instead.

			:param sequence: a batch of features, shape [batch, time, feature]
			:return: int32 tensor of per-sample lengths, shape [batch]
			'''
			used = tf.sign(tf.reduce_max(tf.abs(sequence), 2))
			length = tf.reduce_sum(used, 1)
			length = tf.cast(length, tf.int32)
			return length

		with tf.name_scope('rnn_active'):
			print(self.b_active_features)
			# inputs_active = tf.expand_dims(self.b_active_features, -1)  # expand 2-D input to 3-D by adding a trailing dim of 1
			inputs_active = tf.one_hot(self.b_active_features, depth=developers_size, dtype=tf.float32)
			print(inputs_active)
			# lstm_rnn_cell = tf.nn.rnn_cell.GRUCell(hidden_size)  # RNN cell for activity extraction
			cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)],
			                                            state_is_tuple=True)
			active_outputs, _ = tf.nn.dynamic_rnn(cells, inputs=inputs_active, dtype=tf.float32,
			                                      sequence_length=self.b_active_actual_lengths)
			print('active_outputs:{}'.format(active_outputs))  # (?, 25, hidden_size)

		# There is a claim that pre-combining cells into a MultiRNNCell and then
		# calling bidirectional_dynamic_rnn is wrong — supposedly only the last
		# layer's outputs get concatenated that way:
		# https://cloud.tencent.com/developer/article/1085072
		# That article avoids MultiRNNCell and just uses [get_a_lstm_cell() for _ in range(2)].
		lstm_fw_cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)], state_is_tuple=True)
		lstm_bw_cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)], state_is_tuple=True)
		# print(lstm_fw_cells)
		# print(lstm_bw_cells)

		# Zero initial states (analogous to weight initialization).
		# b_labels' first dim is None, but the initial state must be built before
		# any session run, so a concrete batch_size is required here.
		# NOTE(review): this pins the graph to exactly `batch_size` samples per
		# step; the final partial batch must be handled by the caller.
		# hint:batch=tf.shape(self.b_features)[0]
		initial_state_fw = lstm_fw_cells.zero_state(batch_size, tf.float32)
		initial_state_bw = lstm_bw_cells.zero_state(batch_size, tf.float32)
		outputs, _, = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cells, lstm_bw_cells, self.inputs,
		                                              sequence_length=self.b_sequence_lengths,
		                                              dtype=tf.float32,
		                                              initial_state_fw=initial_state_fw,
		                                              initial_state_bw=initial_state_bw
		                                              )


		print('outputs: {}'.format(outputs))
		# Max-pool over the full time axis: pool_size == sequence_length keeps the
		# single largest activation per feature across all time steps.
		fw_output = tf.layers.max_pooling1d(outputs[0], pool_size=sequence_length, strides=1)
		bw_output = tf.layers.max_pooling1d(outputs[1], pool_size=sequence_length, strides=1)
		print('fw_output: {}'.format(fw_output))
		print('bw_output: {}'.format(bw_output))

		# Combine the forward and backward outputs.
		# (Wouldn't directly concatenating fw and bw lose some backward features?)
		# bi_outputs = tf.reshape(tf.concat([fw_output, bw_output], 1), [-1, hidden_size * 2])
		# Here the forward and backward outputs are ADDED instead of concatenated,
		# so the fused vector stays at hidden_size.
		bi_outputs = tf.reshape(tf.add(fw_output, bw_output), [-1, hidden_size], name='bi_fusion_outputs')

		# print('bi_outputs: {}'.format(bi_outputs))
		# Feature fusion: merge the high-level features of the two RNNs and feed
		# the fused features to the output layer.
		with tf.name_scope('fusion'):
			# For the unidirectional RNN we only take the last time step's output.
			# active_outputs was [batch_size, active_size, hidden_size];
			# active_size is the number of time steps, so index -1 is the last step.
			active_outputs = active_outputs[:, -1, :]  # [batch_size, hidden_size]
			# Fuse the two networks' high-level features by element-wise product.
			fusion_outputs = tf.multiply(bi_outputs, active_outputs, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = tf.add(bi_outputs, active_outputs, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = tf.concat([bi_outputs, active_outputs], axis=1, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = bi_outputs  # [batch_size, hidden_size]
			print('fusion_outputs.shape={}'.format(fusion_outputs.shape))  # (?, hidden_size)
		# Output layer.
		with tf.name_scope('output'):
			# w_softmax = tf.get_variable('w_softmax', shape=[hidden_size * 2, num_classes])
			w_softmax = tf.get_variable('w_softmax', shape=[hidden_size, num_classes])
			b_softmax = tf.get_variable('b_softmax', shape=[num_classes])

			# l2_loss += tf.nn.l2_loss(w_softmax)
			# l2_loss += tf.nn.l2_loss(b_softmax)
			# logits = tf.nn.softmax(tf.matmul(fusion_outputs, w_softmax) + b_softmax)  # softmax applied later, in the loss scope
			logits = tf.matmul(fusion_outputs, w_softmax) + b_softmax  # linear layer: fused features times weights plus bias
			# logits = tf.matmul(bi_outputs, w_softmax) + b_softmax  # (variant implying fusion is useless)
			print('logits.shape()={}'.format(logits.shape))  # (?, 759); 759 is the number of developers
			# Evaluation metrics are computed further below.
			# predicted_classes = tf.argmax(logits, 1)  # index of the max value along dim 1, one per sample
			# The cost-sensitive matrix is multiplied into the probabilities in the loss scope.

		def new_layer(inputs, units, activation_function=None):
			'''
			Experimental custom layer (a hand-rolled dense-like transform).

			NOTE(review): currently unused — kept only as commented-out history
			in the 'cost' scope below.

			:param inputs: [batch, developers_size] tensor
			:param units: output width (used only for the bias shape)
			:param activation_function: optional activation applied to the output
			:return: [batch, developers_size] tensor
			'''
			weights = tf.Variable(tf.zeros([developers_size, developers_size]))
			biases = tf.Variable(tf.zeros([1, units])+0.1)
			# wx_plus_b = tf.matmul(inputs, weights) + biases
			inputs = tf.reshape(tf.tile(inputs, [1, developers_size]), shape=[batch_size, developers_size, developers_size])
			outputs = tf.reduce_sum(tf.multiply(inputs, weights),axis=1)     # element-wise multiply per row, then sum each column -> [1, K]

			if activation_function is None:
				outputs = outputs
			else:
				outputs = activation_function(outputs)
			return outputs

		# with tf.name_scope('cost'):
		# # 	logits = new_layer(logits, developers_size)
		# 	logits = tf.layers.dense(inputs=logits, units=developers_size, use_bias=False)

		with tf.name_scope('loss'):

			# y = new_layer(logits, developers_size)
			# L2 over ALL trainable variables (embedding, RNN kernels, output layer).
			l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
			# logits shape: [d_0, d_1, ..., d_{r-1}, num_classes]
			y = tf.nn.softmax(logits)       # normalize to probabilities; y = Tensor("loss/Softmax:0", shape=(32, 642), dtype=float32)
			# y = tf.layers.dense(inputs=logits, units=developers_size, use_bias=False)            # add a fully-connected layer
			# y = tf.nn.softmax(logits)
			# print('y={}'.format(y))
			# if self.is_train == 1:    # 1 meant fine-tuning, 0 pre-training without cost-sensitive adjustment; turned out not to help
			# Apply the cost-sensitive matrix to the probabilities here.
			# y = tf.multiply(y, 1-self.cost_matrix)
			y = tf.matmul(y, 1-self.cost_matrix)
			# y = tf.reshape(tf.tile(y, [1, developers_size]), shape=[batch_size, developers_size, developers_size])
			# y = tf.reduce_sum(tf.multiply(y, 1 - self.cost_matrix), axis=1)     # element-wise multiply per row, then sum each column -> [1, K]
			# print(y)            # Tensor("loss/Sum:0", shape=(32, 642), dtype=float32)
			# y = tf.nn.softmax(y)        # softmax again for convenience
			# Re-normalize the penalized probabilities so rows sum to 1 before the loss.
			# NOTE(review): `keep_dims` is the deprecated spelling of `keepdims` in later TF versions.
			y = y/tf.reduce_sum(y, axis=1, keep_dims=True)          # normalize the penalty-summed probabilities for the loss computation
			# Possibly rework this later?
			one_labels = tf.one_hot(self.b_labels, depth=developers_size)       # labels must be one-hot before the cross-entropy
			# (History: first thought this needed matmul instead of element-wise multiply;
			# 2018-09-03 16:27:50 — wrong, element-wise multiply is correct.)
			# Hand-rolled cross-entropy, averaged over the fixed batch_size.
			# NOTE(review): tf.log(y) is not clipped — a zero probability would yield -inf/NaN; confirm inputs cannot produce one.
			self.loss = (-tf.reduce_sum(tf.multiply(tf.to_float(one_labels), tf.log(y)))) / batch_size

			# self.loss = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=logits)
			# tf.losses.softmax_cross_entropy  # one-hot label
			# reduce_mean of a scalar is a no-op; the 0.001 factor is the L2 weight.
			self.loss = tf.reduce_mean(self.loss) + 0.001 * l2_loss
			print(self.loss)
			# predicted_classes = logits
			predicted_classes = y       # (earlier version used logits here, which seemed wrong)
			print(predicted_classes)


		with tf.name_scope('accuracy'):
			# accuracy = tf.metrics.accuracy(labels=b_labels, predictions=predicted_classes, name='acc_op')
			# _, top_1_indice = tf.nn.top_k(predicted_classes, k=1)
			# _, top_5_indice = tf.nn.top_k(predicted_classes, k=5)

			# self.b_labels = tf.cast(self.b_labels, dtype=tf.int64, name='labels_int64')
			# If only top[0] is passed out, the externally received top is always NaN.
			# top_1 = tf.metrics.precision_at_top_k(labels=self.b_labels, predictions_idx=top_1_indice, k=1, name='top1')
			# top_5 = tf.metrics.precision_at_top_k(labels=self.b_labels, predictions_idx=top_5_indice, k=5, name='top5')

			bool_top_1 = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=1, name='bool_top_1')
			bool_top_5 = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=5, name='bool_top_5')

			# Top-5 indices are exported to inspect which samples are mispredicted.
			_, self.top_5_indices = tf.nn.top_k(predicted_classes, k=5)
			self.acc_top_1 = tf.reduce_mean(tf.cast(bool_top_1, dtype=tf.float32), name="acc_top_1")
			self.acc_top_5 = tf.reduce_mean(tf.cast(bool_top_5, dtype=tf.float32), name="acc_top_5")
			print(self.acc_top_1)
			# (Original note, in Chinese below: tf.metrics.precision_at_top_k was tested
			# separately and did not return the expected top-K value — the value times K
			# was the real top-K, hence the external code multiplied top_5 by 5. That
			# approach was abandoned in favor of tf.nn.in_top_k above.)
			'''这里需要注意, 我单独把tf.metrics.precision_at_top_k这块代码摘出去测试过
				得到的topK其实并不是真实的topK, topK*K之后得到的数值才是真实的topK, 所以可以看到外面我获取到top_5之后,
				还乘了个5.
				所以后来我干脆废弃了tf.metrics.precision_at_top_k这种写法，因为实在玩不转，可能是我对API的理解有问题'''
			self.metrics = {
				'top_1': self.acc_top_1,
				'top_5': self.acc_top_5
			}
		# `targets` holds label indices, not concrete prediction values.
		# in_top_k returns a tensor of booleans.
		# This approach did not seem to work:
		# top_1_op = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=1)
		# top_5_op = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=5)
		# self.metrics = {
		# 	'top_1': top_1_op,
		# 	'top_5': top_5_op}
