import tensorflow as tf


class BiRNN_CNN(object):
	"""TF1.x graph-mode classifier combining three feature extractors.

	A bidirectional GRU over word embeddings, a text-CNN over the same
	embeddings, and a unidirectional GRU over developer "activity" one-hots
	are fused element-wise and classified through a softmax layer with a
	cost-sensitive cross-entropy loss.

	Instantiating the class builds the entire graph; results are exposed as
	attributes: the input placeholders (``b_features``, ``b_labels``,
	``b_active_features``, ``b_sequence_lengths``,
	``b_active_actual_lengths``, ``place_dropout_keep_prob``, ``is_train``)
	plus ``loss``, ``acc_top_1``, ``acc_top_5``, ``top_5_indices`` and
	``metrics``.
	"""
	def __init__(self, sequence_length, num_classes, vocabulary_size, developers_size, embedding_size, hidden_size,
	             batch_size, active_size, layer_num, cost_matrix, filter_sizes, num_filters):
		"""Build the computation graph.

		Args:
			sequence_length: number of word tokens per sample; also the
				pooling window of the BiRNN max-pool and the CNN pool height.
			num_classes: width of the softmax output layer.
				NOTE(review): the loss one-hots labels with depth
				``developers_size`` — this only works if
				``num_classes == developers_size``; confirm at call site.
			vocabulary_size: number of rows in the word-embedding table.
			developers_size: one-hot depth for activity features and labels.
			embedding_size: word-embedding width; also the CNN filter width.
			hidden_size: GRU units per layer (all three RNNs) and the width
				of the fully connected layer after the CNN.
			batch_size: concrete batch size — needed for ``zero_state`` and
				for normalising the loss, so the final (smaller) batch of an
				epoch must be padded or dropped by the caller.
			active_size: number of time steps in the activity sequence.
			layer_num: number of stacked GRU layers per RNN.
			cost_matrix: cost-sensitivity matrix baked in as a constant and
				applied to the softmax probabilities in the loss.
			filter_sizes: CNN filter heights (one conv branch per entry).
			num_filters: number of filters per filter size.
		"""
		# Input placeholders.
		self.place_dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		# The first dimension is None because the batch size is not fixed —
		# the last batch of an epoch is usually smaller than batch_size.
		self.b_features = tf.placeholder(dtype=tf.int32, shape=[None, sequence_length], name='place_features')
		self.b_labels = tf.placeholder(dtype=tf.int64, shape=[None], name='place_labels')
		# Features for the activity-extraction branch.
		self.b_active_features = tf.placeholder(dtype=tf.int32, shape=[None, active_size], name='batch_active_features')
		self.b_sequence_lengths = tf.placeholder(dtype=tf.int32, shape=[None], name='place_sequence_lengths')
		self.b_active_actual_lengths = tf.placeholder(dtype=tf.int32, shape=[None], name='place_active_actual_lengths')
		self.cost_matrix = tf.constant(cost_matrix, dtype=tf.float32, name='cost_matrix')
		# self.cost_matrix = tf.placeholder(shape=[developers_size], dtype=tf.float32, name='cost_matrix')
		self.is_train = tf.placeholder(tf.bool, name='is_train')
		# NOTE(review): this initial l2_loss constant is dead — the name is
		# rebound inside the 'loss' scope below without ever reading this.
		l2_loss = tf.constant(0.0)

		with tf.name_scope('embedding'):
			# Word embedding.
			# This is the embedding lookup table.
			# Compared with one-hot, embedding is a dimensionality reduction —
			# at least that is how I understand it.
			embedding = tf.get_variable('embedding', [vocabulary_size, embedding_size], dtype=tf.float32)
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			# hint: reduce features length
			# print('inputs shape: {}'.format(self.inputs.shape))    # (?, 400, 200)
			# To save memory, words were only converted to one-hot right at
			# model input time — but even so, OOM still occurred, so all
			# three dimensions had to be kept as small as possible.
			# self.inputs = tf.one_hot(self.b_features, depth=vocabulary_size)
			print('这是input的shape: {}'.format(self.inputs.shape))  # (?, 400, 19704)

			# Must be used together with MultiRNNCell, otherwise it errors out.
		def get_a_lstm_cell():
			"""Return one dropout-wrapped GRU cell (despite the 'lstm' name)."""
			# hint: tf.nn.rnn_cell.LSTM()
			cell = tf.nn.rnn_cell.GRUCell(hidden_size)
			cellD = tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.place_dropout_keep_prob)
			return cellD

		with tf.name_scope('rnn_active'):
			# Debug prints left in by the author.
			print(self.b_active_features)
			inputs_active = tf.one_hot(self.b_active_features, depth=developers_size, dtype=tf.float32)
			print(inputs_active)
			cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)],
			                                            state_is_tuple=True)
			active_outputs, _ = tf.nn.dynamic_rnn(cells, inputs=inputs_active, dtype=tf.float32,
			                                      sequence_length=self.b_active_actual_lengths)
			print('active_outputs:{}'.format(active_outputs))  # (?, 25, hidden_size)

		lstm_fw_cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)], state_is_tuple=True)
		lstm_bw_cells = tf.nn.rnn_cell.MultiRNNCell([get_a_lstm_cell() for _ in range(layer_num)], state_is_tuple=True)

		# Effectively initialises the recurrent state.
		# Why can b_labels get None as its first dimension while zero_state
		# cannot? Because the state must be fully shaped before the graph is
		# actually run, so a concrete value — batch_size — is required here.
		# hint:batch=tf.shape(self.b_features)[0]
		initial_state_fw = lstm_fw_cells.zero_state(batch_size, tf.float32)
		initial_state_bw = lstm_bw_cells.zero_state(batch_size, tf.float32)
		outputs, _, = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cells, lstm_bw_cells, self.inputs,
		                                              sequence_length=self.b_sequence_lengths,
		                                              dtype=tf.float32,
		                                              initial_state_fw=initial_state_fw,
		                                              initial_state_bw=initial_state_bw
		                                              )

		print('outputs: {}'.format(outputs))
		# Max-pooling over time keeps, for each unit, the largest output
		# across all time steps.
		fw_output = tf.layers.max_pooling1d(outputs[0], pool_size=sequence_length, strides=1)
		bw_output = tf.layers.max_pooling1d(outputs[1], pool_size=sequence_length, strides=1)
		print('fw_output: {}'.format(fw_output))
		print('bw_output: {}'.format(bw_output))

		# Concatenate the forward and backward outputs?
		# Wouldn't directly concatenating them lose part of the backward
		# output's features...
		# bi_outputs = tf.reshape(tf.concat([fw_output, bw_output], 1), [-1, hidden_size * 2])
		# Instead, add the forward and backward outputs element-wise.
		bi_outputs = tf.reshape(tf.add(fw_output, bw_output), [-1, hidden_size], name='bi_fusion_outputs')

		# Text-CNN branch: one conv+max-pool tower per filter size over the
		# (expanded to 4-D) embedded inputs.
		pooled_outputs = []
		for i, filter_size in enumerate(filter_sizes):
			with tf.name_scope('conv-maxpool-%s' % filter_size):
				filter_shape = [filter_size, embedding_size, 1, num_filters]
				W_1 = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W_1')
				b_1 = tf.Variable(tf.constant(0.1, shape=[num_filters]), name='b_1')

				conv_1 = tf.nn.conv2d(tf.expand_dims(self.inputs, -1), W_1, strides=[1,1,1,1], padding='VALID',name='conv_1')

				# NOTE(review): bias_add after batch_normalization is
				# redundant — BN's beta already provides a learned shift;
				# confirm this is intended.
				h_1 = tf.layers.batch_normalization(conv_1, training=self.is_train, name='bn_{}'.format(filter_size))
				h_1 = tf.nn.sigmoid(tf.nn.bias_add(h_1, b_1), name='sigmoid_1')

				pooled = tf.nn.max_pool(h_1, ksize=[1, sequence_length-filter_size + 1, 1, 1], strides=[1,1,1,1], padding='VALID', name='pool')
				print(pooled)
				pooled_outputs.append(pooled)
		num_filters_total = num_filters * len(filter_sizes)
		self.h_pool = tf.concat(pooled_outputs, 3)
		self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

		# Project the concatenated CNN features down to hidden_size so they
		# can be fused with the two RNN branches.
		self.fcn_1 = tf.layers.dense(inputs=self.h_pool_flat, units=hidden_size, activation=tf.nn.sigmoid)


		# Feature fusion: the high-level features of the three networks are
		# merged and the fused features are fed to the output layer.
		with tf.name_scope('fusion'):
			# For the unidirectional RNN, keep only the last time step.
			# NOTE(review): dynamic_rnn zeroes outputs past the true length,
			# so for activity sequences shorter than active_size this picks
			# a zero vector — confirm this is intended.
			active_outputs = active_outputs[:, -1, :]  # [batch_size, hidden_size]
			# Fuse the high-level features by element-wise multiplication.
			fusion_outputs = tf.multiply(bi_outputs, self.fcn_1, name='fusion_outputs')

			fusion_outputs = tf.multiply(fusion_outputs, active_outputs)  # [batch_size, hidden_size]
			# fusion_outputs = tf.add(bi_outputs, active_outputs, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = tf.concat([bi_outputs, active_outputs], axis=1, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = bi_outputs  # [batch_size, hidden_size]
			print('fusion_outputs.shape={}'.format(fusion_outputs.shape))  # (?, hidden_size)
		# Output (softmax) layer.
		with tf.name_scope('output'):
			w_softmax = tf.get_variable('w_softmax', shape=[hidden_size, num_classes])
			b_softmax = tf.get_variable('b_softmax', shape=[num_classes])

			logits = tf.matmul(fusion_outputs, w_softmax) + b_softmax  # upper-layer output times weights, plus bias
			# logits = tf.matmul(bi_outputs, w_softmax) + b_softmax  # (would imply the fusion is useless)
			print('logits.shape()={}'.format(logits.shape))  # (?, 759); 759 is the number of developers
			# Compute evaluation tensors.
			# predicted_classes = tf.argmax(logits, 1)  # index of the max along dim 1; len(outputs) == len(dim 0)

		with tf.name_scope('loss'):
			# L2 penalty over every trainable variable (rebinds the earlier
			# dead constant of the same name).
			l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
			# logits has shape [d_0, d_1, ..., d_{r-1}, num_classes]
			y = tf.nn.softmax(logits)       # normalise to probabilities; y=Tensor("loss/Softmax:0", shape=(32, 642), dtype=float32)

			# Inject cost sensitivity: scale the probabilities by (1 - cost_matrix).
			# y = tf.multiply(y, 1-self.cost_matrix)
			# NOTE(review): this is a matrix product, not the element-wise
			# scaling in the commented line above — only equivalent for a
			# diagonal-like cost_matrix; confirm the intended shape/semantics.
			y = tf.matmul(y, 1-self.cost_matrix)
			# y = tf.reshape(tf.tile(y, [1, developers_size]), shape=[batch_size, developers_size, developers_size])
			# print(y)            # Tensor("loss/Sum:0", shape=(32, 642), dtype=float32)
			y = y/tf.reduce_sum(y, axis=1, keep_dims=True)          # renormalise after the cost penalty so the loss below stays well-defined
			# Might rework this later?
			one_labels = tf.one_hot(self.b_labels, depth=developers_size)       # one-hot the labels before the cross-entropy
			# If I'm not mistaken, this should be a matrix product rather than element-wise
			# 2018-09-03 16:27:50 Wrong — element-wise is correct, you dope
			self.loss = (-tf.reduce_sum(tf.multiply(tf.to_float(one_labels), tf.log(y)))) / batch_size

			# self.loss = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=logits)
			self.loss = tf.reduce_mean(self.loss) + 0.001 * l2_loss
			print(self.loss)
			predicted_classes = y       # what was written here before looked slightly off...
			print(predicted_classes)


		with tf.name_scope('accuracy'):
			bool_top_1 = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=1, name='bool_top_1')
			bool_top_5 = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=5, name='bool_top_5')

			# Keep the top-5 indices so callers can inspect which samples
			# were mispredicted.
			_, self.top_5_indices = tf.nn.top_k(predicted_classes, k=5)
			self.acc_top_1 = tf.reduce_mean(tf.cast(bool_top_1, dtype=tf.float32), name="acc_top_1")
			self.acc_top_5 = tf.reduce_mean(tf.cast(bool_top_5, dtype=tf.float32), name="acc_top_5")
			print(self.acc_top_1)
			# (Author's note kept verbatim below. Summary: a standalone test
			# of tf.metrics.precision_at_top_k showed its "topK" is not the
			# real topK — topK*K was the real value, hence the *5 outside —
			# so that API was abandoned, possibly due to a misunderstanding.)
			'''这里需要注意, 我单独把tf.metrics.precision_at_top_k这块代码摘出去测试过
				得到的topK其实并不是真实的topK, topK*K之后得到的数值才是真实的topK, 所以可以看到外面我获取到top_5之后,
				还乘了个5.
				所以后来我干脆废弃了tf.metrics.precision_at_top_k这种写法，因为实在玩不转，可能是我对API的理解有问题'''
			self.metrics = {
				'top_1': self.acc_top_1,
				'top_5': self.acc_top_5
			}
		# `targets` are index positions, not concrete values from `predictions`.
		# in_top_k returns a tensor of booleans.
		# This approach did not seem to work:
		# top_1_op = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=1)
		# top_5_op = tf.nn.in_top_k(predictions=predicted_classes, targets=self.b_labels, k=5)
		# self.metrics = {
		# 	'top_1': top_1_op,
		# 	'top_5': top_5_op}
