import tensorflow as tf
import os


class BiBasicText(object):
	'''Bidirectional multi-layer GRU text classifier with an auxiliary "pcs" branch.

	TensorFlow 1.x static-graph model: word representation (one-hot, trainable
	embedding, or imported frozen embedding) -> bidirectional GRU stack ->
	pooling / attention reduction -> fusion with the "pcs" feature branch ->
	softmax classifier, plus loss, Adam optimizer, top-k accuracy metrics and
	TensorBoard summaries.  Call build() once to assemble the whole graph.
	'''

	def __init__(self, Flags, n_labels, vocabulary_size, cost_matrix=None, root_dir='./', lookup_table=None, wv_model=None, config=None):
		'''Store hyper-parameters and declare all input placeholders.

		:param Flags: object exposing embedding_size, hidden_size, max_sentence,
			n_layer, lr, l2, residual, basic_model and fusion attributes.
		:param n_labels: number of output classes.
		:param vocabulary_size: size of the token vocabulary.
		:param cost_matrix: unused here; kept for interface compatibility.
		:param root_dir: directory that receives TensorBoard summaries.
		:param lookup_table: optional pre-trained embedding matrix; when given
			it is used as a frozen (non-trainable) embedding.
		:param wv_model: 'one-hot' or 'embedding' — selects the word
			representation when no lookup_table is supplied.
		:param config: object exposing pcs_size (cardinality of the pcs feature).
		'''
		# Placeholders fed at run time.
		self.keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		self.batch_size = tf.placeholder(dtype=tf.float32, shape=[], name='batch_size')
		self.is_train = tf.placeholder(dtype=tf.bool, name='is_train')

		# Hyper-parameters copied off the Flags / config objects.
		self.vocabulary_size = vocabulary_size
		self.embedding_size = Flags.embedding_size
		self.hidden_size = Flags.hidden_size
		self.max_sentence = Flags.max_sentence
		self.n_layer = Flags.n_layer
		self.lr = Flags.lr
		self.l2 = Flags.l2
		self.n_labels = n_labels
		self.summary_dir = root_dir
		self.lookup_table = lookup_table
		self.wv_model = wv_model
		self.attention = False
		self.config = config
		self.residual = Flags.residual
		self.basic_model = Flags.basic_model
		self.fusion = Flags.fusion

		# One flat batch placeholder: max_sentence token ids followed by one
		# column each for label, sequence length and the pcs feature.
		self.batch = tf.placeholder(dtype=tf.float32, shape=[None, self.max_sentence + 3], name='batch')
		self.b_features, self.b_labels, self.b_sequence_lengths, self.b_pcs = tf.split(self.batch, [self.max_sentence, 1, 1, 1], axis=1)
		self.b_features = tf.cast(self.b_features, dtype=tf.int32)
		self.b_labels = tf.reshape(tf.cast(self.b_labels, dtype=tf.int32), shape=(-1,))
		self.b_sequence_lengths = tf.reshape(tf.cast(self.b_sequence_lengths, dtype=tf.int32), shape=(-1,))
		self.b_pcs = tf.reshape(tf.cast(self.b_pcs, dtype=tf.int32), shape=(-1,))

	def layer_embedding(self):
		'''Trainable embedding lookup; sets self.inputs to (batch, max_sentence, embedding_size).'''
		with tf.name_scope('embedding'):
			embedding = tf.get_variable('embedding', [self.vocabulary_size, self.embedding_size], dtype=tf.float32)
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是input的shape: {}'.format(self.inputs.shape))

	def import_embedding(self):
		'''Use an externally supplied lookup table as a frozen embedding.

		The table is wrapped in tf.constant, so it is NOT trainable; the
		pre-trained word vectors stay fixed during optimization.
		TODO: could instead be a trainable variable initialized from the table.
		'''
		with tf.name_scope('embedding'):
			print('导入预训练的lookup Table')
			# NOTE(review): assumes lookup_table.shape == (vocabulary_size, embedding_size) — verify upstream.
			embedding = tf.constant(self.lookup_table, name='embedding')
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是input的shape: {}'.format(self.inputs.shape))

	def layer_one_hot(self):
		'''Raw one-hot word encoding; optionally projected to hidden_size.

		The projection is required when residual GRU cells are used, because
		ResidualWrapper needs input and output widths to match.
		'''
		with tf.name_scope('embedding'):
			self.inputs = tf.one_hot(self.b_features, depth=self.vocabulary_size, dtype=tf.float32)
			if self.residual:
				self.inputs = tf.layers.dense(self.inputs, units=self.hidden_size, activation=tf.nn.relu)
				self.inputs = tf.nn.dropout(self.inputs, self.keep_prob)

	def get_a_lstm_cell(self):
		'''Build one GRU cell, optionally residual-wrapped, with input dropout.'''
		cell = tf.nn.rnn_cell.GRUCell(self.hidden_size)
		if self.residual:
			cell = tf.nn.rnn_cell.ResidualWrapper(cell)
		return tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.keep_prob)

	def layer_text_rnn(self):
		'''Run the bidirectional multi-layer GRU over self.inputs.

		:return: pair (fw, bw) of tensors shaped (batch, max_sentence, hidden_size).
		'''
		with tf.name_scope('bi_rnn'):
			lstm_fw_cells = tf.nn.rnn_cell.MultiRNNCell([self.get_a_lstm_cell() for _ in range(self.n_layer)], state_is_tuple=True)
			lstm_bw_cells = tf.nn.rnn_cell.MultiRNNCell([self.get_a_lstm_cell() for _ in range(self.n_layer)], state_is_tuple=True)
			outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cells, lstm_bw_cells, self.inputs, sequence_length=self.b_sequence_lengths, dtype=tf.float32)
			print('outputs: {}'.format(outputs))
		return outputs

	def layer_reduction_with_attention(self, outputs):
		'''Sum the two directions, attend over time, project to hidden_size.

		:param outputs: pair (fw, bw) from layer_text_rnn.
		:return: tensor of shape (batch, hidden_size).
		'''
		with tf.name_scope('attention'):
			H = outputs[0] + outputs[1]
			bi_outputs = self.layer_attention(H, self.max_sentence)	# sentence vector
			with tf.name_scope('transform'):  # attention output is already hidden_size; kept for parity with the pooling paths
				bi_outputs = self._custom_dense(inputs=bi_outputs, units=self.hidden_size, activation=tf.nn.relu)
		return bi_outputs

	def layer_reduction_with_max_pooling(self, outputs):
		'''Max-pool each direction over time, concatenate, project to hidden_size.

		:param outputs: pair (fw, bw) shaped (batch, max_sentence, hidden_size).
		:return: tensor of shape (batch, hidden_size).
		'''
		# max_pool needs 4-D input, so add a trailing channel dimension.
		fw_output = tf.expand_dims(outputs[0], axis=-1)
		bw_output = tf.expand_dims(outputs[1], axis=-1)
		# Pool over the whole time axis: (B, T, H, 1) -> (B, 1, H, 1).
		fw_output = tf.nn.max_pool(fw_output, ksize=[1, self.max_sentence, 1, 1], strides=[1, 1, 1, 1], padding='VALID')
		bw_output = tf.nn.max_pool(bw_output, ksize=[1, self.max_sentence, 1, 1], strides=[1, 1, 1, 1], padding='VALID')

		print('fw_output: {}'.format(fw_output))
		print('bw_output: {}'.format(bw_output))

		# Concatenate the pooled forward/backward features: (B, 2*hidden_size).
		# (The outer reshape in the original was a no-op and has been dropped.)
		bi_outputs = tf.concat([tf.reshape(fw_output, shape=(-1, self.hidden_size)), tf.reshape(bw_output, shape=(-1, self.hidden_size))], 1)
		print('bi_outputs: {}'.format(bi_outputs))
		with tf.name_scope('transform'):  # project back to hidden_size so it can be fused with the pcs branch
			bi_outputs = self._custom_dense(inputs=bi_outputs, units=self.hidden_size, activation=tf.nn.relu)
		return bi_outputs

	def layer_reduction_with_k_max_pooling(self, outputs):
		'''k-max pooling (k=3) over time per direction, then projection.

		:param outputs: pair (fw, bw) shaped (batch, max_sentence, hidden_size).
		:return: tensor of shape (batch, hidden_size).
		'''
		k = 3
		# Transpose to (B, H, T) so top_k keeps the k largest activations per feature.
		# BUGFIX: the flattened width is k * hidden_size (the RNN output width),
		# not k * embedding_size — the original only worked when the two sizes
		# happened to be equal.
		fw_output = tf.reshape(tf.nn.top_k(tf.transpose(outputs[0], [0, 2, 1]), k=k)[0], [-1, k * self.hidden_size])
		bw_output = tf.reshape(tf.nn.top_k(tf.transpose(outputs[1], [0, 2, 1]), k=k)[0], [-1, k * self.hidden_size])
		bi_outputs = tf.concat([fw_output, bw_output], 1)	# (B, 2*k*hidden_size)
		with tf.name_scope('transform'):  # project down to hidden_size for fusion with the pcs branch
			bi_outputs = self._custom_dense(inputs=bi_outputs, units=self.hidden_size, activation=tf.nn.relu)
		return bi_outputs

	def layer_attention(self, H, T):
		'''Attention over the time axis, adapted from
		"Attention-Based Bidirectional Long Short-Term Memory Networks for
		Relation Classification" (Zhou et al., ACL 2016), using a learned
		2-D query (one query vector per time step).

		:param H: tensor of shape (batch, T, hidden_size), batch-major.
		:param T: number of time steps (static int).
		:return: attended sentence vector of shape (batch, hidden_size).
		'''
		Q = tf.Variable(tf.random_normal([T, self.hidden_size], stddev=0.1), name='attention_query')	# (T, D)
		M = tf.nn.tanh(H)	# (B, T, D)
		# Batched matmul over the time axis: (T, B, D) x (T, D, 1) -> (T, B, 1).
		alpha = tf.matmul(tf.transpose(M, [1, 0, 2]), tf.expand_dims(Q, -1))
		alpha = tf.transpose(alpha, [1, 0, 2])		# (B, T, 1)
		alpha = tf.nn.softmax(tf.reshape(alpha, [-1, T]))	# (B, T)
		# Weighted sum of H over time: (B, D, T) x (B, T, 1) -> (B, D, 1).
		r = tf.matmul(tf.transpose(H, [0, 2, 1]), tf.expand_dims(alpha, -1))
		r = tf.reshape(r, [-1, self.hidden_size])
		return tf.nn.tanh(r)

	def layer_pcs_with_BiDense(self):
		'''pcs branch: one-hot -> ReLU dense(1000) -> ReLU dense(embedding_size).'''
		with tf.name_scope('pcs'):
			b_pcs = tf.one_hot(self.b_pcs, depth=self.config.pcs_size, dtype=tf.float32)
			b_pcs = tf.layers.dense(b_pcs, units=1000, activation=tf.nn.relu)
			output_pcs = tf.layers.dense(b_pcs, units=self.embedding_size, activation=tf.nn.relu)
		return output_pcs

	def layer_pcs_with_embedding(self):
		'''pcs branch: trainable embedding lookup -> ReLU dense(embedding_size).'''
		with tf.name_scope('pcs'):
			lookup_table = tf.Variable(tf.random_normal([self.config.pcs_size, 1000]))
			b_pcs = tf.nn.embedding_lookup(lookup_table, self.b_pcs)
			output_pcs = tf.layers.dense(b_pcs, units=self.embedding_size, activation=tf.nn.relu)
		return output_pcs

	def layer_pcs_with_linearDense(self):
		'''pcs branch: one-hot -> linear dense(1000) -> ReLU dense(embedding_size).

		The first layer is deliberately linear: a linear dense over a one-hot
		input is equivalent to an embedding lookup, but unlike ReLU it can
		produce negative features — empirically that performed better here.
		'''
		with tf.name_scope('pcs'):
			b_pcs = tf.one_hot(self.b_pcs, depth=self.config.pcs_size, dtype=tf.float32)
			b_pcs = tf.layers.dense(b_pcs, units=1000, activation=None)
			output_pcs = tf.layers.dense(b_pcs, units=self.embedding_size, activation=tf.nn.relu)
		return output_pcs

	def layer_fusion(self, text_outputs, pc_outputs):
		'''Fuse the text-branch and pcs-branch feature vectors.

		:param text_outputs: (batch, hidden_size) text features.
		:param pc_outputs: (batch, hidden_size) pcs features.
		:return: fused features; (batch, 2*hidden_size) for 'concat',
			(batch, hidden_size) for every other mode.
		:raises ValueError: on an unrecognized fusion mode (the original code
			would have crashed later with a NameError).
		'''
		with tf.name_scope('fusion'):
			if self.fusion == 'mul':
				# Element-wise product of the two branches.
				outputs = tf.multiply(text_outputs, pc_outputs, name='fusion_outputs')
			elif self.fusion == 'add':
				outputs = tf.add(text_outputs, pc_outputs, name='fusion_outputs')
			elif self.fusion == 'concat':
				outputs = tf.concat([text_outputs, pc_outputs], axis=1, name='fusion_outputs')
			elif self.fusion == 'maxpooling':
				outputs = tf.stack([text_outputs, pc_outputs], 1)	# (B, 2, D)
				outputs = tf.reduce_max(outputs, 1)		# element-wise max over the two branches
				outputs = tf.reshape(outputs, (-1, self.hidden_size))
			elif self.fusion == 'attention':
				outputs = tf.stack([text_outputs, pc_outputs], 1)	# (B, 2, D)
				outputs = self.layer_attention(outputs, 2)
			elif self.fusion == 'meanpooling':
				outputs = tf.stack([text_outputs, pc_outputs], 1)	# (B, 2, D)
				outputs = tf.expand_dims(outputs, -1)
				outputs = tf.nn.avg_pool(outputs, ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1], padding='VALID')	# (B, 1, 1, D)
				outputs = tf.reshape(outputs, (-1, self.hidden_size))
			else:
				raise ValueError('unknown fusion mode: {!r}'.format(self.fusion))

			print('fusion_outputs={}'.format(outputs))
		return outputs

	def layer_output(self, outputs):
		'''Final linear projection to class logits; sets self.logits.'''
		with tf.name_scope('output'):
			if self.fusion == 'mul':
				# Explicit weight matrix (kept distinct from the dense path so
				# existing checkpoints with w_softmax/b_softmax still load).
				w_softmax = tf.get_variable('w_softmax', shape=[self.hidden_size, self.n_labels])
				b_softmax = tf.get_variable('b_softmax', shape=[self.n_labels])
				self.logits = tf.matmul(outputs, w_softmax) + b_softmax
				print('logits={}'.format(self.logits))
			else:
				self.logits = tf.layers.dense(outputs, units=self.n_labels, activation=None)

	def layer_loss(self):
		'''Cross-entropy loss plus L2 penalty over all trainable variables.

		Sets self.losses (data loss), self.l2_loss (scaled penalty) and
		self.loss (their sum, the optimization target).
		'''
		with tf.name_scope('loss'):
			# NOTE(review): this also regularizes biases; common but worth confirming intent.
			self.l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])

			losses = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=self.logits)
			self.losses = losses
			self.l2_loss = self.l2 * self.l2_loss

			self.loss = tf.reduce_mean(losses) + self.l2_loss
			print('loss={}'.format(self.loss))

	def layer_optimzer(self):
		'''Adam optimizer; sets self.global_step and self.train_op.'''
		self.global_step = tf.Variable(0, name='global_step', trainable=False)
		optimzer = tf.train.AdamOptimizer(learning_rate=self.lr)
		self.train_op = optimzer.minimize(self.loss, global_step=self.global_step)

	def layer_acc(self):
		'''Top-1..5 accuracy metrics; sets self.acc_top_k and self.metrics.'''
		def calculate_topk(indices, k):
			# A prediction is a hit iff the true label appears among the top-k
			# indices, i.e. some entry of (indices - label) is zero.
			print(self.b_labels)
			diff = indices - tf.reshape(self.b_labels, (-1, 1))
			hits = tf.equal(diff, tf.zeros_like(diff, dtype=tf.int32))
			return tf.reduce_mean(tf.reduce_sum(tf.cast(hits, tf.float32), axis=1), name='top_{}'.format(k))

		with tf.name_scope('accuracy'):
			self.all_probs = tf.nn.softmax(self.logits)      # full distribution, used for reporting

			_, self.top_1_indices = tf.nn.top_k(self.logits, k=1, name='top_1_indices')
			_, self.top_2_indices = tf.nn.top_k(self.logits, k=2, name='top_2_indices')
			_, self.top_3_indices = tf.nn.top_k(self.logits, k=3, name='top_3_indices')
			_, self.top_4_indices = tf.nn.top_k(self.logits, k=4, name='top_4_indices')
			_, self.top_5_indices = tf.nn.top_k(self.logits, k=5, name='top_5_indices')

			self.acc_top_1 = calculate_topk(self.top_1_indices, 1)
			self.acc_top_2 = calculate_topk(self.top_2_indices, 2)
			self.acc_top_3 = calculate_topk(self.top_3_indices, 3)
			self.acc_top_4 = calculate_topk(self.top_4_indices, 4)
			self.acc_top_5 = calculate_topk(self.top_5_indices, 5)
			print(self.acc_top_1)
			print(self.acc_top_5)

			self.metrics = {
				'top_1': self.acc_top_1,
				'top_2': self.acc_top_2,
				'top_3': self.acc_top_3,
				'top_4': self.acc_top_4,
				'top_5': self.acc_top_5
			}

	def layer_summaries(self):
		'''Scalar summaries plus train/test/val FileWriters under summary_dir.'''
		if not os.path.exists(self.summary_dir):
			os.makedirs(self.summary_dir)
		with tf.name_scope('summaries'):
			summary_loss = tf.summary.scalar('loss', self.loss)
			summary_origin_loss = tf.summary.scalar('origin_loss', self.losses)
			summary_l2_loss = tf.summary.scalar('l2_loss', self.l2_loss)
			summary_top1 = tf.summary.scalar('top1_acc', self.acc_top_1)
			summary_top5 = tf.summary.scalar('top5_acc', self.acc_top_5)

			self.train_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.train_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'train'), tf.get_default_graph())
			self.test_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.test_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'test/'), tf.get_default_graph())

			self.val_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.val_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'val/'), tf.get_default_graph())

	def _custom_dense(self, inputs, units, activation):
		'''Dense layer wrapper.

		Kept as an indirection point: TF 1.12 auto-converts string activation
		names to callables but the deployment target runs TF 1.8, which does
		not — so all activation handling is funneled through this method.
		'''
		return tf.layers.dense(inputs, units, activation)

	def build(self):
		'''Assemble the full graph: inputs, RNN, reduction, fusion, head, metrics.

		:raises ValueError: on an unrecognized basic_model (the original code
			would have crashed later with a NameError).
		'''
		if self.lookup_table is None:
			if self.wv_model == 'one-hot':
				self.layer_one_hot()
			elif self.wv_model == 'embedding':
				self.layer_embedding()		# embedding trained by back-propagation
		else:
			self.import_embedding()		# pre-trained external word vectors, kept static
		outputs = self.layer_text_rnn()
		if self.basic_model == 'BiBasicText':
			bi_outputs = self.layer_reduction_with_max_pooling(outputs)
			output_pcs = self.layer_pcs_with_BiDense()
		elif self.basic_model == 'BiBasicText-1':
			bi_outputs = self.layer_reduction_with_k_max_pooling(outputs)
			output_pcs = self.layer_pcs_with_embedding()
		elif self.basic_model == 'BiBasicText-2':
			bi_outputs = self.layer_reduction_with_max_pooling(outputs)
			output_pcs = self.layer_pcs_with_linearDense()
		elif self.basic_model == 'BiBasicText-3':
			bi_outputs = self.layer_reduction_with_max_pooling(outputs)
			output_pcs = self.layer_pcs_with_embedding()
		else:
			raise ValueError('unknown basic_model: {!r}'.format(self.basic_model))
		outputs = self.layer_fusion(bi_outputs, output_pcs)
		self.layer_output(outputs)
		self.layer_loss()
		self.layer_optimzer()
		self.layer_acc()
		self.layer_summaries()
		self.saver = tf.train.Saver(max_to_keep=5)     # keep the last 5 checkpoints; final accuracy is averaged over them
		
	
	
