import tensorflow as tf
import os


class DBRNNA(object):
	"""Deep Bidirectional RNN with Attention (DBRNN-A) text classifier.

	Builds a TensorFlow 1.x static graph: token embedding (pre-trained,
	trainable, or one-hot) -> multi-layer bidirectional LSTM ->
	attention / last-time-step merge (selected by ``basic_model``) ->
	dense layer -> softmax logits, together with the loss, top-1..5
	accuracy metrics, Adam optimizer and TensorBoard summaries.
	"""

	def __init__(self, Flags, n_labels, vocabulary_size, cost_matrix=None, root_dir='./', lookup_table=None, wv_model=None, config=None):
		"""Store hyper-parameters and declare the graph's input placeholders.

		Args:
			Flags: hyper-parameter holder exposing ``embedding_size``,
				``hidden_size``, ``max_sentence``, ``n_layer``, ``lr``,
				``l2``, ``basic_model`` and optionally ``residual``.
			n_labels: number of output classes.
			vocabulary_size: size of the token vocabulary.
			cost_matrix: accepted for interface compatibility; not used here.
			root_dir: directory under which TensorBoard summaries are written.
			lookup_table: optional pre-trained embedding matrix of shape
				(vocabulary_size, embedding_size); when given it is used as a
				frozen (non-trainable) embedding.
			wv_model: 'one-hot' or 'embedding' — how tokens are embedded when
				no ``lookup_table`` is supplied.
			config: extra configuration object (stored, not used here).
		"""
		self.keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		self.batch_size = tf.placeholder(dtype=tf.float32, shape=[], name='batch_size')
		self.is_train = tf.placeholder(dtype=tf.bool, name='is_train')

		self.vocabulary_size = vocabulary_size
		self.embedding_size = Flags.embedding_size
		self.hidden_size = Flags.hidden_size
		self.max_sentence = Flags.max_sentence
		self.n_layer = Flags.n_layer
		self.lr = Flags.lr
		self.l2 = Flags.l2
		self.basic_model = Flags.basic_model
		# FIX: `self.residual` was read in layer_one_hot() but never set,
		# raising AttributeError on the one-hot path. Default to False when
		# Flags does not define it (backward compatible).
		self.residual = getattr(Flags, 'residual', False)
		self.n_labels = n_labels
		self.summary_dir = root_dir
		self.lookup_table = lookup_table
		self.wv_model = wv_model

		self.config = config

		# One batch row is: max_sentence token ids, then the label, then the
		# true (unpadded) sequence length — hence the "+ 2" columns.
		self.batch = tf.placeholder(dtype=tf.float32, shape=[None, self.max_sentence + 2], name='batch')
		self.b_features, self.b_labels, self.b_sequence_lengths = tf.split(self.batch, [self.max_sentence, 1, 1], axis=1)
		self.b_features = tf.cast(self.b_features, dtype=tf.int32)
		self.b_labels = tf.reshape(tf.cast(self.b_labels, dtype=tf.int32), shape=(-1,))
		self.b_sequence_lengths = tf.reshape(tf.cast(self.b_sequence_lengths, dtype=tf.int32), shape=(-1,))

	def import_embedding(self):
		"""Use the externally supplied lookup table as a frozen embedding.

		The table is wrapped in a tf.constant, so it is NOT trained.
		TODO: it could instead be a trainable variable initialized from the
		pre-trained vectors, using them as a starting point.
		"""
		with tf.name_scope('embedding'):
			print('导入预训练的lookup Table')
			# FIX: the original shape assertion was commented out (and used
			# `raise` where `assert` was meant). Validate explicitly instead.
			if tuple(self.lookup_table.shape) != (self.vocabulary_size, self.embedding_size):
				raise ValueError(
					'lookup_table shape {} does not match (vocabulary_size, embedding_size) = ({}, {})'.format(
						self.lookup_table.shape, self.vocabulary_size, self.embedding_size))
			embedding = tf.constant(self.lookup_table, name='embedding')
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是input的shape: {}'.format(self.inputs.shape))

	def layer_one_hot(self):
		"""Encode tokens as raw one-hot vectors of size vocabulary_size."""
		with tf.name_scope('embedding'):
			self.inputs = tf.one_hot(self.b_features, depth=self.vocabulary_size, dtype=tf.float32)
			if self.residual:	# optionally project one-hot inputs down to hidden_size
				self.inputs = tf.layers.dense(self.inputs, units=self.hidden_size, activation=tf.nn.relu)
				self.inputs = tf.nn.dropout(self.inputs, self.keep_prob)

	def layer_embedding(self):
		"""Embed tokens with a trainable embedding matrix learned by backprop."""
		with tf.name_scope('embedding'):
			embedding = tf.get_variable('embedding', [self.vocabulary_size, self.embedding_size], dtype=tf.float32)
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是input的shape: {}'.format(self.inputs.shape))  # (?, max_sentence, embedding_size)

	def get_a_lstm_cell(self):
		"""Return one LSTM cell wrapped with input dropout (keep_prob placeholder)."""
		cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size)
		return tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.keep_prob)

	def layer_text_rnn(self):
		"""Run the multi-layer bi-LSTM and merge its outputs into self.outputs.

		The merge strategy depends on ``basic_model``:
		  * 'DBRNNA'   — concat(last step, attention) per direction, then
		                 concat both directions -> [?, 4*hidden_size]
		  * 'DBRNNA-1' — last time step of each direction -> [?, 2*hidden_size]
		  * 'DBRNNA-2' — attention only, per direction -> [?, 2*hidden_size]

		Raises:
			ValueError: if ``basic_model`` is none of the above.
		"""
		with tf.name_scope('bi_rnn'):
			lstm_fw_cells = tf.nn.rnn_cell.MultiRNNCell([self.get_a_lstm_cell() for _ in range(self.n_layer)], state_is_tuple=True)
			lstm_bw_cells = tf.nn.rnn_cell.MultiRNNCell([self.get_a_lstm_cell() for _ in range(self.n_layer)], state_is_tuple=True)
			outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cells, lstm_bw_cells, self.inputs, sequence_length=self.b_sequence_lengths, dtype=tf.float32)
			print('outputs: {}'.format(outputs))
			if self.basic_model == 'DBRNNA':
				with tf.name_scope('attention'):
					forward = self.layer_attention(outputs[0], self.max_sentence)
					backward = self.layer_attention(outputs[1], self.max_sentence)
				with tf.name_scope('merge'):
					# Per direction: [last time step || attention vector].
					r_forward = tf.concat([outputs[0][:, -1, :], forward], 1)	# [?, 2*hidden_size]
					r_backward = tf.concat([outputs[1][:, -1, :], backward], 1)
					bi_outputs = tf.concat([r_forward, r_backward], 1)		# [?, 4*hidden_size]
					print(bi_outputs)
			elif self.basic_model == 'DBRNNA-1':	# no attention: merge with the last time step only
				with tf.name_scope('merge'):
					bi_outputs = tf.concat([outputs[0][:, -1, :], outputs[1][:, -1, :]], 1)		# [?, 2*hidden_size]
					print(bi_outputs)
			elif self.basic_model == 'DBRNNA-2':	# attention replaces the last time step entirely
				with tf.name_scope('attention'):
					forward = self.layer_attention(outputs[0], self.max_sentence)
					backward = self.layer_attention(outputs[1], self.max_sentence)
				with tf.name_scope('merge'):
					bi_outputs = tf.concat([forward, backward], 1)		# [?, 2*hidden_size]
					print(bi_outputs)
			else:
				# FIX: previously an unknown basic_model fell through and
				# crashed later with NameError on `bi_outputs`.
				raise ValueError('unknown basic_model: {!r}'.format(self.basic_model))
			# Project the merged bi-LSTM output to a fixed 1000-d vector so the
			# output layer is independent of the merge strategy chosen above.
			outputs = tf.layers.dense(bi_outputs, 1000, activation=tf.nn.relu)
			self.outputs = tf.nn.dropout(outputs, self.keep_prob)

	def layer_attention(self, H, T):
		"""Single-query additive attention over the time axis.

		Adapted from "Attention-Based Bidirectional Long Short-Term Memory
		Networks for Relation Classification" (Zhou et al., ACL 2016).

		Args:
			H: LSTM outputs, shape (B, T, hidden_size), batch-major.
			T: number of time steps (max_sentence).

		Returns:
			Batch-normalized sentence vector of shape (B, hidden_size).
		"""
		W = tf.Variable(tf.random_normal([self.hidden_size], stddev=0.1), name='attention_w')	# query vector (D,)
		M = tf.nn.tanh(H)	# (B, T, D)
		# (B*T, D) x (D, 1) = (B*T, 1): unnormalized attention scores.
		alpha = tf.matmul(tf.reshape(M, [-1, self.hidden_size]), tf.reshape(W, [-1, 1]))
		alpha = tf.nn.softmax(tf.reshape(alpha, [-1, T]))	# (B, T)
		# Weighted sum over time: transpose H to (B, D, T) so it can be
		# matrix-multiplied with alpha reshaped to (B, T, 1).
		r = tf.matmul(tf.transpose(H, [0, 2, 1]), tf.expand_dims(alpha, -1))
		r = tf.reshape(r, [-1, self.hidden_size])
		output = tf.nn.tanh(r)
		# NOTE(review): BN here depends on tf.GraphKeys.UPDATE_OPS being run
		# during training — see layer_optimzer below.
		output = tf.layers.batch_normalization(output, training=self.is_train)
		return output

	def layer_output(self):
		"""Linear softmax layer: self.outputs (1000-d) -> self.logits (n_labels)."""
		with tf.name_scope('output'):
			w_softmax = tf.get_variable('w_softmax', shape=[1000, self.n_labels])
			b_softmax = tf.get_variable('b_softmax', shape=[self.n_labels])
			self.logits = tf.matmul(self.outputs, w_softmax) + b_softmax	# unnormalized class scores
			print('logits={}'.format(self.logits))

	def layer_loss(self):
		"""Cross-entropy loss plus L2 regularization over all trainable variables."""
		with tf.name_scope('loss'):
			self.l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
			losses = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=self.logits)
			self.losses = losses
			self.l2_loss = self.l2 * self.l2_loss
			self.loss = tf.reduce_mean(losses) + self.l2_loss
			print('loss={}'.format(self.loss))

	def layer_optimzer(self):
		"""Create the Adam train op and collect the BN update ops."""
		self.global_step = tf.Variable(0, name='global_step', trainable=False)
		# NOTE(review): these batch-norm update ops are NOT attached to
		# train_op via tf.control_dependencies; the training loop appears to
		# be expected to run self.update_ops explicitly — confirm callers do.
		self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
		optimzer = tf.train.AdamOptimizer(learning_rate=self.lr)
		self.train_op = optimzer.minimize(self.loss, global_step=self.global_step)

	def layer_acc(self):
		"""Build top-1..5 accuracy tensors and collect them in self.metrics."""
		def caculate_topK(indices, k):
			# A prediction row is a hit iff the true label appears among its
			# top-k indices; averaging hits over the batch gives top-k accuracy.
			print(self.b_labels)
			diff = indices - tf.reshape(self.b_labels, (-1, 1))
			hits = tf.equal(diff, tf.zeros_like(diff, dtype=tf.int32))
			return tf.reduce_mean(tf.reduce_sum(tf.cast(hits, tf.float32), axis=1), name='top_{}'.format(k))

		with tf.name_scope('accuracy'):
			self.all_probs = tf.nn.softmax(self.logits)	# full distribution, kept for inspection/output

			_, self.top_1_indices = tf.nn.top_k(self.logits, k=1, name='top_1_indices')
			_, self.top_2_indices = tf.nn.top_k(self.logits, k=2, name='top_2_indices')
			_, self.top_3_indices = tf.nn.top_k(self.logits, k=3, name='top_3_indices')
			_, self.top_4_indices = tf.nn.top_k(self.logits, k=4, name='top_4_indices')
			_, self.top_5_indices = tf.nn.top_k(self.logits, k=5, name='top_5_indices')

			self.acc_top_1 = caculate_topK(self.top_1_indices, 1)
			self.acc_top_2 = caculate_topK(self.top_2_indices, 2)
			self.acc_top_3 = caculate_topK(self.top_3_indices, 3)
			self.acc_top_4 = caculate_topK(self.top_4_indices, 4)
			self.acc_top_5 = caculate_topK(self.top_5_indices, 5)
			print(self.acc_top_1)
			print(self.acc_top_5)

			self.metrics = {
				'top_1': self.acc_top_1,
				'top_2': self.acc_top_2,
				'top_3': self.acc_top_3,
				'top_4': self.acc_top_4,
				'top_5': self.acc_top_5
			}

	def layer_summaries(self):
		"""Create scalar summaries and train/test/val FileWriters under summary_dir."""
		if not os.path.exists(self.summary_dir):
			os.makedirs(self.summary_dir)
		with tf.name_scope('summaries'):
			summary_loss = tf.summary.scalar('loss', self.loss)
			summary_origin_loss = tf.summary.scalar('origin_loss', self.losses)
			summary_l2_loss = tf.summary.scalar('l2_loss', self.l2_loss)
			summary_top1 = tf.summary.scalar('top1_acc', self.acc_top_1)
			summary_top5 = tf.summary.scalar('top5_acc', self.acc_top_5)

			self.train_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.train_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'train'), tf.get_default_graph())
			self.test_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.test_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'test/'), tf.get_default_graph())

			self.val_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.val_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'val/'), tf.get_default_graph())

	def build(self):
		"""Assemble the full graph: embedding, RNN, output, loss, metrics, optimizer."""
		if self.lookup_table is None:
			if self.wv_model == 'one-hot':
				self.layer_one_hot()
			elif self.wv_model == 'embedding':
				print('embedding')
				self.layer_embedding()		# embedding layer trained by backprop
		else:
			self.import_embedding()		# pre-trained external word vectors, kept static
		self.layer_text_rnn()
		self.layer_output()
		self.layer_loss()
		self.layer_acc()
		self.layer_optimzer()

		self.layer_summaries()
		self.saver = tf.train.Saver(max_to_keep=5)	# keep the 5 most recent checkpoints
	
	
