import tensorflow as tf
import os
import numpy as np


class Transformer(object):
	"""TensorFlow-1 (graph mode) Transformer encoder for sentence classification.

	Call ``build()`` to assemble the graph: an embedding layer (trainable,
	pretrained, or one-hot), ``Flags.n_blocks`` self-attention + feed-forward
	blocks, max-pooling over time, a dense softmax projection, and the
	loss / optimizer / top-k accuracy / summary ops.
	"""

	def __init__(self, Flags, n_labels, vocabulary_size, root_dir='./', lookup_table=None, wv_model=None, config=None):
		"""Create input placeholders and record hyper-parameters.

		Args:
			Flags: hyper-parameter holder (embedding_size, hidden_size,
				max_sentence, n_layer, lr, l2, batch_size, n_blocks, n_heads).
			n_labels: number of target classes.
			vocabulary_size: number of rows in the word-embedding table.
			root_dir: directory where TensorBoard summaries are written.
			lookup_table: optional pretrained embedding matrix; assumed shape
				(vocabulary_size, embedding_size) -- not validated here.
			wv_model: 'one-hot' or 'embedding'; selects the embedding layer
				when no ``lookup_table`` is supplied.
			config: holder for attention_keepProb, epsilon and filters.
		"""
		self.keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		self.batch_size = tf.placeholder(dtype=tf.int32, name='batch_size')
		self.is_train = tf.placeholder(dtype=tf.bool, name='is_train')

		self.vocabulary_size = vocabulary_size
		self.embedding_size = Flags.embedding_size
		self.hidden_size = Flags.hidden_size
		self.max_sentence = Flags.max_sentence

		self.n_layer = Flags.n_layer		# NOTE(review): unused here -- depth is driven by Flags.n_blocks
		self.lr = Flags.lr
		self.l2 = Flags.l2

		self.n_labels = n_labels
		self.summary_dir = root_dir
		self.lookup_table = lookup_table
		self.wv_model = wv_model
		self.config = config
		self.Flags = Flags

		# Each row of `batch` packs [token ids (max_sentence), label (1), true length (1)].
		self.batch = tf.placeholder(dtype=tf.float32, shape=[None, self.max_sentence + 2], name='batch')
		self.b_features, self.b_labels, self.b_sequence_lengths = tf.split(self.batch, [self.max_sentence, 1, 1], axis=1)
		self.b_features = tf.cast(self.b_features, dtype=tf.int32)
		self.b_labels = tf.reshape(tf.cast(self.b_labels, dtype=tf.int32), shape=(-1,))
		self.b_sequence_lengths = tf.reshape(tf.cast(self.b_sequence_lengths, dtype=tf.int32), shape=(-1,))

	def layer_embedding(self, sequenceLen, embeddingSize, all_size, inputs):
		'''
			Trainable word embedding plus the fixed sin/cos position embedding
			of the original Transformer paper, summed element-wise.

			Returns (word_embedding, word_embedding + position_embedding).
		'''
		with tf.name_scope('embedding'):
			embedding = tf.Variable(tf.random_normal([all_size, embeddingSize], stddev=0.1), dtype=tf.float32, name="lookup")
			word_embedding = tf.nn.embedding_lookup(embedding, inputs)
			print('word_embedding:', word_embedding)
			position_embedding = self._position_embedding(sequenceLen, embeddingSize)
			perfect_embedding = word_embedding + position_embedding		# summed, as in the paper
			print('这是perfect_embedding的shape: {}'.format(perfect_embedding.shape))

			return word_embedding, perfect_embedding

	def _position_embedding(self, sequenceLen, embeddingSize):
		"""Build the sinusoidal position embedding, shape [batch, seq_len, emb]."""
		with tf.name_scope('position_embedding'):
			print(sequenceLen)
			print(self.batch_size)
			# Position indices of one sample, tiled over the whole batch.
			# NOTE(review): this uses the *static* Flags.batch_size rather than
			# the batch_size placeholder, so the graph only accepts full batches.
			positionIndex = tf.tile(tf.expand_dims(tf.range(sequenceLen), 0), [self.Flags.batch_size, 1])

			# Angle table: pos / 10000^(2i/d); (i - i%2) maps dim i to its pair index 2*(i//2).
			positionEmbedding = np.array([[pos / np.power(10000, (i - i % 2) / embeddingSize) for i in range(embeddingSize)]
										for pos in range(sequenceLen)])

			# sin on even dimensions, cos on odd ones.
			positionEmbedding[:, 0::2] = np.sin(positionEmbedding[:, 0::2])
			positionEmbedding[:, 1::2] = np.cos(positionEmbedding[:, 1::2])

			# Convert the numpy table to a tensor.
			positionEmbedding_ = tf.cast(positionEmbedding, dtype=tf.float32)

			# Gather per-position rows -> [batchSize, sequenceLen, embeddingSize].
			positionEmbedded = tf.nn.embedding_lookup(positionEmbedding_, positionIndex)
			print('positionEmbedded:{}'.format(positionEmbedded))
		return positionEmbedded

	def import_embedding(self, sequenceLen, embeddingSize, all_size):
		'''
		Pretrained (e.g. word2vec / fasttext) word embedding plus the sin/cos
		position embedding. The table is frozen (tf.constant), so it is not
		fine-tuned during training. Only usable for the text network.
		:return: (word_embedding, word_embedding + position_embedding)
		'''
		with tf.name_scope('embedding'):
			print('导入预训练的lookup Table')
			# NOTE(review): lookup_table is assumed to have shape
			# (vocabulary_size, embedding_size) -- worth asserting upstream.
			embedding_table = tf.constant(self.lookup_table, name='embedding')
			word_embedding = tf.nn.embedding_lookup(embedding_table, self.b_features)
			position_embedding = self._position_embedding(sequenceLen, embeddingSize)
			perfect_embedding = word_embedding + position_embedding		# summed, as in the paper

			print('这是perfect_embedding的shape: {}'.format(perfect_embedding.shape))
		return word_embedding, perfect_embedding

	def layer_one_hot(self, sequenceLen, inputs, all_size):
		'''
			One-hot word encoding concatenated with a one-hot (non-trainable)
			position embedding; reportedly works well on small data sets.
		'''
		with tf.name_scope('embedding'):
			# Bug fix: the batch dimension of the `batch` placeholder is None,
			# so b_features.get_shape()[0].value was None and range(None) in
			# fixedPositionEmbedding raised. Use the static batch size instead,
			# consistent with _position_embedding.
			positionEmbedding = tf.constant(self.fixedPositionEmbedding(self.Flags.batch_size, sequenceLen))
			embedded = tf.one_hot(inputs, depth=all_size, dtype=tf.float32)
			embeddedWords = tf.concat([embedded, positionEmbedding], -1)		# concat one-hot word and position codes
		return None, embeddedWords

	def get_a_lstm_cell(self):
		"""Return a dropout-wrapped recurrent cell.

		NOTE(review): despite the name this builds a GRU cell, not an LSTM.
		It is not used by build(); kept unchanged for external callers.
		"""
		cell = tf.nn.rnn_cell.GRUCell(self.hidden_size)
		cellD = tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.keep_prob)
		return cellD

	def layer_text(self, word_embedding, perfect_embedding):
		"""Stack n_blocks of (multi-head attention -> feed-forward), then max-pool over time."""
		self.embeddedWords = perfect_embedding
		with tf.name_scope('text_transformer'):
			for i in range(self.Flags.n_blocks):
				with tf.name_scope('transformer-{}'.format(i + 1)):
					# [batch_size, sequence_length, embedding_size]
					multiHeadAtt = self._multiheadAttention(rawKeys=self.b_features, queries=self.embeddedWords, keys=self.embeddedWords, numHeads=self.Flags.n_heads)
					# FIXME(review): config.filters is the inner feed-forward width;
					# the outer conv projects back to embedding_size -- confirm the
					# configured value is sensible (paper uses 4 * embedding_size).
					self.embeddedWords = self._feedForward(multiHeadAtt, [self.config.filters, self.embedding_size])
			outputs = tf.reduce_max(self.embeddedWords, axis=1)		# max-pool over time to reduce over-fitting
			with tf.name_scope("dropout"):
				self.outputs = tf.nn.dropout(outputs, keep_prob=self.keep_prob)

	def layer_output(self):
		"""Project pooled features to unnormalised class scores (logits)."""
		with tf.name_scope('output'):
			self.logits = tf.layers.dense(self.outputs, self.n_labels, activation=None)
			print('logits={}'.format(self.logits))

	def layer_loss(self):
		"""Sparse softmax cross-entropy plus L2 weight decay."""
		with tf.name_scope('loss'):
			# NOTE(review): this regularises every trainable variable, biases included.
			self.l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])

			losses = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=self.logits)
			self.losses = losses
			self.l2_loss = self.l2 * self.l2_loss
			self.loss = tf.reduce_mean(losses) + self.l2_loss
			print('loss={}'.format(self.loss))

	def layer_optimzer(self):
		"""Adam optimizer; global_step is incremented on every train_op run."""
		self.global_step = tf.Variable(0, name='global_step', trainable=False)
		optimzer = tf.train.AdamOptimizer(learning_rate=self.lr)
		self.train_op = optimzer.minimize(self.loss, global_step=self.global_step)

	def layer_acc(self):
		"""Build softmax probabilities and top-1..top-5 accuracy ops."""
		def calculate_topk(indices, k):
			# A row scores 1 when the label appears among its top-k indices
			# (at most one of the k differences can be zero).
			print(self.b_labels)
			diff = indices - tf.reshape(self.b_labels, (-1, 1))
			hits = tf.equal(diff, tf.zeros_like(diff, dtype=tf.int32))
			return tf.reduce_mean(tf.reduce_sum(tf.cast(hits, tf.float32), axis=1), name='top_{}'.format(k))

		with tf.name_scope('accuracy'):
			self.all_probs = tf.nn.softmax(self.logits)      # full distribution, kept for downstream inspection

			_, self.top_1_indices = tf.nn.top_k(self.logits, k=1, name='top_1_indices')
			_, self.top_2_indices = tf.nn.top_k(self.logits, k=2, name='top_2_indices')
			_, self.top_3_indices = tf.nn.top_k(self.logits, k=3, name='top_3_indices')
			_, self.top_4_indices = tf.nn.top_k(self.logits, k=4, name='top_4_indices')
			_, self.top_5_indices = tf.nn.top_k(self.logits, k=5, name='top_5_indices')

			self.acc_top_1 = calculate_topk(self.top_1_indices, 1)
			self.acc_top_2 = calculate_topk(self.top_2_indices, 2)
			self.acc_top_3 = calculate_topk(self.top_3_indices, 3)
			self.acc_top_4 = calculate_topk(self.top_4_indices, 4)
			self.acc_top_5 = calculate_topk(self.top_5_indices, 5)
			print(self.acc_top_1)
			print(self.acc_top_5)

			self.metrics = {
				'top_1': self.acc_top_1,
				'top_2': self.acc_top_2,
				'top_3': self.acc_top_3,
				'top_4': self.acc_top_4,
				'top_5': self.acc_top_5
			}

	def layer_summaries(self):
		"""Create scalar summaries plus train/test/val FileWriters under summary_dir."""
		if not os.path.exists(self.summary_dir):
			os.makedirs(self.summary_dir)
		with tf.name_scope('summaries'):
			summary_loss = tf.summary.scalar('loss', self.loss)
			summary_origin_loss = tf.summary.scalar('origin_loss', self.losses)
			summary_l2_loss = tf.summary.scalar('l2_loss', self.l2_loss)
			summary_top1 = tf.summary.scalar('top1_acc', self.acc_top_1)
			summary_top5 = tf.summary.scalar('top5_acc', self.acc_top_5)

			# The three merged ops are identical; only the writer directories differ.
			self.train_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.train_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'train'), tf.get_default_graph())
			self.test_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.test_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'test/'), tf.get_default_graph())

			self.val_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.val_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'val/'), tf.get_default_graph())

	def _multiheadAttention(self, rawKeys, queries, keys, numUnits=None, causality=False, scope="multiheadAttention", numHeads=None):
		"""Scaled dot-product multi-head attention with key masking, residual and LayerNorm.

		Args:
			rawKeys: int token ids used only for padding masks (0 = padding).
			queries/keys: [batch_size, sequence_length, embedding_size] tensors.
			numUnits: projection width; defaults to the embedding size of queries.
			causality: unused here (no future masking is applied).
			numHeads: number of attention heads; must divide numUnits.
		Returns:
			[batch_size, sequence_length, embedding_size] tensor.
		"""
		keepProb = self.config.attention_keepProb
		if numUnits is None:  # default to the last (embedding) dimension of the queries
			numUnits = queries.get_shape().as_list()[-1]
		# Dense layers map Q/K/V before splitting into heads; projecting first and
		# splitting afterwards is equivalent to per-head weight matrices.
		# Q, K, V: [batch_size, sequence_length, embedding_size]
		Q = tf.layers.dense(queries, numUnits, activation=tf.nn.relu)
		K = tf.layers.dense(keys, numUnits, activation=tf.nn.relu)
		V = tf.layers.dense(keys, numUnits, activation=tf.nn.relu)

		# Split the last dimension into numHeads pieces and stack them on the
		# batch axis: [batch_size * numHeads, sequence_length, embedding_size / numHeads]
		Q_ = tf.concat(tf.split(Q, numHeads, axis=-1), axis=0)
		K_ = tf.concat(tf.split(K, numHeads, axis=-1), axis=0)
		V_ = tf.concat(tf.split(V, numHeads, axis=-1), axis=0)

		# Dot products between queries and keys: [batch_size * numHeads, queries_len, keys_len]
		similary = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))

		# Scale by sqrt of the per-head dimension.
		scaledSimilary = similary / (K_.get_shape().as_list()[-1] ** 0.5)
		print(rawKeys)
		# Tile the padding mask over the heads: [batch_size * numHeads, keys_len]
		keyMasks = tf.tile(rawKeys, [numHeads, 1])

		# Add a query axis and tile: [batch_size * numHeads, queries_len, keys_len]
		keyMasks = tf.tile(tf.expand_dims(keyMasks, 1), [1, tf.shape(queries)[1], 1])
		print('keyMasks:{}'.format(keyMasks))
		# A very large negative constant stands in for -inf so that masked
		# positions get ~0 weight after the softmax.
		paddings = tf.ones_like(scaledSimilary) * (-2 ** (32 + 1))
		print('paddings:{}'.format(paddings))
		# Where the token id is 0 (padding) replace the similarity with the
		# large negative padding value; shapes of all three args match.
		maskedSimilary = tf.where(tf.equal(keyMasks, 0), paddings, scaledSimilary)  # [batch_size * numHeads, queries_len, keys_len]
		print('maskedSimilary:{}'.format(maskedSimilary))
		# Attention weights: [batch_size * numHeads, queries_len, keys_len]
		weights = tf.nn.softmax(maskedSimilary)

		# Weighted sum of values: [batch_size * numHeads, sequence_length, embedding_size / numHeads]
		outputs = tf.matmul(weights, V_)

		# Re-assemble the heads back to [batch_size, sequence_length, embedding_size]
		outputs = tf.concat(tf.split(outputs, numHeads, axis=0), axis=2)

		outputs = tf.nn.dropout(outputs, keep_prob=keepProb)

		# Residual connection around the sub-layer: H(x) = F(x) + x
		outputs += queries
		# Layer normalisation
		outputs = self._layerNormalization(outputs)
		return outputs

	def _layerNormalization(self, inputs, scope="layerNorm"):
		"""Layer normalisation over the last (embedding) dimension with learned scale/shift."""
		epsilon = self.config.epsilon

		inputsShape = inputs.get_shape()  # [batch_size, sequence_length, embedding_size]

		paramsShape = inputsShape[-1:]

		# Unlike batch norm, statistics are computed per position over the last
		# dimension only; mean/variance: [batch_size, sequence_len, 1]
		mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)

		beta = tf.Variable(tf.zeros(paramsShape))

		gamma = tf.Variable(tf.ones(paramsShape))
		normalized = (inputs - mean) / ((variance + epsilon) ** .5)
		outputs = gamma * normalized + beta

		return outputs

	def _feedForward(self, inputs, filters, scope="multiheadAttention"):
		"""Position-wise feed-forward sub-layer as two 1x1 convolutions + residual + LayerNorm.

		Args:
			inputs: [batch_size, sequence_length, embedding_size] tensor.
			filters: [inner_width, embedding_size]; the second entry must equal
				the embedding size so the residual addition is shape-compatible.
		"""
		# Inner layer (ReLU).
		params = {"inputs": inputs, "filters": filters[0], "kernel_size": 1,
				  "activation": tf.nn.relu, "use_bias": True}
		outputs = tf.layers.conv1d(**params)

		# Outer layer (linear), projecting back to the embedding size.
		params = {"inputs": outputs, "filters": filters[1], "kernel_size": 1,
				  "activation": None, "use_bias": True}

		# kernel_size=1 makes this a per-position dense layer:
		# [batch_size, sequence_length, embedding_size]
		outputs = tf.layers.conv1d(**params)
		# Residual connection.
		print('inputs:,', inputs)
		print('outputs:', outputs)
		outputs += inputs
		# Normalisation.
		outputs = self._layerNormalization(outputs)
		return outputs

	def fixedPositionEmbedding(self, batchSize, sequenceLen):
		'''
			One-hot position embedding: for every sample, position ``step`` is the
			``step``-th basis vector, i.e. an identity matrix repeated per batch.
			Returns float32 array of shape (batchSize, sequenceLen, sequenceLen).
		'''
		# Vectorised replacement for the original triple Python loop; output
		# is identical (identity matrix tiled over the batch dimension).
		return np.tile(np.eye(sequenceLen, dtype="float32"), (batchSize, 1, 1))

	def build(self):
		"""Assemble the full graph and create the checkpoint saver."""
		if self.lookup_table is None:
			if self.wv_model == 'one-hot':
				word_embedding, perfect_embedding = self.layer_one_hot(self.max_sentence, self.b_features, self.vocabulary_size)
			elif self.wv_model == 'embedding':
				print('embedding')
				# Embedding table trained by back-propagation.
				word_embedding, perfect_embedding = self.layer_embedding(self.max_sentence, self.embedding_size, self.vocabulary_size, self.b_features)
			else:
				# Fail fast instead of hitting an UnboundLocalError below.
				raise ValueError('unknown wv_model: {!r}'.format(self.wv_model))
		else:
			# Pretrained external word vectors, kept static (not trained further).
			word_embedding, perfect_embedding = self.import_embedding(self.max_sentence, self.embedding_size, self.vocabulary_size)
		self.layer_text(word_embedding, perfect_embedding)

		self.layer_output()
		self.layer_loss()
		self.layer_optimzer()
		self.layer_acc()
		self.layer_summaries()
		self.saver = tf.train.Saver(max_to_keep=5)     # keep the 5 most recent checkpoints
		
	
	
