import tensorflow as tf
import os
import numpy as np


class AdverModel(object):
	"""Text classifier with adversarial training (TF1 graph mode).

	Token ids are embedded (trainable, pretrained, or one-hot), combined with
	a position embedding, passed through stacked multi-head self-attention
	blocks, and classified with a softmax layer.  Training minimises the sum
	of the clean cross-entropy loss and an adversarial loss computed on
	embeddings perturbed along the loss gradient (FGSM-style).
	"""

	def __init__(self, Flags, n_labels, vocabulary_size, cost_matrix=None, root_dir='./', lookup_table=None, wv_model=None, config=None):
		"""Store hyper-parameters and declare the input placeholders.

		Args:
			Flags: parsed flag object providing embedding_size, hidden_size,
				max_sentence, n_layer, lr, l2, module, espilon, etc.
			n_labels: number of output classes.
			vocabulary_size: size of the token vocabulary.
			cost_matrix: accepted but unused in this class.
			root_dir: directory where TensorBoard summaries are written.
			lookup_table: optional pretrained embedding matrix; when given,
				import_embedding() is used instead of a trainable table.
			wv_model: word-vector mode for the text network
				('one-hot' or 'embedding').
			config: transformer-style config (numBlocks, numHeads,
				text_activation, position-embedding choices, ...).
		"""
		# Runtime-fed placeholders: dropout keep probability and batch size.
		self.keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		self.batch_size = tf.placeholder(dtype=tf.float32, shape=[], name='batch_size')
		
		self.vocabulary_size = vocabulary_size
		self.embedding_size = Flags.embedding_size
		self.hidden_size = Flags.hidden_size
		self.max_sentence = Flags.max_sentence

		self.n_layer = Flags.n_layer
		self.lr = Flags.lr
		self.l2 = Flags.l2
		self.dever_embedding = Flags.dever_embedding
		self.module = Flags.module
		self.primary = Flags.primary
		self.n_labels = n_labels
		self.summary_dir = root_dir
		self.lookup_table = lookup_table
		self.wv_model = wv_model
		self.active_wv_model_type = Flags.active_wv_model_type
		self.attention = Flags.attention
		self.active_size = Flags.active_size
		# self.config = config.transformer
		self.config = config
		self.Flags = Flags
		self.espilon = Flags.espilon	# NOTE(review): misspelling of "epsilon" kept to match the external Flags attribute

		# Each batch row packs, column-wise:
		#   [token ids (max_sentence) | label (1) | active features (25) |
		#    sequence length (1) | active actual length (1)]
		# hence the "+ 28" extra columns, matching the tf.split widths below.
		self.batch = tf.placeholder(dtype=tf.float32, shape=[None, self.max_sentence + 28], name='batch')
		self.b_features, self.b_labels, self.b_active_features, self.b_sequence_lengths, self.b_active_actual_lengths  =tf.split(self.batch, [self.max_sentence, 1, 25, 1, 1], axis=1)
		self.b_features = tf.cast(self.b_features, dtype=tf.int32)
		self.b_labels = tf.reshape(tf.cast(self.b_labels, dtype=tf.int32), shape=(-1,))
		self.b_active_features = tf.cast(self.b_active_features, dtype=tf.int32)
		self.b_sequence_lengths = tf.reshape(tf.cast(self.b_sequence_lengths, dtype=tf.int32), shape=(-1,))
		self.b_active_actual_lengths = tf.reshape(tf.cast(self.b_active_actual_lengths, dtype=tf.int32),shape=(-1,))

	def layer_embedding(self, sequenceLen, embeddingSize, all_size, inputs, sign):
		'''
			Transformer-style input embedding:
			word embedding (trainable) + position embedding.
			Here the word embedding is trained by back-propagation while the
			position embedding follows the sin/cos scheme of the original paper.
		'''
		with tf.name_scope('embedding'):
			embedding = tf.Variable(tf.random_normal([all_size, embeddingSize], stddev=0.1), dtype=tf.float32, name="lookup")
			word_embedding = tf.nn.embedding_lookup(embedding, inputs)
			print('word_embedding:', word_embedding)
			position_embedding = self._position_embedding(sequenceLen, embeddingSize, sign=sign)
			perfect_embedding = word_embedding + position_embedding		# as in the paper: element-wise sum of the two embeddings
			# self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是perfect_embedding的shape: {}'.format(perfect_embedding.shape))  # (?, 400, 19704)

			return word_embedding, perfect_embedding
	
	def sincosPositionEmbedding(self, sequenceLen, embeddingSize):
		# Fixed sinusoidal position encoding ("Attention Is All You Need").
		with tf.name_scope('sincos_position_embedding'):
			print(sequenceLen)
			print(self.batch_size)
			# Build the position index and tile it over every sample in the
			# batch; it acts as a sequential one-hot input, so nothing needs
			# to be fed from outside.
			positionIndex = tf.tile(tf.expand_dims(tf.range(sequenceLen), 0), [self.Flags.batch_size, 1])

			# First compute the raw angle pos / 10000^(2i/d) for each position.
			positionEmbedding = np.array([[pos / np.power(10000, (i-i%2) / embeddingSize) for i in range(embeddingSize)] 
										for pos in range(sequenceLen)])

			# Then wrap even dimensions with sin and odd dimensions with cos.
			positionEmbedding[:, 0::2] = np.sin(positionEmbedding[:, 0::2])
			positionEmbedding[:, 1::2] = np.cos(positionEmbedding[:, 1::2])

			# Convert the numpy table to a tensor.
			positionEmbedding_ = tf.cast(positionEmbedding, dtype=tf.float32)

			# Gather into a 3-D tensor [batchSize, sequenceLen, embeddingSize].
			positionEmbedded = tf.nn.embedding_lookup(positionEmbedding_, positionIndex)
			print('positionEmbedded:{}'.format(positionEmbedded))
		return positionEmbedded

	def import_embedding(self, sequenceLen, embeddingSize, all_size, sign):
		'''
		Transformer-style input embedding built from a pretrained word
		embedding (e.g. word2vec / fasttext) + position embedding.
		Only usable by the text network (reads self.b_features directly).
		:return: (word_embedding, perfect_embedding)
		'''
		with tf.name_scope('import_embedding'):
			print('导入预训练的lookup Table')
			# raise self.lookup_table.shape == (self.vocabulary_size, self.embedding_size)
			embedding_table = tf.constant(self.lookup_table, name='lookup')
			word_embedding = tf.nn.embedding_lookup(embedding_table, self.b_features, name='word_embedded')
			position_embedding = self._position_embedding(sequenceLen, embeddingSize, sign)
			perfect_embedding = word_embedding + position_embedding		# as in the paper: element-wise sum of the two embeddings
			# Alternative (unused): skip the sin/cos positional encoding — suspected
			# to add complexity — and instead concatenate the word embedding with a
			# one-hot position encoding:
			# lookupTable = tf.Variable(tf.cast(self.lookup_table, dtype=tf.float32, name="embedding") ,name="embedding")
			# embedded = tf.nn.embedding_lookup(lookupTable, inputs)
			# embeddedWords = tf.concat([embedded, positionEmbedding], -1)		# concat one-hot position embedding with one-hot word embedding
			print('这是perfect_embedding的shape: {}'.format(perfect_embedding.shape))  #
		return word_embedding, perfect_embedding
	
	def layer_one_hot(self, sequenceLen, inputs, all_size, sign):
		'''
			One-hot word encoding concatenated with a one-hot-style position
			embedding (reportedly effective on small datasets), then projected
			down to embedding_size with a dense layer.
			Returns (None, projected_embedding) — there is no separate word
			embedding tensor in this mode.
		'''
		with tf.name_scope('one_hot_embedding'):
			# The one-hot position encoding and the word encoding have different
			# shapes so they cannot be summed; they are concatenated instead.
			# positionEmbedding = self.fixedPositionEmbedding(self.b_features.get_shape()[0].value, sequenceLen)	# non-trainable one-hot position embedding
			positionEmbedding = self._position_embedding(sequenceLen, None, sign)
			embedded = tf.one_hot(inputs, depth=all_size, dtype=tf.float32)
			# lookupTable = tf.Variable(tf.cast(positionEmbedding, dtype=tf.float32, name="embedding") ,name="embedding")
			# embedded = tf.nn.embedding_lookup(lookupTable, inputs)
			embeddedWords = tf.concat([embedded, positionEmbedding], -1)		# concat one-hot position embedding with one-hot word embedding
			embeddedWords = tf.layers.dense(embeddedWords, self.embedding_size, activation=tf.nn.relu, name='{}_concat_transform'.format(sign)) # shape=[batch, sequenceLen, embedding_size]

		return None, embeddedWords
	
	def get_a_lstm_cell(self):
		# NOTE(review): despite the name this builds a GRU cell (with input
		# dropout), not an LSTM cell.
		cell = tf.nn.rnn_cell.GRUCell(self.hidden_size)
		cellD = tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.keep_prob)
		return cellD

	def layer_text_attention(self, word_embedding, perfect_embedding):
		# Stacked multi-head self-attention over the text tokens; the result is
		# flattened, dropout-regularised, and projected to hidden_size into
		# self.transform_outputs (also copied to self.fusion_outputs when the
		# text-only module is selected).  word_embedding is unused here.
		with tf.name_scope('text_attention'):
			embeddedWords = perfect_embedding
			for i in range(self.config.numBlocks):
				with tf.name_scope('attention-{}'.format(i+1)):
					multiHeadAtt = self._multiheadAttention(rawKeys=self.b_features, queries=embeddedWords, keys=embeddedWords, numHeads=self.config.numHeads, scope='text_{}'.format(i+1))	# [batch_size, sequence_length, embedding_size]
					# shape [batch_size, sequence_length, embedding_size]
					# self.embeddedWords = self._feedForward(multiHeadAtt, [self.config.filters, self.embedding_size])	# FIXME: the filter count looks wrong; unclear how this could have run
					embeddedWords = multiHeadAtt
			# outputs = tf.reshape(self.embeddedWords, [-1, self.max_sentence * (self.embedding_size)])	# this shape looks odd
			outputs = tf.reshape(embeddedWords, [-1, self.max_sentence * (embeddedWords.get_shape()[-1].value)])	# flatten all time steps; this shape looks odd — TODO review
			# outputSize = outputs.get_shape()[-1].value
			with tf.name_scope("dropout"):
				outputs = tf.nn.dropout(outputs, keep_prob=self.keep_prob)
			with tf.name_scope('transform'):  # transform bi_outputs to the size: hidden_size, in order to multiply active outputs
				self.transform_outputs = self._custom_dense(inputs=outputs, units=self.hidden_size, activation=self.config.text_activation, name='text_output_dense')
				# if self.primary == 'text':
				# 	self.transform_outputs = tf.layers.dense(inputs=outputs, units=self.hidden_size, activation=tf.nn.relu)
				# elif self.primary == 'active':
				# 	self.transform_outputs = tf.layers.dense(inputs=outputs, units=self.hidden_size, activation=tf.nn.sigmoid)
				if self.module == 'text':		# only taken when the text-only network is explicitly requested
					self.fusion_outputs = self.transform_outputs		# NOTE: passing these tensors as arguments/returns would be cleaner than via instance attributes
	
	def layer_active_attention(self, word_embedding=None, perfect_embedding=None):
		# Same attention stack as layer_text_attention but over the 25 activity
		# features; result is stored in self.active_outputs (and copied to
		# self.fusion_outputs when the activity-only module is selected).
		with tf.name_scope('active_attention'):
			embeddedWords = perfect_embedding
			for i in range(self.config.numBlocks):
				with tf.name_scope("attention-{}".format(i + 1)):
					# shape [batch_size, active_size, embedding_size]
					multiHeadAtt = self._multiheadAttention(rawKeys=self.b_active_features, queries=embeddedWords, keys=embeddedWords, numHeads=self.config.active_numHeads, scope='active')
					# shape [batch_size, active_size, embedding_size]
					# embeddedWords = self._feedForward(multiHeadAtt, [self.config.filters, self.embedding_size])
					embeddedWords = multiHeadAtt
				# active_outputs = tf.reshape(embeddedWords, [-1, self.active_size * (self.embedding_size )])
				active_outputs = tf.reshape(embeddedWords, [-1, self.active_size * (embeddedWords.get_shape()[-1].value)])
			with tf.name_scope("dropout"):
				active_outputs = tf.nn.dropout(active_outputs, keep_prob=self.keep_prob)
			with tf.name_scope('transform'):
				self.active_outputs = self._custom_dense(active_outputs, units=self.hidden_size, activation=self.config.active_activation, name='active_outputs_dense')
			# if self.attention:
			# 	with tf.name_scope('attention'):
			# 		active_outputs = self.layer_attention(active_outputs, self.active_size)	# fuse time steps through attention
			# if self.primary == 'text':
			# 	self.active_outputs = tf.layers.dense(active_outputs, units=self.hidden_size, activation=tf.nn.sigmoid)     # sigmoid makes the weighting more thorough
			# elif self.primary == 'active':
			# 	self.active_outputs = tf.layers.dense(a
			# ctive_outputs, units=self.hidden_size, activation=tf.nn.relu)     # 
				if self.module == 'active':	# only taken when the activity-only network is explicitly requested
					self.fusion_outputs = self.active_outputs
	
	def normal_model(self, word_embedding, perfect_embedding):
		# Clean (unperturbed) forward pass; also caches self.logits, which is
		# what layer_acc() later uses for accuracy (not the perturbed logits).
		with tf.name_scope('normal_loss'):
			with tf.variable_scope('text', reuse=None):
				self.layer_text_attention(None, perfect_embedding)
				self.logits = self.layer_output()		# note: accuracy is computed from these unperturbed logits
				loss = self.layer_loss(self.logits)
		return loss

	def perturbations_model(self, word_embedding, perfect_embedding, loss):
		# Adversarial pass: re-runs the same 'text' variable scope (reuse=True)
		# on gradient-perturbed embeddings and adds the adversarial loss to the
		# clean loss.
		with tf.name_scope('perturb_loss'):
			with tf.variable_scope('text', reuse=True):
				# FIXME: embedding preprocessing first
				# perfect_embedding = self._normalize(perfect_embedding, self.config.freqs)	# TODO: whether to perturb the word, position, or final embedding is an open question
				pertur_perfect_embedding = self._add_perturbation(perfect_embedding, loss)
				self.layer_text_attention(None, pertur_perfect_embedding)
				logits = self.layer_output()
				perturb_loss = self.layer_loss(logits)
		return loss + perturb_loss
	def _normalize(self, some_embedding, weights):		
		"""
			Standardise the model embedding: subtract the (frequency-)weighted
			mean and divide by the weighted standard deviation.
			NOTE(review): `some_embedding` is reassigned to the squared
			deviations before the final expression, so the returned numerator
			is (squared deviations - mean), not (embedding - mean) — looks
			unintended; confirm before reuse (currently only referenced from
			commented-out code).
		"""
		mean = tf.matmul(weights, some_embedding)
		some_embedding = tf.pow(some_embedding - mean, 2.)
		var = tf.matmul(weights, some_embedding)
		stddev = tf.sqrt(1e-6 + var)
		return (some_embedding - mean) / stddev
	
	def _add_perturbation(self, embedded, loss):
		"""
		Add an adversarial perturbation to the word embedding:
		epsilon * g / ||g||2 where g = d(loss)/d(embedded).
		"""
		grad, = tf.gradients(
			loss,
			embedded,
			aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
		grad = tf.stop_gradient(grad)		# stop gradients from flowing through the perturbation itself
		perturb = self.espilon * tf.nn.l2_normalize(grad, dim=1)
		# perturb = self._scaleL2(grad, self.config.model.epsilon)
		return embedded + perturb

	def layer_fusion(self):
		with tf.name_scope('fusion'):		# feature fusion: combine the outputs of the two networks into the features fed to the output layer
			# Fuse the two networks' high-level features by element-wise multiplication.
			self.fusion_outputs = tf.multiply(self.transform_outputs, self.active_outputs, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = tf.add(bi_outputs, active_outputs, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = tf.concat([bi_outputs, active_outputs], axis=1, name='fusion_outputs')  # [batch_size, hidden_size]
			# fusion_outputs = transform_bi_outputs
			print('fusion_outputs={}'.format(self.fusion_outputs))
		# with tf.name_scope('dropout'):
		# 	self.l_dropout = tf.layers.dropout(self.fusion_outputs, self.keep_prob)

	def layer_output(self):
		# Final linear classification layer over self.fusion_outputs.
		with tf.name_scope('output'):
			w_softmax = tf.get_variable('w_softmax', shape=[self.hidden_size, self.n_labels])
			b_softmax = tf.get_variable('b_softmax', shape=[self.n_labels])
			logits = tf.matmul(self.fusion_outputs, w_softmax) + b_softmax      # unnormalised class scores
			print('logits={}'.format(logits))
			return logits

	def layer_loss(self, logits):
		# Mean sparse-softmax cross-entropy plus L2 regularisation over ALL
		# trainable variables (side effects: sets self.losses and self.l2_loss).
		with tf.name_scope('loss'):
			self.l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])          # includes the dense layers

			losses = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=logits)
			self.losses = losses
			self.l2_loss = self.l2 * self.l2_loss
			# self.loss = tf.reduce_mean(losses) +  self.l2_loss + tf.reduce_mean(origin_loss)
			loss = tf.reduce_mean(losses) +  self.l2_loss
			print('loss={}'.format(loss))
		return loss
	
	def layer_optimzer(self):
		# Adam on self.loss; self.global_step is incremented per train_op run.
		self.global_step = tf.Variable(0, name='global_step', trainable=False)
		optimzer = tf.train.AdamOptimizer(learning_rate=self.lr)
		self.train_op = optimzer.minimize(self.loss, global_step=self.global_step)

	def layer_acc(self):
		# Top-1..5 accuracies from the (unperturbed) self.logits.
		def caculate_topK(indices, k):
			# A row is a hit if any of its top-k indices equals the label:
			# (indices - label) has a zero entry exactly at matches.
			print(self.b_labels)
			a = indices - tf.reshape(self.b_labels, (-1, 1))
			# b = tf.equal(a, tf.zeros(shape=(self.batch_size, k), dtype=tf.int32))
			b = tf.equal(a, tf.zeros_like(a, dtype=tf.int32))
			return tf.reduce_mean(tf.reduce_sum(tf.cast(b, tf.float32), axis=1), name='top_{}'.format(k))

		with tf.name_scope('accuracy'):
			self.all_probs = tf.nn.softmax(self.logits)      # keep all class probabilities for downstream output

			_, self.top_1_indices = tf.nn.top_k(self.logits, k=1, name='top_1_indices')
			_, self.top_2_indices = tf.nn.top_k(self.logits, k=2, name='top_2_indices')
			_, self.top_3_indices = tf.nn.top_k(self.logits, k=3, name='top_3_indices')
			_, self.top_4_indices = tf.nn.top_k(self.logits, k=4, name='top_4_indices')
			_, self.top_5_indices = tf.nn.top_k(self.logits, k=5, name='top_5_indices')

			self.acc_top_1 = caculate_topK(self.top_1_indices, 1)
			self.acc_top_2 = caculate_topK(self.top_2_indices, 2)
			self.acc_top_3 = caculate_topK(self.top_3_indices, 3)
			self.acc_top_4 = caculate_topK(self.top_4_indices, 4)
			self.acc_top_5 = caculate_topK(self.top_5_indices, 5)
			print(self.acc_top_1)
			print(self.acc_top_5)

			self.metrics = {
				'top_1': self.acc_top_1,
				'top_2': self.acc_top_2,
				'top_3': self.acc_top_3,
				'top_4': self.acc_top_4,
				'top_5': self.acc_top_5
			}
	
	def layer_summaries(self):
		# TensorBoard scalar summaries plus train/test/val FileWriters under
		# self.summary_dir.
		if not os.path.exists(self.summary_dir):
			os.makedirs(self.summary_dir)
		with tf.name_scope('summaries'):
			summary_loss = tf.summary.scalar('loss', self.loss)
			summary_origin_loss = tf.summary.scalar('origin_loss', self.losses)
			summary_l2_loss = tf.summary.scalar('l2_loss', self.l2_loss)
			summary_top1 = tf.summary.scalar('top1_acc', self.acc_top_1)
			summary_top5 = tf.summary.scalar('top5_acc', self.acc_top_5)
			
			self.train_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.train_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'train'), tf.get_default_graph())
			self.test_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.test_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'test/'), tf.get_default_graph())
			
			self.val_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.val_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'val/'), tf.get_default_graph())
	def _multiheadAttention(self, rawKeys, queries, keys, numUnits=None, causality=False, scope=None, numHeads=None):
		# Scaled dot-product multi-head self-attention with key masking.
		# rawKeys: the raw (integer) key inputs; positions equal to 0 are
		# treated as padding and masked out — presumably id 0 is the pad
		# token; confirm against the vocabulary.
		# NOTE(review): the `causality` flag is accepted but never used.
		# numHeads = self.config.numHeads
		# keepProb = self.config.attention_keepProb
		if numUnits is None:  # default to the last dimension of the input, i.e. the embedding size.
			numUnits = queries.get_shape().as_list()[-1]
		# tf.layers.dense performs the non-linear projection of Q/K/V required by
		# self-attention; this is the per-head weight projection of Multi-Head
		# Attention, done here before splitting (equivalent in principle).
		# Q, K, V all have shape [batch_size, sequence_length, embedding_size].
		Q = tf.layers.dense(queries, numUnits, activation=tf.nn.relu, name='Q_{}'.format(scope))
		K = tf.layers.dense(keys, numUnits, activation=tf.nn.relu, name='K_{}'.format(scope))
		V = tf.layers.dense(keys, numUnits, activation=tf.nn.relu, name='V_{}'.format(scope))

		# Split the last dimension into numHeads pieces and stack them along
		# the batch dimension:
		# Q_, K_, V_ are [batch_size * numHeads, sequence_length, embedding_size/numHeads].
		Q_ = tf.concat(tf.split(Q, numHeads, axis=-1), axis=0) 
		K_ = tf.concat(tf.split(K, numHeads, axis=-1), axis=0) 
		V_ = tf.concat(tf.split(V, numHeads, axis=-1), axis=0)

		# Dot product between queries and keys:
		# [batch_size * numHeads, queries_len, key_len].
		similary = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))

		# Scale the dot products by sqrt of the per-head dimension.
		scaledSimilary = similary / (K_.get_shape().as_list()[-1] ** 0.5)
		print(rawKeys)
		# Tile the raw keys across heads: [batch_size * numHeads, keys_len].
		keyMasks = tf.tile(rawKeys, [numHeads, 1]) 

		# Add a query axis and tile: [batch_size * numHeads, queries_len, keys_len].
		keyMasks = tf.tile(tf.expand_dims(keyMasks, 1), [1, tf.shape(queries)[1], 1])
		print('keyMasks:{}'.format(keyMasks))
		# A very large negative constant used as -inf for masked positions.
		# NOTE(review): -2 ** (32 + 1) = -2^33; the canonical transformer code
		# uses (-2 ** 32 + 1) — functionally both are "very negative".
		paddings = tf.ones_like(scaledSimilary) * (-2 ** (32 + 1))		# effectively negative infinity
		print('paddings:{}'.format(paddings))
		# tf.where(condition, x, y): True picks from x, False from y; all three
		# must have the same shape.  Positions where keyMasks == 0 (padding)
		# are replaced with the -inf paddings.
		maskedSimilary = tf.where(tf.equal(keyMasks, 0), paddings, scaledSimilary) # [batch_size * numHeads, queries_len, key_len]
		print('maskedSimilary:{}'.format(maskedSimilary))
		# Softmax over keys: attention weights [batch_size * numHeads, queries_len, keys_len].
		weights = tf.nn.softmax(maskedSimilary)

		# Weighted sum: [batch_size * numHeads, sequence_length, embedding_size/numHeads].
		outputs = tf.matmul(weights, V_)

		# Re-assemble the heads back to [batch_size, sequence_length, embedding_size].
		outputs = tf.concat(tf.split(outputs, numHeads, axis=0), axis=2, name='final_attention_outputs')
		
		# outputs = tf.nn.dropout(outputs, keep_prob=keepProb)	# FIXME: dropping this should matter little; worth an ablation experiment.

		# Residual connection per sub-layer, H(x) = F(x) + x:
		# outputs += queries		# FIXME: residuals guard against vanishing gradients in deep stacks; with a single layer here it seems unnecessary
		# normalization layer:
		# outputs = self._layerNormalization(outputs)
		return outputs
	
	def fixedPositionEmbedding(self, sequenceLen, embeddingSize=None):
		'''
			One-hot style position embedding: an identity matrix per sample,
			shape [batch, sequenceLen, sequenceLen], non-trainable.
			embeddingSize is currently unused (see commented-out dense below).
		'''
		with tf.name_scope('one-hot_position'):
			embeddedPosition = []
			for batch in range(self.Flags.batch_size):
				embeddedPosition.append(np.eye(sequenceLen))
			embeddedPosition = np.array(embeddedPosition, dtype="float32")	# shape=[batch, sequenceLen, sequenceLen]
			embeddedPosition = tf.constant(embeddedPosition, dtype=tf.float32)
			# if self.wv_model == 'one-hot':	# keep the output as plain one-hot
				# pass
			# else:# trainable position embedding: learn a dedicated vector per position
			# embeddedPosition = tf.layers.dense(embeddedPosition, embeddingSize, activation=tf.nn.relu) # shape=[batch, sequenceLen, embedding_size]
		return embeddedPosition
	
	def _position_embedding(self, sequenceLen, embeddingSize, sign=None):
		'''
			Dispatch to the position-embedding variant configured for the caller.
			sign='text'   -> called by the text network;
			sign='active' -> called by the activity network.
			NOTE(review): if the configured position_embedding is neither
			'sincos' nor 'one-hot' this method implicitly returns None.
		'''
		position_embedding = None
		wv_type = None		# word-vector embedding type of the calling network
		if sign == 'text':
			position_embedding = self.config.text_position_embedding
			wv_type = self.wv_model
		elif sign == 'active':
			position_embedding = self.config.active_position_embedding
			wv_type = self.active_wv_model_type
		else:
			raise RuntimeError('未指定_position_embedding的调用源！')

		if position_embedding == 'sincos':
			return self.sincosPositionEmbedding(sequenceLen, embeddingSize)
		elif position_embedding == 'one-hot':	# FIXME: if the word embedding is also one-hot, fixedPositionEmbedding should not return a densified tensor
			if wv_type == 'one-hot':	# combination: concat(one-hot word embedding, one-hot position)
				return self.fixedPositionEmbedding(sequenceLen, embeddingSize)		# raw (non-densified) position embedding, to be concatenated with the one-hot word vectors
			# else:
			# 	raise RuntimeError('未指定_position_embedding的调用源！')
			elif wv_type == 'embedding':	# combination: word embedding + densified one-hot position embedding
				return tf.layers.dense(self.fixedPositionEmbedding(sequenceLen, None), embeddingSize, activation=tf.nn.relu, name='{}_one_hot_position'.format(sign)) # shape=[batch, sequenceLen, embedding_size]
			else:
				raise RuntimeError('wv_type不是one-hot或者embedding的其中一个')
	def _custom_dense(self, inputs, units, activation, name):
		'''
			Map an activation name string to the corresponding dense layer.
			Note: tf 1.12 converts activation strings to callables automatically;
			the server runs tf 1.8 which lacks that feature, hence this helper.
			NOTE(review): an unrecognised activation string silently returns
			None — callers would then fail later with an obscure error.
		'''
		if activation == 'relu':
			return tf.layers.dense(inputs, units, tf.nn.relu, name=name)
		elif activation == 'sigmoid':
			return tf.layers.dense(inputs, units, tf.nn.sigmoid, name=name)
		elif activation == 'tanh':
			return tf.layers.dense(inputs, units, tf.nn.tanh, name=name)

	def build(self):
		# Assemble the full graph: embedding -> attention -> clean loss ->
		# adversarial loss -> optimizer, metrics, summaries, saver.
		if 'text' in self.module:		# enable the text module
			if self.lookup_table is None:
				if self.wv_model == 'one-hot':
					word_embedding, perfect_embedding = self.layer_one_hot(self.max_sentence, self.b_features, self.vocabulary_size, 'text')
				elif self.wv_model == 'embedding':
					print('embedding')
					word_embedding, perfect_embedding = self.layer_embedding(self.max_sentence, self.embedding_size, self.vocabulary_size, self.b_features, 'text')		# embedding layer trained by back-propagation
			else:
				word_embedding, perfect_embedding = self.import_embedding(self.max_sentence, self.embedding_size, self.vocabulary_size, 'text')		# pretrained external word vectors; static, not trained further
			# Two passes: clean then adversarial.
			# self.layer_text_attention(None, perfect_embedding)
			loss = self.normal_model(word_embedding, perfect_embedding)
			self.loss = self.perturbations_model(word_embedding, perfect_embedding, loss)
		# if 'active' in self.module:		# enable the activity-network module
		# 	# self.layer_active_rnn()
		# 	# if self.wv_model == 'one-hot':
		# 	if self.active_wv_model_type == 'one-hot':
		# 		word_embedding, perfect_embedding = self.layer_one_hot(self.active_size, self.b_active_features, self.n_labels, 'active')		# one-hot layer trained by back-propagation
		# 	elif self.active_wv_model_type == 'embedding':
		# 		word_embedding, perfect_embedding = self.layer_embedding(self.active_size, self.embedding_size, self.n_labels, self.b_active_features, 'active')		# embedding layer trained by back-propagation
		# 	self.layer_active_attention(word_embedding, perfect_embedding)
		# if self.module == 'text_active':
		# 	self.layer_fusion()
		# self.layer_output()
		# self.layer_cost()
		# self.layer_loss()

		self.layer_optimzer()
		self.layer_acc()
		self.layer_summaries()
		self.saver = tf.train.Saver(max_to_keep=10)     # keep the last 10 epochs' checkpoints; their average is reported as the model's final accuracy.
		
	
	
