import tensorflow as tf
import os


class DeepModel(object):
	"""TensorFlow 1.x graph for a multi-class text classifier.

	A bidirectional multi-layer ConvLSTM runs over token embeddings of the
	text features; an optional second RNN over "activity" features can be
	fused in element-wise (currently disabled in build()). The fused feature
	is projected to per-class logits and trained with softmax cross-entropy
	plus L2 regularisation. Top-1/top-5 accuracy and TensorBoard summaries
	are attached to the graph.

	NOTE(review): relies on tf.contrib APIs, so this is TF 1.x graph-mode
	only code.
	"""

	def __init__(self, Flags, n_labels, vocabulary_size, cost_matrix=None, root_dir='./', lookup_table=None):
		"""Create placeholders and unpack the flattened input batch.

		:param Flags: flag object providing embedding_size, hidden_size,
			max_sentence, n_layer, lr and l2.
		:param n_labels: number of output classes.
		:param cost_matrix: optional class-cost matrix (currently unused;
			see layer_cost).
		:param root_dir: directory for TensorBoard summary writers.
		:param lookup_table: optional pre-trained embedding matrix; when
			given, build() uses it as a frozen lookup table.
		"""
		self.keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		# Fed as a float scalar; only used as a placeholder, never as a
		# tensor-shape component (tf.reshape requires integers).
		self.batch_size = tf.placeholder(dtype=tf.float32, shape=[], name='batch_size')
		self.out_channels = 3

		self.vocabulary_size = vocabulary_size
		self.embedding_size = Flags.embedding_size
		self.hidden_size = Flags.hidden_size
		self.max_sentence = Flags.max_sentence
		self.n_layer = Flags.n_layer
		self.lr = Flags.lr
		self.l2 = Flags.l2
		self.n_labels = n_labels
		self.summary_dir = root_dir
		self.lookup_table = lookup_table

		# One packed row per sample: 2000 text-token ids, 1 label,
		# 25 activity ids, 1 text length, 1 activity length = 2028 floats.
		self.batch = tf.placeholder(dtype=tf.float32, shape=[None, 2028], name='batch')
		self.b_features, self.b_labels, self.b_active_features, self.b_sequence_lengths, self.b_active_actual_lengths = tf.split(self.batch, [2000, 1, 25, 1, 1], axis=1)
		self.b_features = tf.cast(self.b_features, dtype=tf.int32)
		self.b_labels = tf.reshape(tf.cast(self.b_labels, dtype=tf.int32), shape=(-1,))
		self.b_active_features = tf.cast(self.b_active_features, dtype=tf.int32)
		self.b_sequence_lengths = tf.reshape(tf.cast(self.b_sequence_lengths, dtype=tf.int32), shape=(-1,))
		self.b_active_actual_lengths = tf.reshape(tf.cast(self.b_active_actual_lengths, dtype=tf.int32), shape=(-1,))

	def layer_embedding(self):
		"""Trainable embedding lookup for the text token ids."""
		with tf.name_scope('embedding'):
			embedding = tf.get_variable('embedding', [self.vocabulary_size, self.embedding_size], dtype=tf.float32)
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是input的shape: {}'.format(self.inputs.shape))

	def import_embedding(self):
		"""Use an externally trained lookup table as a frozen embedding.

		Wrapping the table in a constant makes it non-trainable.
		TODO: it could instead be a trainable variable initialised from the
		pre-trained vectors.
		"""
		with tf.name_scope('embedding'):
			print('导入预训练的lookup Table')
			# Cast explicitly: a float64 numpy table would otherwise push
			# float64 activations into the float32 graph and break the
			# downstream matmuls against float32 variables.
			embedding = tf.constant(self.lookup_table, dtype=tf.float32, name='embedding')
			self.inputs = tf.nn.embedding_lookup(embedding, self.b_features)
			print('这是input的shape: {}'.format(self.inputs.shape))

	def get_a_lstm_cell(self):
		"""Build one ConvLSTM cell wrapped with input dropout.

		NOTE(review): the input shape [5, embedding_size, 1] is shared by
		both the text and the activity branches — confirm it matches the
		activity inputs before re-enabling layer_active_rnn.
		"""
		cell = tf.contrib.rnn.ConvLSTMCell(2, [5, self.embedding_size, 1], output_channels=self.out_channels, kernel_shape=[3, 3])
		return tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=self.keep_prob)

	def layer_text_rnn(self):
		"""Bidirectional multi-layer ConvLSTM over the text embeddings.

		Produces self.transform_bi_outputs / self.fusion_outputs of shape
		(batch, hidden_size).
		"""
		with tf.name_scope('bi_rnn'):
			print(self.inputs)
			# (batch, 2000, emb) -> (batch, max_sentence, 5, emb, 1): each
			# time step is a 5-token "image" fed to the ConvLSTM.
			self.inputs = tf.reshape(self.inputs, shape=(-1, self.max_sentence, 5, self.embedding_size, 1))
			print(self.inputs)
			lstm_fw_cells = tf.nn.rnn_cell.MultiRNNCell([self.get_a_lstm_cell() for _ in range(self.n_layer)], state_is_tuple=True)
			lstm_bw_cells = tf.nn.rnn_cell.MultiRNNCell([self.get_a_lstm_cell() for _ in range(self.n_layer)], state_is_tuple=True)
			outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cells, lstm_bw_cells, self.inputs, sequence_length=self.b_sequence_lengths, dtype=tf.float32)
			print('outputs: {}'.format(outputs))
			fw_output = outputs[0]
			bw_output = outputs[1]
			# Max-pool over all time steps and token positions, leaving one
			# (embedding_size * out_channels) feature vector per direction.
			fw_output = tf.nn.max_pool3d(fw_output, ksize=[1, self.max_sentence, 5, 1, 1], strides=[1, 1, 1, 1, 1], padding='VALID')
			bw_output = tf.nn.max_pool3d(bw_output, ksize=[1, self.max_sentence, 5, 1, 1], strides=[1, 1, 1, 1, 1], padding='VALID')
			print('fw_output: {}'.format(fw_output))
			print('bw_output: {}'.format(bw_output))

			# BUG FIX: reshape dims must be integers; self.batch_size is a
			# float32 placeholder and is rejected by tf.reshape. Let the
			# batch dimension be inferred with -1 instead, and drop the
			# redundant outer reshape around the concat.
			flat_fw = tf.reshape(fw_output, shape=(-1, self.embedding_size * self.out_channels))
			flat_bw = tf.reshape(bw_output, shape=(-1, self.embedding_size * self.out_channels))
			# Concatenate forward and backward outputs.
			self.bi_outputs = tf.concat([flat_fw, flat_bw], 1)
			print('bi_outputs: {}'.format(self.bi_outputs))
			# Project down to hidden_size so the text features can later be
			# fused element-wise with the activity-RNN output.
			with tf.name_scope('transform'):
				self.transform_bi_outputs = tf.layers.dense(inputs=self.bi_outputs, units=self.hidden_size, activation=tf.nn.sigmoid)
				self.fusion_outputs = self.transform_bi_outputs
			print('fusion_outputs:{}'.format(self.fusion_outputs))

	def layer_active_rnn(self):
		"""Multi-layer RNN over one-hot encoded activity features.

		NOTE(review): currently disabled in build(); get_a_lstm_cell()
		returns a ConvLSTM cell whose expected input shape may not match
		this branch's (batch, 25, n_labels) inputs — confirm before use.
		"""
		with tf.name_scope('rnn_active'):
			inputs_active = tf.one_hot(self.b_active_features, depth=self.n_labels, dtype=tf.float32)
			cells = tf.nn.rnn_cell.MultiRNNCell([self.get_a_lstm_cell() for _ in range(self.n_layer)], state_is_tuple=True)
			active_outputs, active_states = tf.nn.dynamic_rnn(cells, inputs=inputs_active, dtype=tf.float32, sequence_length=self.b_active_actual_lengths)
			print('active_outputs:{}'.format(active_outputs))

			# Keep only the last time step's output.
			active_outputs = active_outputs[:, -1, :]
			self.active_outputs = tf.layers.dense(active_outputs, units=self.hidden_size, activation=tf.nn.relu)

	def layer_fusion(self):
		"""Fuse the two branches' high-level features element-wise."""
		with tf.name_scope('fusion'):
			# Element-wise product of the two (batch, hidden_size) features.
			self.fusion_outputs = tf.multiply(self.transform_bi_outputs, self.active_outputs, name='fusion_outputs')
			print('fusion_outputs={}'.format(self.fusion_outputs))

	def layer_output(self):
		"""Final affine projection producing per-class logits."""
		with tf.name_scope('output'):
			w_softmax = tf.get_variable('w_softmax', shape=[self.hidden_size, self.n_labels])
			b_softmax = tf.get_variable('b_softmax', shape=[self.n_labels])
			self.logits = tf.matmul(self.fusion_outputs, w_softmax) + b_softmax
			print('logits={}'.format(self.logits))

	def layer_cost(self):
		"""Re-weight logits with a learned cost matrix (disabled in build)."""
		with tf.name_scope('cost'):
			self.weights = tf.Variable(tf.random_normal([self.n_labels, self.n_labels]), name='cost_matrix')
			self.logits = tf.matmul(self.logits, self.weights)

	def layer_loss(self):
		"""Softmax cross-entropy loss plus scaled L2 regularisation."""
		with tf.name_scope('loss'):
			# L2 over every trainable variable, including the dense layers.
			self.l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
			losses = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=self.logits)
			self.losses = losses
			self.l2_loss = self.l2 * self.l2_loss
			self.loss = tf.reduce_mean(losses) + self.l2_loss
			print('loss={}'.format(self.loss))

	def layer_optimzer(self):
		"""Adam optimiser with a global-step counter."""
		self.global_step = tf.Variable(0, name='global_step', trainable=False)
		optimzer = tf.train.AdamOptimizer(learning_rate=self.lr)
		self.train_op = optimzer.minimize(self.loss, global_step=self.global_step)

	def layer_acc(self):
		"""Attach top-1 / top-5 accuracy tensors derived from the logits."""
		def caculate_topK(indices, k):
			# A row is a hit when any of its top-k indices equals the true
			# label; the difference is zero exactly at a match.
			print(self.b_labels)
			a = indices - tf.reshape(self.b_labels, (-1, 1))
			b = tf.equal(a, tf.zeros_like(a, dtype=tf.int32))
			return tf.reduce_mean(tf.reduce_sum(tf.cast(b, tf.float32), axis=1), name='top_{}'.format(k))

		with tf.name_scope('accuracy'):
			# Full distribution, kept for inspection at inference time.
			self.all_probs = tf.nn.softmax(self.logits)

			_, self.top_1_indices = tf.nn.top_k(self.logits, k=1, name='top_1_indices')
			_, self.top_5_indices = tf.nn.top_k(self.logits, k=5, name='top_5_indices')

			self.acc_top_1 = caculate_topK(self.top_1_indices, 1)
			self.acc_top_5 = caculate_topK(self.top_5_indices, 5)
			print(self.acc_top_1)
			print(self.acc_top_5)

			self.metrics = {
				'top_1': self.acc_top_1,
				'top_5': self.acc_top_5
			}

	def layer_summaries(self):
		"""Create scalar summaries and train/test/val summary writers."""
		if not os.path.exists(self.summary_dir):
			os.makedirs(self.summary_dir)
		with tf.name_scope('summaries'):
			summary_loss = tf.summary.scalar('loss', self.loss)
			summary_origin_loss = tf.summary.scalar('origin_loss', self.losses)
			summary_l2_loss = tf.summary.scalar('l2_loss', self.l2_loss)
			summary_top1 = tf.summary.scalar('top1_acc', self.acc_top_1)
			summary_top5 = tf.summary.scalar('top5_acc', self.acc_top_5)

			self.train_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.train_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'train'), tf.get_default_graph())
			self.test_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.test_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'test/'), tf.get_default_graph())

			self.val_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.val_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'val/'), tf.get_default_graph())

	def build(self):
		"""Wire the full graph: embedding -> text RNN -> output -> loss.

		The activity branch (layer_active_rnn / layer_fusion) and the cost
		layer are intentionally disabled.
		"""
		if self.lookup_table is None:
			self.layer_embedding()
		else:
			self.import_embedding()
		self.layer_text_rnn()
		self.layer_output()
		self.layer_loss()
		self.layer_optimzer()
		self.layer_acc()
		self.layer_summaries()
		self.saver = tf.train.Saver(max_to_keep=1)
		
	
	