import tensorflow as tf
import os


class TextCNN(object):
	'''
	Two-channel TextCNN (Kim-2014 style); "LeeCNN" is the single-channel
	variant of this architecture.

	Channel 0 is a frozen copy of a pretrained embedding lookup table and
	channel 1 is a trainable copy initialized from the same table; both are
	fed to parallel conv/max-pool towers (filter sizes 2/3/4), followed by a
	dense layer, dropout and a softmax output.
	'''
	def __init__(self, Flags, n_labels, vocabulary_size, root_dir='./', lookup_table=None, wv_model=None, config=None):
		'''
		Build the input placeholders and record hyper-parameters.

		:param Flags: flag object carrying embedding_size, hidden_size,
			max_sentence, n_layer, lr and l2.
		:param n_labels: number of output classes.
		:param vocabulary_size: size of the vocabulary.
		:param root_dir: directory that receives TensorBoard summaries.
		:param lookup_table: pretrained embedding matrix; assumed shape
			(vocabulary_size, embedding_size) -- TODO confirm with caller.
		:param wv_model: word-vector model handle (stored, unused here).
		:param config: extra configuration object (stored, unused here).
		'''
		# Runtime-fed placeholders.
		self.keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
		self.batch_size = tf.placeholder(dtype=tf.float32, shape=[], name='batch_size')
		self.is_train = tf.placeholder(dtype=tf.bool, name='is_train')	# toggles batch-norm train/inference mode

		self.vocabulary_size = vocabulary_size
		self.embedding_size = Flags.embedding_size
		self.hidden_size = Flags.hidden_size
		self.max_sentence = Flags.max_sentence	# fixed (padded) sentence length
		self.n_layer = Flags.n_layer
		self.lr = Flags.lr
		self.l2 = Flags.l2	# L2 regularization coefficient

		self.n_labels = n_labels
		self.summary_dir = root_dir
		self.lookup_table = lookup_table
		self.wv_model = wv_model

		self.config = config
		self.filter_sizes = [2, 3, 4]
		self.num_filters = 100

		# One batch row = [token ids (max_sentence) | label (1) | sequence length (1)].
		self.batch = tf.placeholder(dtype=tf.float32, shape=[None, self.max_sentence + 2], name='batch')
		self.b_features, self.b_labels, self.b_sequence_lengths = tf.split(self.batch, [self.max_sentence, 1, 1], axis=1)
		self.b_features = tf.cast(self.b_features, dtype=tf.int32)
		self.b_labels = tf.reshape(tf.cast(self.b_labels, dtype=tf.int32), shape=(-1,))
		self.b_sequence_lengths = tf.reshape(tf.cast(self.b_sequence_lengths, dtype=tf.int32), shape=(-1,))

	def import_embedding(self):
		'''
		Turn the externally loaded lookup table into the two embedding
		channels used by the network.

		The static channel is a ``tf.constant`` (never trained); the dynamic
		channel is a ``tf.Variable`` initialized from the same pretrained
		vectors and fine-tuned during training.  Produces ``self.inputs``
		with shape (batch, max_sentence, embedding_size, 2).
		:return: None (sets ``self.inputs``).
		'''
		with tf.name_scope('embedding'):
			print('导入预训练的lookup Table')
			static_embedding = tf.constant(self.lookup_table, name='static_embedding')
			static_inputs = tf.nn.embedding_lookup(static_embedding, self.b_features)
			dynamic_embedding = tf.Variable(self.lookup_table, name='dynamic_lookup')
			dynamic_inputs = tf.nn.embedding_lookup(dynamic_embedding, self.b_features)
			# Stack along a new trailing axis so the two channels become the
			# conv2d input channels.
			self.inputs = tf.stack([static_inputs, dynamic_inputs], -1)
			print('这是input的shape: {}'.format(self.inputs.shape))  # (batch_size, max_sentence, embedding_size, 2)

	def layer_text(self):
		'''
		Convolution + max-over-time pooling towers, one per filter size,
		concatenated and fed through a dense + dropout layer.
		:return: None (sets ``self.outputs``).
		'''
		pooled_outputs = []
		for i, filter_size in enumerate(self.filter_sizes):
			with tf.name_scope("conv-maxpool-%s" % filter_size):
				# 2 input channels (static/dynamic embeddings), num_filters output channels.
				filter_shape = [filter_size, self.embedding_size, 2, self.num_filters]
				W_1 = tf.Variable(tf.truncated_normal(
					filter_shape, stddev=0.1), name="W_1")
				b_1 = tf.Variable(tf.constant(
					0.1, shape=[self.num_filters]), name="b_1")
				conv_1 = tf.nn.conv2d(
					self.inputs,
					W_1,
					strides=[1, 1, 1, 1],
					padding="VALID",
					name="conv_1")

				# NOTE(review): bias is added after batch norm; BN's beta makes
				# the extra bias redundant, but it is kept for compatibility
				# with previously trained checkpoints.
				h_1 = tf.layers.batch_normalization(
					conv_1, training=self.is_train, name='bn_{}'.format(filter_size))
				h_1 = tf.nn.relu(tf.nn.bias_add(h_1, b_1), name='relu_1')
				# Max over all valid positions -> one value per filter.
				pooled = tf.nn.max_pool(
					h_1,
					ksize=[1, self.max_sentence - filter_size + 1, 1, 1],
					strides=[1, 1, 1, 1],
					padding='VALID',
					name="pool")
				pooled_outputs.append(pooled)
		num_filters_total = self.num_filters * len(self.filter_sizes)
		self.h_pool = tf.concat(pooled_outputs, 3)
		print(self.h_pool)
		self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
		print(self.h_pool_flat)
		# Fully-connected layer on the concatenated pooled features.
		self.fcn_1 = tf.layers.dense(inputs=self.h_pool_flat,
									 units=1000, activation=tf.nn.relu)
		# Add dropout
		with tf.name_scope("dropout"):
			self.outputs = tf.nn.dropout(self.fcn_1, self.keep_prob)

	def layer_output(self):
		'''
		Final affine projection to class logits.
		:return: None (sets ``self.logits`` of shape (batch, n_labels)).
		'''
		with tf.name_scope('output'):
			w_softmax = tf.get_variable('w_softmax', shape=[1000, self.n_labels])
			b_softmax = tf.get_variable('b_softmax', shape=[self.n_labels])
			self.logits = tf.matmul(self.outputs, w_softmax) + b_softmax	# unnormalized class scores
			print('logits={}'.format(self.logits))

	def layer_loss(self):
		'''
		Cross-entropy loss plus L2 penalty over all trainable variables.
		:return: None (sets ``self.losses``, ``self.l2_loss``, ``self.loss``).
		'''
		with tf.name_scope('loss'):
			# L2 over every trainable variable (embeddings, conv filters, FC weights, biases).
			self.l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
			losses = tf.losses.sparse_softmax_cross_entropy(labels=self.b_labels, logits=self.logits)
			self.losses = losses
			self.l2_loss = self.l2 * self.l2_loss
			self.loss = tf.reduce_mean(losses) + self.l2_loss
			print('loss={}'.format(self.loss))

	def layer_optimzer(self):
		'''
		Create the Adam training op.

		Fix: ``minimize`` now runs under ``tf.control_dependencies`` on the
		UPDATE_OPS collection — previously the batch-norm moving mean/variance
		update ops were collected but never executed, so inference-mode BN
		used stale (initial) statistics.
		:return: None (sets ``self.global_step``, ``self.update_ops``, ``self.train_op``).
		'''
		self.global_step = tf.Variable(0, name='global_step', trainable=False)
		self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)	# batch-norm moving-stat updates
		optimzer = tf.train.AdamOptimizer(learning_rate=self.lr)
		with tf.control_dependencies(self.update_ops):
			self.train_op = optimzer.minimize(self.loss, global_step=self.global_step)

	def layer_acc(self):
		'''
		Top-k accuracy metrics for k = 1..5.

		A prediction counts as a top-k hit when the true label appears among
		the k highest-scoring logits.
		:return: None (sets ``self.acc_top_1`` .. ``self.acc_top_5`` and ``self.metrics``).
		'''
		def calculate_top_k(indices, k):
			print(self.b_labels)
			# Zero difference anywhere in the top-k row means the label was hit.
			a = indices - tf.reshape(self.b_labels, (-1, 1))
			b = tf.equal(a, tf.zeros_like(a, dtype=tf.int32))
			# Per-row hit indicator (0/1) averaged over the batch.
			return tf.reduce_mean(tf.reduce_sum(tf.cast(b, tf.float32), axis=1), name='top_{}'.format(k))

		with tf.name_scope('accuracy'):
			self.all_probs = tf.nn.softmax(self.logits)	# full distribution, kept for downstream output

			_, self.top_1_indices = tf.nn.top_k(self.logits, k=1, name='top_1_indices')
			_, self.top_2_indices = tf.nn.top_k(self.logits, k=2, name='top_2_indices')
			_, self.top_3_indices = tf.nn.top_k(self.logits, k=3, name='top_3_indices')
			_, self.top_4_indices = tf.nn.top_k(self.logits, k=4, name='top_4_indices')
			_, self.top_5_indices = tf.nn.top_k(self.logits, k=5, name='top_5_indices')

			self.acc_top_1 = calculate_top_k(self.top_1_indices, 1)
			self.acc_top_2 = calculate_top_k(self.top_2_indices, 2)
			self.acc_top_3 = calculate_top_k(self.top_3_indices, 3)
			self.acc_top_4 = calculate_top_k(self.top_4_indices, 4)
			self.acc_top_5 = calculate_top_k(self.top_5_indices, 5)
			print(self.acc_top_1)
			print(self.acc_top_5)

			self.metrics = {
				'top_1': self.acc_top_1,
				'top_2': self.acc_top_2,
				'top_3': self.acc_top_3,
				'top_4': self.acc_top_4,
				'top_5': self.acc_top_5
			}

	def layer_summaries(self):
		'''
		Set up TensorBoard scalar summaries and one FileWriter per split
		(train / test / val) under ``self.summary_dir``.
		:return: None.
		'''
		if not os.path.exists(self.summary_dir):
			os.makedirs(self.summary_dir)
		with tf.name_scope('summaries'):
			summary_loss = tf.summary.scalar('loss', self.loss)
			summary_origin_loss = tf.summary.scalar('origin_loss', self.losses)
			summary_l2_loss = tf.summary.scalar('l2_loss', self.l2_loss)
			summary_top1 = tf.summary.scalar('top1_acc', self.acc_top_1)
			summary_top5 = tf.summary.scalar('top5_acc', self.acc_top_5)

			self.train_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.train_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'train'), tf.get_default_graph())
			self.test_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.test_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'test/'), tf.get_default_graph())

			self.val_summary_op = tf.summary.merge([summary_loss, summary_top1, summary_top5, summary_origin_loss, summary_l2_loss])
			self.val_summary_writer = tf.summary.FileWriter(os.path.join(self.summary_dir, 'val/'), tf.get_default_graph())

	def build(self):
		'''
		Assemble the full graph: embeddings -> conv/pool -> logits -> loss ->
		metrics -> optimizer -> summaries, then create the checkpoint saver.
		:return: None.
		'''
		self.import_embedding()	# pretrained word vectors; static channel stays frozen
		self.layer_text()
		self.layer_output()
		self.layer_loss()
		self.layer_acc()
		self.layer_optimzer()

		self.layer_summaries()
		self.saver = tf.train.Saver(max_to_keep=5)	# keep the last 5 checkpoints
		
	
	
