import tensorflow as tf
from sentiment.data_utils import load_data_and_labels,create_vocabulary,split_train_and_test,data_padding_and_to_ids
import numpy as np

hidden_size = 10          # LSTM hidden units per direction
timestep_size = max_len = 35     # maximum sentence length (in tokens)
embedding_size = 200      # length of each word-embedding vector
num_classes = 2           # binary sentiment classification
epoch = 1                 # passes over the training set (dataset .repeat count)
layer_num = 2             # NOTE(review): defined but only used in commented-out MultiRNNCell code below

# batch_size = tf.placeholder(tf.int32)
batch_size = 100
learning_rate = 0.1       # NOTE(review): unusually high for Adam — confirm intended
# learning_rate = tf.placeholder(tf.float32)


# Load the corpus and build the vocabulary at import time; train() below
# reads these module-level arrays directly.
features, labels = load_data_and_labels()
# features = tf.constant(value=features)
# labels = tf.constant(value=labels)
vocabulary_list, word_vocabulary, index_vocabulary = create_vocabulary(features)
# Pad every sentence to max_len and map words to integer ids.
words_as_ids = data_padding_and_to_ids(features, word_vocabulary)
train_features, train_labels, test_features, test_labels = split_train_and_test(words_as_ids, labels)


def train():
	"""Train a bidirectional-LSTM sentiment classifier, then run a rough evaluation.

	Reads the module-level splits (train_features, train_labels, test_features,
	test_labels) prepared at import time. Builds the TF1 graph, trains for up to
	`epoch` passes over the training data, then evaluates on the test split
	inside a single tf.Session. Returns nothing; progress is printed.
	"""
	# Placeholders that feed the full feature/label arrays into the tf.data pipeline.
	place_features = tf.placeholder(dtype=tf.int32, shape=[None, timestep_size], name="features")
	place_labels = tf.placeholder(dtype=tf.int32, shape=[None], name="labels")
	# Fed 1.0 at test time, which effectively disables dropout.
	place_dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

	dataset = tf.data.Dataset.from_tensor_slices((place_features, place_labels))
	dataset = dataset.shuffle(buffer_size=1000).batch(batch_size=batch_size).repeat(count=epoch)
	iterator = dataset.make_initializable_iterator()  # must be initialized before use

	# Per-batch placeholders. First dim is None because the final batch is
	# usually smaller than batch_size.
	b_features = tf.placeholder(dtype=tf.int32, shape=[None, timestep_size], name='place_features')
	b_labels = tf.placeholder(dtype=tf.int32, shape=[None], name='place_labels')

	# Word-embedding lookup table, learned from scratch.
	embedding = tf.get_variable('embedding', [len(vocabulary_list), embedding_size], dtype=tf.float32)
	inputs = tf.nn.embedding_lookup(embedding, b_features)

	lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, forget_bias=1)
	lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, forget_bias=1)
	lstm_fw_cell = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_fw_cell, input_keep_prob=place_dropout_keep_prob)
	lstm_bw_cell = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_bw_cell, input_keep_prob=place_dropout_keep_prob)

	# dynamic_rnn infers the batch size at run time, so no explicit zero_state
	# (which would require a fixed batch_size) is needed — this also handles
	# the shorter final batch correctly.
	outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, inputs, dtype=tf.float32)

	# Max-pool each direction over all timestep_size time steps,
	# producing [batch, 1, hidden_size] per direction.
	fw_output = tf.layers.max_pooling1d(outputs[0], pool_size=timestep_size, strides=1)
	bw_output = tf.layers.max_pooling1d(outputs[1], pool_size=timestep_size, strides=1)

	# Concatenate the forward and backward summaries -> [batch, hidden_size * 2].
	output = tf.reshape(tf.concat([fw_output, bw_output], 1), [-1, hidden_size * 2])

	# Output (softmax) layer: logits = output @ W + b.
	with tf.variable_scope('rnn_softmax', reuse=False):
		w_softmax = tf.get_variable('w_softmax', shape=[hidden_size * 2, num_classes])
		b_softmax = tf.get_variable('b_softmax', shape=[num_classes])
	logits = tf.matmul(output, w_softmax) + b_softmax

	loss = tf.losses.sparse_softmax_cross_entropy(labels=b_labels, logits=logits)
	predicted_classes = tf.argmax(logits, 1)

	# tf.metrics.accuracy is a *streaming* metric backed by local variables;
	# its accumulators must be reset before evaluation (see below), otherwise
	# the reported test accuracy includes all training-phase counts.
	accuracy = tf.metrics.accuracy(labels=b_labels, predictions=predicted_classes, name='acc_op')
	metrics = {'accuracy': accuracy}
	tf.summary.scalar('accuracy', accuracy[1])  # report accuracy to tensorboard

	optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
	# Bug fix: tf.train.get_global_step() returns None when no global-step
	# variable exists, so the step counter was silently never tracked.
	# get_or_create_global_step() creates it on first use.
	global_step = tf.train.get_or_create_global_step()
	# Each run of train_op is one training step; gradients are computed for
	# all trainable variables and global_step is incremented automatically.
	train_op = optimizer.minimize(loss, global_step=global_step)

	with tf.Session() as sess:
		feed_dict = {
			place_features: train_features,
			place_labels: train_labels,
		}
		init_op = tf.group(iterator.initializer, tf.global_variables_initializer(), tf.local_variables_initializer())
		sess.run(init_op, feed_dict=feed_dict)
		batch = iterator.get_next()  # next-batch op; build once, outside the loop

		# Effectively loops n_train * epoch / batch_size times; after that the
		# iterator raises OutOfRangeError and we break.
		for step in range(20000):
			try:
				batch_features, batch_labels = sess.run(batch)
				# train_op returns nothing; it only updates the parameters.
				_, acc, loss_results = sess.run(
					[train_op, metrics, loss],
					feed_dict={b_features: batch_features, b_labels: batch_labels, place_dropout_keep_prob: 0.9})
				print('acc: %.4f' % acc['accuracy'][1], 'loss: %.4f' % loss_results)
			except tf.errors.OutOfRangeError:
				print('已完成所有epoch迭代')
				break

		print("下面是验证(比较粗糙):")
		# Bug fix: reset the streaming-accuracy accumulators (local variables)
		# so the loop below reports test-set accuracy only, instead of a blend
		# of training and test counts.
		sess.run(tf.local_variables_initializer())
		sess.run([iterator.initializer], feed_dict={place_features: test_features, place_labels: test_labels})
		batch = iterator.get_next()
		for step in range(100):
			try:
				batch_features, batch_labels = sess.run(batch)
				# No train_op here; dropout disabled via keep_prob = 1.0.
				test_acc, test_loss = sess.run(
					[metrics, loss],
					feed_dict={b_features: batch_features, b_labels: batch_labels, place_dropout_keep_prob: 1.0})
				print('test_acc: %.4f' % test_acc['accuracy'][1], 'test_loss: %.4f' % test_loss)
			except tf.errors.OutOfRangeError:
				break
if __name__ == "__main__":
	# Script entry point: build the graph, train, then evaluate.
	train()