import tensorflow as tf
import util.data_helper as data_helper
import numpy as np
from deeptriage.BiRNN import BiRNN
import os
import time
import datetime
# from DE.manually_specified import get_cost_matrix
import DE.manually_specified as MS
import util.extract_little_developers as little_dataset

'''
This file loads a previously saved model checkpoint and continues training it,
then evaluates on the following time window.
'''
def main(cost_matrix):
	"""Restore a trained BiRNN graph from a checkpoint, continue training it on
	one time window, then evaluate on the next window.

	Args:
		cost_matrix: cost-sensitive matrix fed into the restored graph's
			'cost_matrix:0' tensor. Shape must match that tensor -- presumably
			(num_classes, num_classes); confirm against the saved graph.

	Returns:
		(acc_top1, acc_top5): mean top-1 / top-5 accuracy over all full
		batches of the evaluation window.
	"""
	bug_msg_all, _ = data_helper.get_msg_all()
	vocabulary = data_helper.create_vocabulary()

	developers_list = data_helper.create_developers_list()
	time_windows = data_helper.split_dataset_by_time_windows(bug_msg_all)

	# developers_list = little_dataset.implement_create_developers_list()  # developer-list hook for the small dataset
	# time_windows = little_dataset.implement_train_and_eval_windows()  # small-dataset variant used when testing the algorithm: two time windows, train + eval

	num_classes = len(developers_list)
	vocabulary_size = len(vocabulary)
	developers_size = len(developers_list)


	# Hyper-parameters for this fine-tuning run.
	batch_size = 32
	active_size = 25
	sequence_length = 400
	epoch = 30

	def get_dataset_iterator(single_window, epoch, window_id, shuffle=True):
		# Build a one-shot iterator over a single time window.
		# NOTE(review): this 'epoch' parameter shadows the outer 'epoch'
		# variable; callers pass the outer value for training and 1 for eval.
		dataset = tf.data.Dataset.from_generator(
			generator=lambda: data_helper.dataset_generator(vocabulary, developers_list, bug_msg_all, single_window,
			                                                active_size, window_id), output_types=(tf.int32, tf.int32, tf.int32, tf.int32, tf.int32),
			# output_shapes=(tf.TensorShape([None, timestep_size]), tf.TensorShape([None])))
			# output_shapes=(tf.TensorShape([sequence_length]), tf.TensorShape([]), tf.TensorShape([active_size])))
			# For rank > 1, any unknown dimension must be declared as None; a 1-D shape of unknown size can simply be [].
			output_shapes=( tf.TensorShape([sequence_length]), tf.TensorShape([]), tf.TensorShape([active_size]), tf.TensorShape([]), tf.TensorShape([])))
		# Shuffling was deliberately removed: the 'shuffle' flag is currently
		# ignored and the dataset is only batched and repeated (see the
		# commented-out branch below).
		# if shuffle:
		# 	dataset = dataset.shuffle(buffer_size=1000).batch(batch_size=batch_size).repeat(count=epoch)  # delete shuffle
		# else:
		dataset = dataset.batch(batch_size=batch_size).repeat(count=epoch)  # delete shuffle
		# iterator = dataset.make_initializable_iterator()  # initializable variant (would require an explicit init run)
		iterator = dataset.make_one_shot_iterator()  # one-shot iterator: no explicit initialization needed
		return iterator

	session_conf = tf.ConfigProto()
	session_conf.gpu_options.allow_growth = True  # grow GPU memory on demand instead of grabbing it all up front

	# Hard-coded checkpoint directory of the previously trained model.
	# root = '/home/wanglinhui/PycharmProjects/2LSTM/deeptriage/runs/1531984755/checkpoints'
	root = '/home/wanglinhui/PycharmProjects/2LSTM/deeptriage/runs/1541610441/checkpoints'
	saver = tf.train.import_meta_graph(root + '/model-0.meta')
	sess = tf.Session(config=session_conf)
	# Load the graph.
	# tf.train.Supervisor is an interesting alternative API for this; see
	# https://blog.csdn.net/mijiaoxiaosan/article/details/75021279
	# Restore the weights -- remember to update the checkpoint path above.
	saver.restore(sess, root + '/model-0')
	# Do NOT run initializers after restoring: it would overwrite the restored
	# weights (this line once caused exactly that bug).
	# sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

	graph = tf.get_default_graph()


	# Look up the restored graph's placeholders/tensors by name.
	# NOTE(review): these names must match the graph saved by deeptriage.BiRNN;
	# confirm against that file if any lookup fails.
	bi_features = graph.get_tensor_by_name("place_features:0")
	bi_labels = graph.get_tensor_by_name("place_labels:0")
	bi_active_features = graph.get_tensor_by_name("batch_active_features:0")
	b_sequence_lengths = graph.get_tensor_by_name('place_sequence_lengths:0')
	bi_place_dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
	bi_active_actual_lengths = graph.get_tensor_by_name("place_active_actual_lengths:0")
	global_step = graph.get_tensor_by_name("global_step:0")
	# bi_cost_sensitive = graph.get_tensor_by_name("place_cost_sensitive:0")
	c_cost_matrix = graph.get_tensor_by_name('cost_matrix:0')

	y = graph.get_tensor_by_name("loss/Softmax:0")
	# Trainable variables of the final (softmax) layer:
	# <tf.Variable 'w_softmax:0' shape=(256, 109) dtype=float32_ref>
	# <tf.Variable 'b_softmax:0' shape=(109,) dtype=float32_ref>


	# indices = graph.get_tensor_by_name("output/Relu:0")
	loss = graph.get_tensor_by_name("loss/add:0")
	top1 = graph.get_tensor_by_name("accuracy/acc_top_1:0")
	# top11 = graph.get_tensor_by_name("accuracy/top1:0")

	train_op = graph.get_operation_by_name("Adam")

	# top5_op = graph.get_operation_by_name("top_5")
	top5 = graph.get_tensor_by_name("accuracy/acc_top_5:0")
	metrics = {
		'loss': loss,
		'top_1': top1,
		'top_5': top5
	}

	timestamp = str(int(time.time()))  # current timestamp in the directory name, so earlier runs are never overwritten
	model_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', timestamp))
	print('writing to {}'.format(model_dir))

	# summaries for loss and accuracy
	# loss_summary = tf.summary.scalar('loss', loss)
	# top_1_summary = tf.summary.scalar('top_1', top1)
	# top_5_summary = tf.summary.scalar('top_5', top5)
	#
	# # train summaries
	# train_summary_op = tf.summary.merge([loss_summary, top_1_summary, top_5_summary])
	# train_summary_dir = os.path.join(model_dir, 'summaries', 'train')
	# train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
	#
	# # eval summaries
	# eval_summary_op = tf.summary.merge([loss_summary, top_1_summary, top_5_summary])
	# eval_summary_dir = os.path.join(model_dir, 'summaries', 'eval')  # this used to say 'train', which is why only a train folder was ever generated
	# eval_summary_writer = tf.summary.FileWriter(eval_summary_dir, sess.graph)
	#
	# # # persist model parameters via checkpoints
	# checkpoint_dir = os.path.abspath(os.path.join(model_dir, 'checkpoints'))
	# checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
	# if not os.path.exists(checkpoint_dir):
	# 	os.makedirs(checkpoint_dir)
	# saver = tf.train.Saver(tf.global_variables())
	# saver = tf.train.Saver(max_to_keep=12)
	#
	# Only window 0 is fine-tuned here; extend the list to train more windows.
	for window_num in [0]:
	# window_num = 1
		print('window_num:{}'.format(window_num))
		train_iterator = get_dataset_iterator(time_windows[window_num], epoch, 0)
		batch = train_iterator.get_next()

		# acc_list = {'top1':[], 'top5':[]}

		# Large range; the loop actually ends via OutOfRangeError when the
		# repeated dataset is exhausted.
		for step in range(1000000):
			try:
				current_step = tf.train.global_step(sess, global_step)

				batch_features, batch_labels, batch_active_features, batch_sequence_lengths, batch_active_actual_lengths = sess.run(
					batch)
				# Skip the final partial batch -- presumably the graph expects a
				# fixed batch_size; confirm against BiRNN.
				if batch_labels.shape[0] != batch_size:
					print('skip small batch')
					continue
				feed_dict = {
					bi_features: batch_features,
					bi_labels: batch_labels,
					bi_active_features: batch_active_features,
					b_sequence_lengths: batch_sequence_lengths,
					bi_active_actual_lengths: batch_active_actual_lengths,
					bi_place_dropout_keep_prob: 0.6,
					c_cost_matrix: cost_matrix
				}
				# _, train_summaries, metrics_1 = sess.run([train_op, train_summary_op, metrics], feed_dict=feed_dict)
				_,  metrics_1 = sess.run([train_op,  metrics], feed_dict=feed_dict)
				# train_summary_writer.add_summary(train_summaries, current_step)

				# for i in range(len(batch_labels)):
				# 	temp = np.argsort(-top_5_indices_1[i], axis=0)[:10]
				# 	predicts.append([' '.join([str(x) for x in temp.tolist()]), batch_labels[i]])
				# Log training metrics every 5 global steps.
				if current_step % 5 == 0:
					# print('step {}: {}'.format(current_step, metrics_1))
					print('train_time:, top1: {:.3f}, top5: {:.3f}, loss: {}'.format(metrics_1['top_1'], metrics_1['top_5'], metrics_1['loss']))
				# acc_list['top1'].append(metrics_1['top_1'])
				# acc_list['top5'].append(metrics_1['top_5'])

				# num_list['top1'].append(metrics_1['top_1']*batch_size)
				# num_list['top5'].append(metrics_1['top_5']*batch_size)
			except tf.errors.OutOfRangeError:
				# Dataset exhausted after 'epoch' repeats -- training done.
				# print('final acc: \n')
				# print('top1: {} \t top5:{}'.format(sum(acc_list['top1']) / len(acc_list['top1']), sum(acc_list['top5']) / len(acc_list['top5'])))
				# # print('top1: {} \t top5:{}'.format(sum(acc_list['top1']) / len(acc_list['top1']), sum(acc_list['top5']) / len(acc_list['top5'])))
				# print('top1_num: {} \t top5_num:{}'.format(sum(num_list['top1']), sum(num_list['top5'])))
				# print(len(acc_list['top1'])*batch_size)
				break
		# saver.save(sess, checkpoint_prefix, global_step=window_num)
		print('window {} finished!'.format(window_num))
		print('开始测试进程！')
		# Run the evaluation pass on the NEXT time window (epoch=1, no dropout).
		eval_iterator = get_dataset_iterator(time_windows[window_num+1], 1, 0)
		batch = eval_iterator.get_next()
		acc_list = {'top1': [], 'top5': []}

		for i in range(100000):
			try:
				current_step = tf.train.global_step(sess, global_step)
				batch_features, batch_labels, batch_active_features, batch_sequence_lengths, batch_active_actual_lengths = sess.run(batch)
				if batch_labels.shape[0] != batch_size:
					print('skip small batch')
					continue
				feed_dict = {
					bi_features: batch_features,
					bi_labels: batch_labels,
					bi_active_features: batch_active_features,
					b_sequence_lengths: batch_sequence_lengths,
					bi_active_actual_lengths: batch_active_actual_lengths,
					bi_place_dropout_keep_prob: 1.0,
					c_cost_matrix: cost_matrix
				}
				metrics_1 = sess.run(metrics, feed_dict=feed_dict)
				# eval_summaries, metrics_1 = sess.run([eval_summary_op, metrics], feed_dict=feed_dict)
				# eval_summary_writer.add_summary(eval_summaries, current_step)
				acc_list['top1'].append(metrics_1['top_1'])
				acc_list['top5'].append(metrics_1['top_5'])
				if current_step % 5 == 0:
					# print('step {}: {}'.format(current_step, metrics_1))
					print('eval_time:, top1: {:.3f}, top5: {:.3f}, loss: {}'.format(metrics_1['top_1'], metrics_1['top_5'], metrics_1['loss']))

			except tf.errors.OutOfRangeError:

				# print('top1: {} \t top5:{}'.format(sum(acc_list['top1']) / len(acc_list['top1']), sum(acc_list['top5']) / len(acc_list['top5'])))
				# print('top1_num: {} \t top5_num:{}'.format(sum(num_list['top1']), sum(num_list['top5'])))
				# print(len(acc_list['top1'])*batch_size)
				break
			# print('top1: {} \t top5:{}'.format(),
			#                                    sum(acc_list['top5']) / len(acc_list['top5'])))
		# NOTE(review): raises ZeroDivisionError if the eval window produced no
		# full batch of size batch_size.
		acc_top1 = sum(acc_list['top1']) / len(acc_list['top1'])
		acc_top5 = sum(acc_list['top5']) / len(acc_list['top5'])
		print('top1: {} \t top5:{}'.format(acc_top1, acc_top5))
			# top_1, top_5 = sess.run([top1, top5], feed_dict=feed_dict)
			# print('top1: {}, top5:{}'.format(top_1, top_5))
			# with open('../data/predict.txt', 'w') as writer:
			# 	for i in range(len(predicts)):
			# 		writer.write(predicts[i][0])
			# 		writer.write(',{}\n'.format(predicts[i][1]))
	return acc_top1, acc_top5       # which metric should be used?

if __name__ == '__main__':
	# Alternative cost-matrix sources kept for reference:
	#   cost_matrix = MS.get_cost_matrix()  # manually specified matrix
	#   For the small dataset, size the matrix automatically from its
	#   developer list:
	#     developers_list = little_dataset.implement_create_developers_list()
	#     cost_matrix = MS.implement_random_cost_matrix(len(developers_list))
	main(MS.get_cost_matrix_from_file(642))
