import tensorflow as tf
import util.data_helper as data_helper
import numpy as np
from deeptriage.BiRNN_CNN import BiRNN_CNN as Model

# from deeptriage.T_RNN import BiRNN
import util.extract_little_developers as little_dataset
# from DE.manually_specified import get_cost_matrix
import DE.manually_specified as manually_specified
import analysis.extract_specified_combination_sample as PC
import os
import time
import datetime

# Flags = tf.flags.FLAGS        # Flags could actually be used here to receive command-line arguments

def main(cost_matrix=None):
	"""Build, train and evaluate the BiRNN-CNN bug-triage model.

	Trains on time window ``i`` and evaluates on the held-out window
	``i + 1``. TensorBoard summaries and checkpoints are written under a
	fresh ``runs/<timestamp>/`` directory.

	Args:
		cost_matrix: optional cost matrix forwarded verbatim to ``Model``;
			presumably shaped (developers, developers) — TODO confirm
			against ``BiRNN_CNN``.
	"""
	# ----- hyper-parameters -------------------------------------------------
	hidden_size = 256
	sequence_length = 400     # max sentence length; should eventually come from data_helper or be configurable
	embedding_size = 256      # dimensionality of each word vector
	epoch = 30
	layer_num = 1
	dropout_keep_prob = 0.6
	log_device_placement = False

	batch_size = 32
	learning_rate = 0.001

	active_size = 25
	filter_sizes = [1, 2, 3, 4, 5, 7, 9, 11]
	num_filters = 128

	# ----- data ---------------------------------------------------------------
	bug_msg_all, _ = data_helper.get_msg_all()
	vocabulary = data_helper.create_vocabulary()
	developers_list = data_helper.create_developers_list()
	# Split the full dataset into the standard 11 time windows.
	time_windows = data_helper.split_dataset_by_time_windows(bug_msg_all)

	num_classes = len(developers_list)
	vocabulary_size = len(vocabulary)
	developers_size = len(developers_list)
	print(developers_size)

	# NOTE(review): this helper closes over several locals above (vocabulary,
	# developers_list, bug_msg_all, batch_size, ...); coupling is high.
	def get_dataset_iterator(single_window, epoch, window_id, shuffle=True):
		"""Return a one-shot iterator over one time window.

		Args:
			single_window: the time window (collection of samples/ids) to iterate.
			epoch: how many times to repeat the window.
			window_id: forwarded to the data generator for bookkeeping.
			shuffle: True for training (1000-element shuffle buffer); False for
				evaluation so sample order stays deterministic.
		"""
		dataset = tf.data.Dataset.from_generator(
			generator=lambda: data_helper.dataset_generator(vocabulary, developers_list, bug_msg_all, single_window, active_size, window_id),
			output_types=(tf.int32, tf.int32, tf.int32, tf.int32, tf.int32),
			# For rank >= 2 an unknown dimension must be declared as None;
			# for rank 1 an unknown size may simply be [].
			output_shapes=(tf.TensorShape([sequence_length]), tf.TensorShape([]), tf.TensorShape([active_size]), tf.TensorShape([]), tf.TensorShape([])))
		if shuffle:
			dataset = dataset.shuffle(buffer_size=1000).batch(batch_size=batch_size).repeat(count=epoch)
		else:
			dataset = dataset.batch(batch_size=batch_size).repeat(count=epoch)
		return dataset.make_one_shot_iterator()

	# Persist the run configuration next to the summaries/checkpoints as a record.
	def write_configuration_info_to_file(root_dir):
		with open(os.path.join(root_dir, 'configuration.txt'), 'w') as writer:
			writer.write('hidden_size = {}\n'.format(hidden_size))
			writer.write('embedding_size = {}\n'.format(embedding_size))
			writer.write('epoch = {}\n'.format(epoch))
			writer.write('layer_num = {}\n'.format(layer_num))
			writer.write('batch_size = {}\n'.format(batch_size))
			writer.write('learning_rate = {}\n'.format(learning_rate))

	# Create graph and session explicitly so resources are released when no longer needed.
	with tf.Graph().as_default():
		session_conf = tf.ConfigProto(
			log_device_placement=log_device_placement,    # log op-to-device placement to help debugging
		)
		session_conf.gpu_options.allow_growth = True
		sess = tf.Session(config=session_conf)

		with sess.as_default():
			model = Model(sequence_length=sequence_length,
			              num_classes=num_classes,
			              vocabulary_size=vocabulary_size,
			              developers_size=developers_size,
			              embedding_size=embedding_size,
			              hidden_size=hidden_size,
			              batch_size=batch_size,
			              active_size=active_size,
			              layer_num=layer_num,
			              cost_matrix=cost_matrix,
			              num_filters=num_filters,
			              filter_sizes=filter_sizes)
		global_step = tf.Variable(0, name="global_step", trainable=False)
		# Loss optimisation is wired up outside the model.
		optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

		from tensorflow.python.ops import clip_ops
		variables = tf.trainable_variables()
		# Compute gradients.
		gradients = optimizer.compute_gradients(loss=model.loss, var_list=variables)
		# BUGFIX: compute_gradients yields (None, var) pairs for variables the
		# loss does not depend on; tf.clip_by_value(None, ...) raises. Skip them.
		capped_gvs = [(tf.clip_by_value(grad, clip_value_min=-5, clip_value_max=5), var)
		              for grad, var in gradients if grad is not None]
		grad_sum = tf.summary.scalar("global_norm/gradient_norm", clip_ops.global_norm(list(zip(*capped_gvs))[0]))

		# Each run of train_op is one training step; apply_gradients also
		# increments global_step by one per step.
		train_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)

		# Final top-k accuracy tensors.
		top_1_acc = model.metrics['top_1']
		top_5_acc = model.metrics['top_5']

		# Timestamped run directory so reruns never overwrite each other.
		timestamp = str(int(time.time()))
		model_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', timestamp))
		print('writing to {}'.format(model_dir))

		# summaries for loss and accuracy
		loss_summary = tf.summary.scalar('loss', model.loss)
		top_1_summary = tf.summary.scalar('top_1', top_1_acc)
		top_5_summary = tf.summary.scalar('top_5', top_5_acc)

		# train summaries (the gradient norm is train-only)
		train_summary_op = tf.summary.merge([loss_summary, top_1_summary, top_5_summary, grad_sum])
		train_summary_dir = os.path.join(model_dir, 'summaries', 'train')
		train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

		# eval summaries (written to their own directory, not 'train')
		eval_summary_op = tf.summary.merge([loss_summary, top_1_summary, top_5_summary])
		eval_summary_dir = os.path.join(model_dir, 'summaries', 'eval')
		eval_summary_writer = tf.summary.FileWriter(eval_summary_dir, sess.graph)

		# Checkpoints store the model parameters.
		checkpoint_dir = os.path.abspath(os.path.join(model_dir, 'checkpoints'))
		checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
		if not os.path.exists(checkpoint_dir):
			os.makedirs(checkpoint_dir)
		saver = tf.train.Saver(max_to_keep=12)
		write_configuration_info_to_file(model_dir)  # record the configuration
		# Initialise all variables.
		sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

		def train_step(model, batch_feature, batch_label, batch_active_feature, batch_sequence_lengths, batch_active_actual_lengths):
			"""Run a single optimisation step and write train summaries."""
			feed_dict = {
				model.b_features: batch_feature,
				model.b_labels: batch_label,
				model.b_active_features: batch_active_feature,
				model.b_sequence_lengths: batch_sequence_lengths,
				model.b_active_actual_lengths: batch_active_actual_lengths,
				model.place_dropout_keep_prob: dropout_keep_prob,
				model.is_train: True,
			}
			_, step, summaries, metrics, loss = sess.run([train_op, global_step, train_summary_op, model.metrics, model.loss], feed_dict=feed_dict)
			time_str = datetime.datetime.now().isoformat()
			if step % 20 == 0:
				print('train_time: {0}, top1: {1:.3f}, top5: {2:.3f}, loss: {3}'.format(time_str, metrics['top_1'], metrics['top_5'], loss))
			train_summary_writer.add_summary(summaries, step)     # write to TensorBoard

		def eval_step(model, batch_feature, batch_label, batch_active_feature, batch_sequence_lengths, batch_active_actual_lengths):
			"""Run a single evaluation step (no dropout, no training).

			Returns:
				(top_5_indices, metrics) for the batch.
			"""
			feed_dict = {
				model.b_features: batch_feature,
				model.b_labels: batch_label,
				model.b_active_features: batch_active_feature,
				model.b_sequence_lengths: batch_sequence_lengths,
				model.b_active_actual_lengths: batch_active_actual_lengths,
				model.place_dropout_keep_prob: 1.0,
				model.is_train: False
			}
			step, summaries, metrics, loss, top_5_indices = sess.run([global_step, eval_summary_op, model.metrics, model.loss, model.top_5_indices], feed_dict=feed_dict)
			eval_summary_writer.add_summary(summaries, step)  # write to TensorBoard
			return top_5_indices, metrics

		for i in [0]:  # extend to e.g. range(10) to sweep all train/eval window pairs
			print('第{}个窗口'.format(i))

			train_iterator = get_dataset_iterator(time_windows[i], epoch, window_id=i)
			batch = train_iterator.get_next()  # next-batch op
			for step in range(10000000):  # effectively "until the iterator raises OutOfRangeError"
				try:
					batch_features, batch_labels, batch_active_features, batch_sequence_lengths, batch_active_actual_lengths = sess.run(batch)
					# Drop a trailing partial batch: the model assumes a fixed batch size.
					if batch_labels.shape[0] != batch_size:
						continue
					train_step(model, batch_features, batch_labels, batch_active_features, batch_sequence_lengths, batch_active_actual_lengths)
				except tf.errors.OutOfRangeError:
					print('已完成所有epoch迭代')
					break

			saver.save(sess, checkpoint_prefix, global_step=i)
			print("下面是第{}个窗口的验证:".format(i+1))
			# Window i + 1 is the held-out evaluation fold; one epoch, no shuffling.
			eval_iterator = get_dataset_iterator(time_windows[i + 1], 1, window_id=i, shuffle=False)
			batch_eval = eval_iterator.get_next()  # next-batch op
			# NOTE(review): `analysis` is collected for error inspection but the
			# code that wrote it to a CSV was commented out; kept for future use.
			analysis = []
			acc_list = {'top1': [], 'top5': []}
			for _ in range(1000000):  # effectively "until the iterator is exhausted"
				try:
					batch_features, batch_labels, batch_active_features, batch_sequence_lengths, batch_active_actual_lengths = sess.run(batch_eval)
					if batch_labels.shape[0] != batch_size:
						continue
					batch_top_5, metrics_1 = eval_step(model, batch_features, batch_labels, batch_active_features, batch_sequence_lengths,
					                                   batch_active_actual_lengths)
					# Record predicted top-5 indices, the true label, and the
					# activity features so mispredictions can be inspected later.
					for k in range(len(batch_top_5)):
						pre_temp = [str(x) for x in batch_top_5[k]]
						temp = [0, 0, 0]
						temp[0] = ' '.join(pre_temp)
						temp[1] = batch_labels[k]
						temp[2] = ' '.join([str(x) for x in batch_active_features[k]])
						analysis.append(temp)
					acc_list['top1'].append(metrics_1['top_1'])
					acc_list['top5'].append(metrics_1['top_5'])
				except tf.errors.OutOfRangeError:
					print('eval finish!')
					# BUGFIX: guard against an empty eval fold (every batch was
					# partial and skipped) which previously raised ZeroDivisionError.
					if acc_list['top1']:
						print('{}\t{}'.format(sum(acc_list['top1']) / len(acc_list['top1']),
						                      sum(acc_list['top5']) / len(acc_list['top5'])))
					break

if __name__ == '__main__':
    # Build a random cost matrix for the 342-developer configuration and
    # hand it to the training routine.
    developer_count = 342
    random_cost_matrix = manually_specified.implement_random_cost_matrix(developer_size=developer_count)
    main(random_cost_matrix)