import tensorflow as tf
import util.data_helper as data_helper
import numpy as np
from deeptriage.BiRNN import BiRNN
import os
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'      # (disabled) force CPU-only execution
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import logging
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('JG-20')     # module-level logger for this fine-tuning experiment
# TF_CPP_MIN_LOG_LEVEL values:
# '0' (default): show all log messages
# '1': additionally suppress INFO messages
# '2': additionally suppress WARNING messages
# '3': additionally suppress ERROR messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import time
import datetime
from DE.manually_specified import get_cost_matrix
import util.extract_little_developers as little_dataset
from DE.DE_cost_matrix import read_optimal_cost_matrix

# Translation of the note below: "Used to load the pre-trained model, then
# fine-tune it based on the differential evolution (DE) algorithm.
# warning: timing statistics and per-stage statistics still need to be added."
'''
用来加载预处理模型，然后基于差分进化算法进行微调fine_tuning.
warning: 需要加入时间的统计操作，以及对阶段统计的加入
'''
def main(round_id: int, window_id: int):
	"""Load a pre-trained BiRNN checkpoint and fine-tune it on one time window.

	Restores a frozen TF1 graph from a hardcoded checkpoint directory, trains
	it for ``epoch`` passes over ``time_windows[window_id]``, then evaluates on
	``time_windows[window_id + 1]``.

	:param round_id: experiment round index, forwarded to the dataset generator
	:param window_id: index of the time window used for training; the window at
		``window_id + 1`` is used for evaluation
	:return: ``(acc_top1, acc_top5)`` — accuracies averaged over full-size
		evaluation batches
	"""
	bug_msg_all, _ = data_helper.get_msg_all()
	vocabulary = data_helper.create_vocabulary()

	developers_list = data_helper.create_developers_list()
	time_windows = data_helper.split_dataset_by_time_windows(bug_msg_all)

	# developers_list = little_dataset.implement_create_developers_list()  # developer-list hook for the small extracted dataset
	# time_windows = little_dataset.implement_train_and_eval_windows()  # two windows (train + eval) split from the small dataset, used while debugging the algorithm

	# NOTE(review): num_classes and vocabulary_size are computed but never used below
	num_classes = len(developers_list)
	vocabulary_size = len(vocabulary)
	developers_size = len(developers_list)


	# hyper-parameters
	batch_size = 32
	active_size = 25
	sequence_length = 400
	epoch = 30

	def get_dataset_iterator(single_window, epoch, round_id, shuffle=True):
		"""Build a one-shot tf.data iterator over a single time window.

		:param single_window: the time window (dataset slice) to iterate over
		:param epoch: how many times to repeat the window
		:param round_id: experiment round index, forwarded to the generator
		:param shuffle: True for the training pass, False for evaluation
		:return: one-shot iterator yielding batches of
			(features, labels, active_features, sequence_lengths, active_actual_lengths)
		"""
		dataset = tf.data.Dataset.from_generator(
			generator=lambda: data_helper.dataset_generator(vocabulary, developers_list, bug_msg_all, single_window, active_size, round_id),
			output_types=(tf.int32, tf.int32, tf.int32, tf.int32, tf.int32),
			# output_shapes=(tf.TensorShape([None, timestep_size]), tf.TensorShape([None])))
			# output_shapes=(tf.TensorShape([sequence_length]), tf.TensorShape([]), tf.TensorShape([active_size])))
			# For shapes of rank > 1, an undeclared dimension must be set to None; for rank-1 shapes an undeclared size is just []
			output_shapes=(tf.TensorShape([sequence_length]), tf.TensorShape([]), tf.TensorShape([active_size]), tf.TensorShape([]), tf.TensorShape([])))
		# Training pass: shuffle=True.
		# Evaluation pass: shuffle=False, so batches arrive in a stable order.
		if shuffle:
			dataset = dataset.shuffle(buffer_size=1000).batch(batch_size=batch_size).repeat(count=epoch)  # delete shuffle
		else:
			dataset = dataset.batch(batch_size=batch_size).repeat(count=epoch)  # delete shuffle
		# iterator = dataset.make_initializable_iterator()  # initializable iterator (would need explicit initialization later)
		iterator = dataset.make_one_shot_iterator()  # one-shot iterator, no explicit initialization required
		return iterator

	graph = tf.get_default_graph()
	session_conf = tf.ConfigProto()
	session_conf.gpu_options.allow_growth = True  # grow GPU memory on demand instead of grabbing it all up front
	# cpu_num = 5
	# session_conf = tf.ConfigProto(device_count = {'CPU':cpu_num},
	#                               inter_op_parallelism_threads=cpu_num,
	#                               intra_op_parallelism_threads=cpu_num,
	#                               log_device_placement=True)
	sess = tf.Session(config=session_conf, graph=graph)
	# sess.run(tf.global_variables_initializer())         # is this initialization needed here?....

	# root = '/home/wanglinhui/PycharmProjects/2LSTM/deeptriage/runs/1531984755/checkpoints'
	# root = '/home/wanglinhui/PycharmProjects/2LSTM/deeptriage/runs/1536155599/checkpoints'
	# root = '/home/wanglinhui/PycharmProjects/2LSTM/deeptriage/runs/1538017693/checkpoints'
	# NOTE(review): hardcoded absolute checkpoint path — consider making this configurable
	root = '/home/wanglinhui/PycharmProjects/2LSTM/deeptriage/runs/1541610441/checkpoints'
	# root = '../data/checkpoints2'
	saver = tf.train.import_meta_graph(root + '/model-0.meta')
	# Load the graph structure from the .meta file.
	# There is an interesting API worth looking at: tf.train.Supervisor
	# see https://blog.csdn.net/mijiaoxiaosan/article/details/75021279
	# Parameters are restored later (saver.restore); remember to update the checkpoint file


	# Fetch the pre-trained graph's placeholders and ops by their exact names.
	bi_features = graph.get_tensor_by_name("place_features:0")
	bi_labels = graph.get_tensor_by_name("place_labels:0")
	bi_active_features = graph.get_tensor_by_name("batch_active_features:0")
	b_sequence_lengths = graph.get_tensor_by_name('place_sequence_lengths:0')
	bi_place_dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
	bi_active_actual_lengths = graph.get_tensor_by_name("place_active_actual_lengths:0")
	logits = graph.get_tensor_by_name("output/add:0")
	global_step = graph.get_tensor_by_name("global_step:0")
	loss = graph.get_tensor_by_name("loss/add:0")
	top1 = graph.get_tensor_by_name("accuracy/acc_top_1:0")
	top5 = graph.get_tensor_by_name("accuracy/acc_top_5:0")
	new_train_op = graph.get_operation_by_name("Adam")

	def new_layer(inputs, units, activation_function=None):
		'''
		Experimental custom layer (currently unused — see the disabled "cost" scope below).
		Re-weights the input tensor element-wise with a trainable weight vector.
		:param inputs: logits tensor; presumably shaped [batch_size, developers_size] — TODO confirm
		:param units: width used only for the (unused) bias variable's shape
		:param activation_function: optional activation applied to the outputs
		:return: the re-weighted (and optionally activated) tensor
		'''
		weights = tf.Variable(tf.random_normal([developers_size]))
		# NOTE(review): biases is created but never used in the computation below
		biases = tf.Variable(tf.zeros([1, units]) + 0.1)
		# wx_plus_b = tf.matmul(inputs, weights) + biases
		# inputs = tf.reshape(tf.tile(inputs, [1, developers_size]), shape=[batch_size, developers_size])
		# inputs = tf.reshape(tf.tile(inputs, [1, developers_size]), shape=[batch_size, developers_size])
		outputs = tf.multiply(inputs, weights)  # element-wise product per row; intended to be summed per column to [1, K]
		print(outputs)  # NOTE(review): leftover debug print
		if activation_function is None:
			outputs = outputs
		else:
			outputs = activation_function(outputs)
		return outputs

	# (disabled) newly-added cost-sensitive variant:
	# with tf.name_scope('cost'):
	#
	# 	logits = new_layer(logits, developers_size)
	# 	l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
	# 	loss = tf.losses.sparse_softmax_cross_entropy(labels=bi_labels, logits=logits)
	# 	loss = tf.reduce_mean(loss) + 0.001 * l2_loss

		# print(loss)


	# with tf.name_scope('accuracy'):
	# 	bool_top_1 = tf.nn.in_top_k(predictions=logits, targets=bi_labels, k=1, name='bool_top_1')
	# 	bool_top_5 = tf.nn.in_top_k(predictions=logits, targets=bi_labels, k=5, name='bool_top_5')
	# 	# this fetched the top-5 indices to inspect which samples were mispredicted
	# 	# _, self.top_5_indices = tf.nn.top_k(logits, k=5)
	# 	acc_top_1 = tf.reduce_mean(tf.cast(bool_top_1, dtype=tf.float32), name="acc_top_1")
	# 	acc_top_5 = tf.reduce_mean(tf.cast(bool_top_5, dtype=tf.float32), name="acc_top_5")


	# init = tf.variables_initializer(output_vars)        # initialize only a subset of the variables
	# sess.run(init)
	# optimizer = tf.train.AdamOptimizer(learning_rate=0.001, name='AdamOptimizer')
	# output_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='cost')
	# output_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
	# new_train_op = optimizer.minimize(loss, var_list=output_vars)

	# from tensorflow.python.ops import clip_ops
	# variables = tf.trainable_variables()
	# variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='cost')
	# gradients = optimizer.compute_gradients(loss=loss, var_list=output_vars)
	# capped_gvs = [(tf.clip_by_value(grad, clip_value_min=-5, clip_value_max=5), var) for grad, var in gradients]  # gradient clip, clip_value_min not >= 0
	# grad_sum = tf.summary.scalar("global_norm/gradient_norm", clip_ops.global_norm(list(zip(*capped_gvs))[0]))
	# new_train_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)

	sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

	saver.restore(sess, root + '/model-0')      # restore AFTER initializing all variables, so the checkpoint values win
	# sess.run(tf.initializers.variables(var_list=output_vars))


	# Metric tensors fetched together at each step.
	metrics = {
		'top_1':top1,
		'top_5':top5,
		'loss':loss
	}



	# a = set(tf.all_variables())
	# top11 = graph.get_tensor_by_name("accuracy/top1:0")
	# init = tf.variables_initializer(set(tf.all_variables()) - a)
	# sess.run(init)
	# note: the variable scope matters here
	# all_vars = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
	# print(all_vars)
	# var_n = []
	# for g, v in all_vars:
	# 	if 'w_softmax:0' in v.name or 'b_softmax:0' in v.name:
	# 		var_n.append((g, v))
	# all_vars = graph.get_c


	# grads = optimizer.compute_gradients(loss, var_list=[w_softmax, b_softmax])
	# grads = optimizer.compute_gradients(loss, var_list=all_vars)
	# new_train_op = optimizer.apply_gradients(grads)
	# optimizer = tf.train.AdamOptimizer(learning_rate=0.001, name='Adam')
	# new_train_op = optimizer.minimize(loss, var_list=all_vars)





	# sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])   # (disabled) running the initializer HERE would clobber the freshly restored weights — this one line caused endless grief

	timestamp = str(int(time.time()))  # use the current timestamp in the dir name to avoid overwriting earlier runs
	model_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', timestamp))
	print('writing to {}'.format(model_dir))

	# summaries for loss and accuracy
	loss_summary = tf.summary.scalar('loss', loss)
	top_1_summary = tf.summary.scalar('top_1', top1)
	top_5_summary = tf.summary.scalar('top_5', top5)
	#
	# # train summaries
	train_summary_op = tf.summary.merge([loss_summary, top_1_summary, top_5_summary])
	train_summary_dir = os.path.join(model_dir, 'summaries', 'train')
	train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
	#
	# # eval summaries
	eval_summary_op = tf.summary.merge([loss_summary, top_1_summary, top_5_summary])
	eval_summary_dir = os.path.join(model_dir, 'summaries', 'eval')  # this used to say 'train', so only the train folder was ever created — silly mistake back then...
	eval_summary_writer = tf.summary.FileWriter(eval_summary_dir, sess.graph)

	# # persist model parameters via checkpoints
	checkpoint_dir = os.path.abspath(os.path.join(model_dir, 'checkpoints'))
	checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
	if not os.path.exists(checkpoint_dir):
		os.makedirs(checkpoint_dir)
	saver = tf.train.Saver(max_to_keep=12)
	#
	# NOTE(review): window_num is never used inside the loop; with [0] the body runs
	# exactly once and always trains on time_windows[window_id]
	for window_num in [0]:
	# window_num = 1
	# 	print('window_num:{}'.format(window_num))
		train_iterator = get_dataset_iterator(time_windows[window_id], epoch=epoch, round_id=round_id)
		batch = train_iterator.get_next()

		# acc_list = {'top1':[], 'top5':[]}

		# Training pass: loop until the repeated dataset raises OutOfRangeError.
		for step in range(1000000):
			try:
				current_step = tf.train.global_step(sess, global_step)

				batch_features, batch_labels, batch_active_features, batch_sequence_lengths, batch_active_actual_lengths = sess.run(batch)
				if batch_labels.shape[0] != batch_size:
					print('skip small batch')
					continue
				feed_dict = {
					bi_features: batch_features,
					bi_labels: batch_labels,
					bi_active_features: batch_active_features,
					b_sequence_lengths:batch_sequence_lengths,
					bi_active_actual_lengths:batch_active_actual_lengths,
					bi_place_dropout_keep_prob: 0.6,  # dropout keep probability during training
				}
				# _,  metrics_1 = sess.run([new_train_op, metrics], feed_dict=feed_dict)
				_, train_summaries,  metrics_1 = sess.run([new_train_op, train_summary_op, metrics], feed_dict=feed_dict)
				# train_summaries,  metrics_1 = sess.run([train_summary_op, metrics], feed_dict=feed_dict)
				train_summary_writer.add_summary(train_summaries, current_step)
				if current_step % 5 == 0:
					# print('step {}: {}'.format(current_step, metrics_1))
					# print('train_time:, top1: {:.3f}, top5: {:.3f}, loss: {}'.format(metrics_1['top_1'], metrics_1['top_5'], metrics_1['loss']))
					logger.info('train_time:, top1: {:.3f}, top5: {:.3f}, loss: {}'.format(metrics_1['top_1'], metrics_1['top_5'], metrics_1['loss']))


			# for i in range(len(batch_labels)):
				# 	temp = np.argsort(-top_5_indices_1[i], axis=0)[:10]
				# 	predicts.append([' '.join([str(x) for x in temp.tolist()]), batch_labels[i]])
				# acc_list['top1'].append(metrics_1['top_1'])
				# acc_list['top5'].append(metrics_1['top_5'])

				# num_list['top1'].append(metrics_1['top_1']*batch_size)
				# num_list['top5'].append(metrics_1['top_5']*batch_size)
			except tf.errors.OutOfRangeError:
				# print('final acc: \n')
				# print('top1: {} \t top5:{}'.format(sum(acc_list['top1']) / len(acc_list['top1']), sum(acc_list['top5']) / len(acc_list['top5'])))
				# # print('top1: {} \t top5:{}'.format(sum(acc_list['top1']) / len(acc_list['top1']), sum(acc_list['top5']) / len(acc_list['top5'])))
				# print('top1_num: {} \t top5_num:{}'.format(sum(num_list['top1']), sum(num_list['top5'])))
				# print(len(acc_list['top1'])*batch_size)
				break
		saver.save(sess, checkpoint_prefix, global_step=window_id)
		# print('window {} finished!'.format(window_num))
		print('开始测试进程！')
		# run the evaluation pass on the NEXT time window
		eval_iterator = get_dataset_iterator(time_windows[window_id+1], epoch=1, round_id=round_id)
		batch = eval_iterator.get_next()
		acc_list = {'top1': [], 'top5': []}

		for i in range(100000):
			try:
				current_step = tf.train.global_step(sess, global_step)
				batch_features, batch_labels, batch_active_features, batch_sequence_lengths, batch_active_actual_lengths = sess.run(batch)
				if batch_labels.shape[0] != batch_size:
					# print('skip small batch')
					continue
				feed_dict = {
					bi_features: batch_features,
					bi_labels: batch_labels,
					bi_active_features: batch_active_features,
					b_sequence_lengths: batch_sequence_lengths,
					bi_active_actual_lengths: batch_active_actual_lengths,
					bi_place_dropout_keep_prob: 1.0,  # no dropout at evaluation time
				}
				# metrics_1 = sess.run(metrics, feed_dict=feed_dict)
				eval_summaries, metrics_1 = sess.run([eval_summary_op, metrics], feed_dict=feed_dict)
				eval_summary_writer.add_summary(eval_summaries, current_step)
				acc_list['top1'].append(metrics_1['top_1'])
				acc_list['top5'].append(metrics_1['top_5'])
				if i % 5 == 0:
					logger.info('eval_time:, top1: {:.3f}, top5: {:.3f}, loss: {}'.format(metrics_1['top_1'], metrics_1['top_5'], metrics_1['loss']))
					# print('step {}: {}'.format(current_step, metrics_1))
				# 	print('eval_time:, top1: {:.3f}, top5: {:.3f}, loss: {}'.format(metrics_1['top_1'], metrics_1['top_5'], metrics_1['loss']))

			except tf.errors.OutOfRangeError:

				# print('top1: {} \t top5:{}'.format(sum(acc_list['top1']) / len(acc_list['top1']), sum(acc_list['top5']) / len(acc_list['top5'])))
				# print('top1_num: {} \t top5_num:{}'.format(sum(num_list['top1']), sum(num_list['top5'])))
				# print(len(acc_list['top1'])*batch_size)
				break
			# print('top1: {} \t top5:{}'.format(),
			#                                    sum(acc_list['top5']) / len(acc_list['top5'])))
		# NOTE(review): raises ZeroDivisionError if the eval pass produced no full-size batch
		acc_top1 = sum(acc_list['top1']) / len(acc_list['top1'])
		acc_top5 = sum(acc_list['top5']) / len(acc_list['top5'])
		print('top1: {} \t top5:{}'.format(acc_top1, acc_top5))
			# top_1, top_5 = sess.run([top1, top5], feed_dict=feed_dict)
			# print('top1: {}, top5:{}'.format(top_1, top_5))
			# with open('../data/predict.txt', 'w') as writer:
			# 	for i in range(len(predicts)):
			# 		writer.write(predicts[i][0])
			# 		writer.write(',{}\n'.format(predicts[i][1]))
	return acc_top1, acc_top5       # which of the two criteria should be used?

if __name__ == '__main__':
	# Entry point: fine-tune on the first time window of round 0.
	# Earlier experiments passed a cost matrix instead:
	# cost_matrix = get_cost_matrix()
	# cost_matrix = read_optimal_cost_matrix()
	# main(cost_matrix)
	main(0, 0)
