import tensorflow as tf
import util.data_helper as data_helper
from DE.manually_specified import get_cost_matrix
from DE.manually_specified import implement_random_cost_matrix
import DE.manually_specified as MS
import analysis.extract_specified_combination_sample as PC
from sklearn.metrics import precision_score
from new_deep.get_batch_iterator import get_dataset_iterator
import numpy as np
import os
import time
import datetime

# --- Module-level dataset / vocabulary state consumed by main() below ---

# Bug report messages (second return value unused here); presumably one
# entry per bug report — confirm against data_helper.get_msg_all().
bug_msg_all, _ = data_helper.get_msg_all()
vocabulary = data_helper.create_vocabulary()
developers_list = data_helper.create_developers_list()

# NOTE(review): num_classes and developers_size are both
# len(developers_list) — they are duplicates; consider keeping only one.
num_classes = len(developers_list)
vocabulary_size = len(vocabulary)
developers_size = len(developers_list)
# Alternative windowed split kept for reference:
# time_windows = data_helper.split_dataset_by_time_windows(bug_msg_all)
time_windows = data_helper.split_dataset_by_eight_to_two(bug_msg_all)

# Evaluation hyper-parameters; must match the values the checkpoint was
# trained with (the restored graph appears to assume a fixed batch size).
batch_size = 32
active_size = 25
sequence_length = 400

def main(round_id, window_id, cost_matrix):
	"""Evaluate a saved checkpoint on one dataset time window.

	Restores the graph and weights from ``root``, streams the window's
	batches through the network, prints per-batch and overall top-1/top-5
	accuracy, and dumps every sample's raw output probabilities together
	with its true label to ``origin_prob_<window_id>.txt`` inside the
	checkpoint directory.

	Args:
		round_id: round index forwarded to get_dataset_iterator. NOTE: the
			checkpoint file index itself is currently hard-coded to 0 below.
		window_id: index into the module-level ``time_windows`` list.
		cost_matrix: matrix fed to the graph's 'cost_matrix' placeholder
			(presumably num_classes x num_classes — confirm against the
			training graph).
	"""
	session_conf = tf.ConfigProto()
	session_conf.gpu_options.allow_growth = True

	root = '/home/wanglinhui/PycharmProjects/2LSTM/new_deep/runs/1545588335/checkpoints'

	# Load the graph definition. The checkpoint index is hard-coded to 0
	# (earlier experiments used round_id here).
	saver = tf.train.import_meta_graph(root + '/model-{}.meta'.format(0))

	# `with` guarantees the session (and its GPU memory) is released even
	# if evaluation raises; the original never closed the session.
	with tf.Session(config=session_conf) as sess:
		graph = tf.get_default_graph()
		# Initialize first; saver.restore() below overwrites the trained
		# variables, so this only fills anything not in the checkpoint.
		sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

		# Input placeholders of the restored graph.
		bi_features = graph.get_tensor_by_name("place_features:0")
		bi_labels = graph.get_tensor_by_name("place_labels:0")
		bi_active_features = graph.get_tensor_by_name("batch_active_features:0")
		b_sequence_lengths = graph.get_tensor_by_name('place_sequence_lengths:0')
		bi_place_dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
		bi_active_actual_lengths = graph.get_tensor_by_name("place_active_actual_lengths:0")
		c_cost_matrix = graph.get_tensor_by_name('cost_matrix:0')

		# Output tensors: raw scores and top-k accuracy scalars.
		y = graph.get_tensor_by_name('loss/MatMul:0')
		top1 = graph.get_tensor_by_name("accuracy/top_1:0")
		top5 = graph.get_tensor_by_name("accuracy/top_5:0")
		metrics = {
			'y': y,
			'top_1': top1,
			'top_5': top5,
		}

		saver.restore(sess, root + '/model-{}'.format(0))

		# Evaluation only: a single pass over the window, no shuffling.
		eval_iterator = get_dataset_iterator(time_windows[window_id], 1, round_id, vocabulary, developers_list, bug_msg_all,
		                                     sequence_length, active_size, batch_size,
		                                     shuffle=False)
		batch = eval_iterator.get_next()

		acc_list = {'top1': [], 'top5': []}
		origin_prob = []  # one "<space-joined probs>,<label>" line per sample

		while True:
			try:
				(batch_features, batch_labels, batch_active_features,
				 batch_sequence_lengths, batch_active_actual_lengths) = sess.run(batch)
			except tf.errors.OutOfRangeError:
				break  # dataset exhausted

			# The restored graph appears to expect a fixed batch dimension,
			# so the trailing partial batch is skipped.
			if batch_labels.shape[0] != batch_size:
				print('skip small batch')
				continue

			feed_dict = {
				bi_features: batch_features,
				bi_labels: batch_labels,
				bi_active_features: batch_active_features,
				b_sequence_lengths: batch_sequence_lengths,
				bi_active_actual_lengths: batch_active_actual_lengths,
				bi_place_dropout_keep_prob: 1.0,  # no dropout at eval time
				c_cost_matrix: cost_matrix,
			}
			metrics_1 = sess.run(metrics, feed_dict=feed_dict)
			print('{}\t{}'.format(metrics_1['top_1'], metrics_1['top_5']))

			# Record each sample's raw probability vector and true label.
			for i in range(len(batch_labels)):
				origin_prob.append('{},{}'.format(' '.join(map(str, metrics_1['y'][i])), batch_labels[i]))

			acc_list['top1'].append(metrics_1['top_1'])
			acc_list['top5'].append(metrics_1['top_5'])

		# Overall accuracy; guard against a window that yielded no full
		# batch, which would otherwise divide by zero.
		if acc_list['top1']:
			print('all:{}\t{}'.format('%.4f' % (sum(acc_list['top1']) / len(acc_list['top1'])),
			                          '%.4f' % (sum(acc_list['top5']) / len(acc_list['top5']))))

		# Save the raw probabilities and true labels ONCE, after the loop.
		# The original reopened this file in 'w' mode and rewrote the whole
		# accumulated list on every batch — O(n^2) I/O with identical final
		# content.
		with open(os.path.join(root, 'origin_prob_{}.txt'.format(window_id)), 'w') as writer:
			for line in origin_prob:
				writer.write('{}\n'.format(line))
if __name__ == '__main__':
	# Build a randomly initialised cost matrix sized to the developer set
	# (a file-backed matrix via MS.get_cost_matrix_from_file is the
	# alternative), then evaluate round 0 against time window 1.
	random_cost_matrix = implement_random_cost_matrix(developers_size)
	main(round_id=0, window_id=1, cost_matrix=random_cost_matrix)