import datetime
import os
#from tensorflow.python.client import timeline
import sys
import time
sys.path.append('.')
import tensorflow as tf
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix

from src import data_iterator, debug_info
from src.data_helper import DataHelper

from util.PATH import PATH
from wordvec import word_2_vec
#from util import ELM


# Command-line configuration.  NOTE: the flag *help strings* are runtime string
# literals (shown by --help) and are intentionally left in the original Chinese.
tf.flags.DEFINE_integer('hidden_size', 100, '每层隐层的神经元数量')
tf.flags.DEFINE_integer('max_sentence', 200, '每个句子的最大单词数')
tf.flags.DEFINE_integer('embedding_size', 100, '单词转化的词向量的维度')
tf.flags.DEFINE_integer('epoch', 50, 'epoch num')
tf.flags.DEFINE_integer('n_layer', 1, '隐层的层数')
tf.flags.DEFINE_float('keep_prob', 0.3, '神经元保留的概率')
tf.flags.DEFINE_integer('batch_size', 32, 'batch size')
tf.flags.DEFINE_float('lr', 0.001, 'learning rate')
tf.flags.DEFINE_integer('active_size', 25, 'active size')
tf.flags.DEFINE_float('l2', 0.001, 'l2正则化系数')
tf.flags.DEFINE_string('dataset', 'GCC', '数据集名字')
tf.flags.DEFINE_string('wv_model_name',None, '需要导入的事先训练好的词向量模型的name，default=None，表示不启用事先训练好的词向量模型')
tf.flags.DEFINE_string('wv_model_type', 'embedding', '如word2vec、 fasttext、 one-hot、embedding，启用的词向量模型的类型，embedding是指自带的embedding layer')
tf.flags.DEFINE_string('is_test', None, '是否启用测试，如果启用测试，该参数接收模型文件夹名，即对应的timestamp；default=None，启用训练，模型文件夹以当前时间戳建立')
tf.flags.DEFINE_string('gpu_id', '0', '选择使用的GPU的id')
tf.flags.DEFINE_string('basic_model', 'BiCSDNN', 
										# Known values (translated from the original notes):
										# DBRNNA      — the DBRNN-A model;
										# DBRNNA-1    — variant 1: drops the attention layer and uses the last time step directly;
										# DBRNNA-2    — variant 2: skips the RNN output and merges the forward/backward attention outputs as the last time step;
										# DBRNNA-3    — variant 3: replaces the 1-D attention with a 2-D attention;
										# LeeCNN      — the LeeCNN model, i.e. TextCNN + word2vec;
										# TextRCNN    — the original TextRCNN model;
										# ImprovedTextRCNN — TextRCNN with BN and a dense layer after pooling;
										# TextCNN     — the TextCNN model (multi-channel version);
										# ConvLSTM    — the single-text ConvLSTM model;
										# ChsConvLSTM — the single-text ChsConvLSTM model;
										# Transformer — the single-text Transformer model;
										# SelfAttention   — the single-text SelfAttention model;
										# SelfAttention-1 — extracts the time step with single-dimension attention;
										# SelfAttention-2 — extracts time steps with k-max pooling;
										# AdverSelf   — the single-text AdverSelfAttention model;
										# BiBasicText — the single-text BiBasicText model;
										# BiBasicText-1 — BiBasicText with the first dense layer replaced by an embedding layer;
										# BiBasicText-2 — BiBasicText with a linear activation in the first dense layer, emulating an embedding;
										# TriBasicText-2 — TriBasicText variant: products and components modeled separately through two dense layers;
										# TriBasicText-3 — TriBasicText variant: products/components go through an embedding layer, then a dense layer;
										# SWEM-aver   — SWEM with average pooling;
										# SWEM-max    — SWEM with max pooling;
										# SWEM-concat — SWEM with concat pooling;
										# SWEM-hier   — SWEM with hierarchical pooling;
										'BasicText-3, 表示启用TrBasicText-3(单文本)模型的改进型; 属于模型的正常改进型\
						')
tf.flags.DEFINE_integer('ratio', 1, "1:启用82活跃度提取，3：启用811活跃度提取")
tf.flags.DEFINE_float('gpu_percent', 0.2, "显卡占用的百分比")
tf.flags.DEFINE_bool('dynamic_length', False, "True: 根据dataset选择动态的max sentence；False：为所有dataset统一选择一个max sentence，即200")
tf.flags.DEFINE_string('tag', None, '一个辅助标记，用来标识时间戳文件夹和本机记录的关系')
tf.flags.DEFINE_integer('n_blocks', 1, "表示self-attention块的数量，或者说层数")
tf.flags.DEFINE_integer('n_heads', 5, "表示Transformer块中多头注意力的数量，或者说head数量")
tf.flags.DEFINE_bool('residual', False, "是否为RNN启用残差结构")
tf.flags.DEFINE_string('split', 'category', "表示数据集的分割方式，time表示按照时间顺序8:2分割；category表示按照类别8:2划分")
tf.flags.DEFINE_string('fusion', 'mul', "表示两种高层特征的连接方式，mul：元素对应乘积；add：元素对应相加；concat：元素拼接;maxpooling;attention;meanpooling")
tf.flags.DEFINE_bool('seed', True, 'True:固定随机种子；False：不固定随机种子')
tf.flags.DEFINE_string('retrain', None, '指定继续训练时的基础模型，以timestamp的形式指定基础模型的保存文件夹')
Flags = tf.flags.FLAGS

if Flags.seed:
	# Fix the RNG seeds for reproducible runs (numpy is already imported at the
	# top of the file; the re-import here is harmless).
	import numpy as np
	np.random.seed(2020)
	#tf.random.set_random_seed(2020)
	tf.set_random_seed(2020)

from src.config import Config
config = Config().model		# extra model configuration (e.g. Transformer settings)

if Flags.is_test is None:
	timestamp = str(int(time.time()))  # use the current timestamp in the run directory name to avoid overwriting
	model_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', timestamp))
	print('writing to {}'.format(model_dir))
	if not os.path.exists(model_dir):
		os.makedirs(model_dir)
else:
	model_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', Flags.is_test))		# load a previously saved model directory
	if not os.path.exists(model_dir):
		raise FileNotFoundError 

if Flags.retrain is not None:		# locate the base model to continue training from
	basic_model_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', Flags.retrain))
	if not os.path.exists(basic_model_dir):
		raise FileNotFoundError
else:
	# Deliberate guard: this script variant always expects a --retrain base
	# model; the author's note says to comment this raise out only after
	# confirming the base model really is not needed.
	raise RuntimeError('请确认是否真的不需要retrain的基础模型，之后注释本行代码')
if Flags.tag is not None:
	# NOTE(review): `timestamp` is only bound in the training branch above, so
	# combining --is_test with --tag would raise a NameError here — verify.
	cur_dir = os.path.abspath(os.path.curdir)
	with open(os.path.join(cur_dir, 'corresponding.log'), 'a') as writer:		# maps a timestamp folder back to the command that produced it, on the server side
		# writer.write('Life is difficult.')
		writer.write('{}\t{}\n'.format(str(timestamp), Flags.tag))
if Flags.dynamic_length:	# per-dataset RNN time-step length == that dataset's mean text length, to speed up computation
	lengths = {			# mean text length of each dataset
		'Mozilla': 50,
		"Eclipse": 83,
		"Netbeans": 72,
		"OpenOffice": 56,
		"GCC": 173,
	}
	# Flags.max_sentence = lengths[Flags.dataset]
	Flags.max_sentence = lengths[Flags.dataset.split('_')[0]]
else:
	pass

PATH = PATH(Flags.dataset)
data_helper = DataHelper(PATH)
bug_msg_all, _ = data_helper.get_msg_all()      # in fact these variables could be fetched together
pcs = data_helper.create_pcs()
products = data_helper.create_products()
components = data_helper.create_components()
config.pcs_size = len(pcs)
config.product_size = len(products)
config.component_size = len(components)
debug_info.write_debug_info_to_file(model_dir, "开始读取词向量")
if Flags.wv_model_type == 'word2vec':
	import wordvec.word_2_vec as embedding
	# load the model; import its vocabulary and lookup table
	vocabulary, lookup_table = embedding.read_embedding_model(Flags.wv_model_name, Flags.dataset)        # the vocabulary covers both pre-trained and fine-tuned words, but the test set can still contain OOV words — remember to handle them
elif Flags.wv_model_type == 'fasttext':
	import wordvec.fast_text as embedding
	# load the model; import its vocabulary and lookup table
	vocabulary, lookup_table = embedding.read_embedding_model(Flags.wv_model_name, Flags.dataset)        # the vocabulary covers both pre-trained and fine-tuned words, but the test set can still contain OOV words — remember to handle them
elif Flags.wv_model_type == 'one-hot':
	vocabulary = data_helper.create_vocabulary()
	lookup_table = None
elif Flags.wv_model_type == 'embedding':
	vocabulary = data_helper.create_vocabulary()
	config.freqs = data_helper.create_vocabulary_freq()
	lookup_table = None


debug_info.write_debug_info_to_file(model_dir, "词向量读取完毕")

# TODO: count how many test-set words are out-of-vocabulary and, for
# consistency, map them all to a single <unk> marker.

developers = data_helper.create_developers_list()
# Split-ratio reminders for the time-based helpers:
#   1 : 8:2
#   2 : 6:2:2
#   3 : 8:1:1
# Changing the split also requires changing how main() obtains time_windows.
# if Flags.basic_model == 'DBRNNA':
if 'DBRNNA' in Flags.basic_model:
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)      #
	from baseline.DBRNNA import DBRNNA as DeepModel
	config.has_BN = True
elif Flags.basic_model == 'LeeCNN':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)      #
	from baseline.LeeCNN import LeeCNN as DeepModel
	config.has_BN = True
elif Flags.basic_model == 'TextRCNN':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)      #
	from baseline.TextRCNN import TextRCNN as DeepModel
	config.has_BN = False
elif Flags.basic_model == 'ImprovedTextRCNN':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)      #
	from baseline.TextRCNN import TextRCNN as DeepModel
	config.has_BN = True
elif Flags.basic_model == 'TextCNN':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)      #
	from baseline.TextCNN import TextCNN as DeepModel
	config.has_BN = True
elif Flags.basic_model == 'ConvLSTM':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_of_convLSTM_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)      #
	from baseline.ConvLSTM import ConvLSTM as DeepModel
	config.has_BN = False
elif Flags.basic_model == 'ChsConvLSTM':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from baseline.ChsConvLSTM import ChsConvLSTM as DeepModel
	config.has_BN = False
elif Flags.basic_model == 'Transformer':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from baseline.Transformer import Transformer as DeepModel
	config.has_BN = False
elif 'SelfAttention' in Flags.basic_model:
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from baseline.SelfAttention import SelfAttention as DeepModel
	config.has_BN = False
elif Flags.basic_model == 'AdverSelf':
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from baseline.AdverSelfAttention import AdverSelfAttention as DeepModel
	config.has_BN = False
elif 'BiBasicText' in Flags.basic_model:
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText_with_product_components(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from baseline.BiBasicText import BiBasicText as DeepModel
	config.has_BN = False
elif 'TriBasicText' in Flags.basic_model:
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText_with_TriBasicText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from baseline.TriBasicText import TriBasicText as DeepModel
	config.has_BN = False
elif 'SWEM' in Flags.basic_model:
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from baseline.SWEM import SWEM as DeepModel
	config.has_BN = False
elif 'SingleCSDNN' in Flags.basic_model:
	prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText_with_TriBasicText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	from imbalance.SingleCSDNN import SingleCSDNN as DeepModel
	config.has_BN = False
elif 'BasicText' in Flags.basic_model:
	# prepared_datas could be made 3-D, or wrapped in an outer loop
	# prepared_datas, idx2bugid = data_helper.prepare_tf_input_datas_for_singleText_with_TriBasicText(vocabulary, developers, bug_msg_all, Flags.max_sentence)
	#np.savez('{}_pi.npz'.format(Flags.dataset), p=prepared_datas, i=idx2bugid)
	# NOTE(review): the npz below is expected to have been produced by the
	# commented-out savez above; object arrays may additionally require
	# np.load(..., allow_pickle=True) on NumPy >= 1.16.3 — verify.
	pis = np.load('{}_pi.npz'.format(Flags.dataset))
	prepared_datas = pis['p']
	idx2bugid = pis['i'].tolist()
	from binary.BasicText import BasicText as DeepModel
	config.has_BN = False


debug_info.write_debug_info_to_file(model_dir, "数据预备完成")

n_labels = len(developers)
vocabulary_size = len(vocabulary)
developers_size = len(developers)

if Flags.split == 'time':
	# time_windows = data_helper.split_dataset_by_time_windows(bug_msg_all) # the normal 11 time windows over the full dataset
	# time_windows = data_helper.split_dataset_by_eight_to_two(bug_msg_all)
	# time_windows = data_helper.split_dataset_by_six_two_two(bug_msg_all)
	time_windows = data_helper.split_dataset_by_eight_one_one(bug_msg_all)
elif Flags.split == 'category':
	time_windows = data_helper.read_dataset_by_category()		# each time window is a collection of bug ids
else:
	raise RuntimeError('split参数指定错误！')

def main():
	"""Train one binary (one-vs-rest) model per developer class, then vote.

	For every class i the label column (index -4) of ``prepared_datas`` is
	remapped to 0 (= class i) / 1 (= all other classes), a per-class output
	directory is created under ``model_dir``, and ``single_main`` trains the
	binary classifier.  After all classifiers finish, ``vote`` aggregates the
	saved per-class predictions over the train/val/test splits.
	"""
	n_columns = len(prepared_datas[0])
	for class_id in range(1, n_labels):
		# Shift the label column by class_id: samples of this class become 0.
		offset = [0] * n_columns
		offset[-4] = class_id
		binary_datas = np.array(prepared_datas).copy() - offset
		# Collapse everything that is not class_id into the "others" label 1.
		for row in binary_datas:
			if row[-4] != 0:
				row[-4] = 1
		class_dir = os.path.join(model_dir, str(class_id))		# fresh storage folder for this class
		if not os.path.exists(class_dir):
			os.makedirs(class_dir)
		single_main(binary_datas, class_dir)
	# Aggregate the per-class predictions recorded by single_main()/test().
	for window, sign in ((0, 'train_time'), (2, 'val_time'), (1, 'test_time')):
		vote(time_windows[window], sign=sign)
def vote_main():
	"""Run only the voting stage over the three splits, without retraining."""
	for window, sign in ((0, 'train_time'), (2, 'val_time'), (1, 'test_time')):
		vote(time_windows[window], sign=sign)

def single_main(binary_datas, model_dir):
	"""Build, (re)train and evaluate one binary classifier in a fresh graph.

	:param binary_datas: prepared samples whose label column (-4) is already
		remapped to 0 (target class) / 1 (others).
	:param model_dir: per-class directory for configuration, logs and dumps.
	"""
	write_configuration_info_to_file(model_dir)
	
	os.environ['CUDA_VISIBLE_DEVICES'] = Flags.gpu_id
	# with tf.device('/cpu:0'):
	# with tf.device('/job:localhost/replica:0/task:0/device:XLA_GPU:0'):
	with tf.Graph().as_default():
		session_conf = tf.ConfigProto(
			log_device_placement=False,  # log op placement per device, helps debugging
			allow_soft_placement=True,
		)
		# session_conf.gpu_options.allow_growth = True
		session_conf.gpu_options.per_process_gpu_memory_fraction = Flags.gpu_percent
		sess = tf.Session(config=session_conf)
		with sess.as_default():
			# options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
			# run_metadata = tf.RunMetadata()
			# if Flags.wv_model_name is not None:		# a pre-trained word-vector model is available
				# model = DeepModel(Flags, n_labels=n_labels, vocabulary_size=vocabulary_size, root_dir=model_dir, lookup_table=lookup_table)
			# else:
				# model = DeepModel(Flags, n_labels=n_labels, vocabulary_size=vocabulary_size, root_dir=model_dir, wv_model=Flags.wv_model_type)
			model = DeepModel(Flags, n_labels=2, vocabulary_size=vocabulary_size, root_dir=model_dir, wv_model=Flags.wv_model_type, lookup_table=lookup_table, config=config)
			model.build()
			debug_info.write_debug_info_to_file(model_dir, "静态图构建完成")
			# graph_save_dir = os.path.join(model_dir, 'checkpoints/')		# for resuming, switch here
			graph_save_dir = os.path.join(basic_model_dir, 'checkpoints/')		# resume training from the base model's checkpoints
			# ckpt = tf.train.get_checkpoint_state(graph_save_dir)
			# if ckpt:
			# 	# restore the model
			# 	model.saver.restore(sess, ckpt.model_checkpoint_path)
			# 	# restore all saved models
			# 	# model.saver.restore(sess, ckpt.all_model_checkpoint_paths)
			# 	sess.run(model.global_step.initializer)
			# 	test(time_windows[0], model, sess, binary_datas, model_dir, sign='train_time')		# sweep the training set once
			# 	test(time_windows[2], model, sess, binary_datas, model_dir, sign='val_time')	# run the validation set
			# else:
			# 	# train from scratch
			# 	sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
			# 	debug_info.write_debug_info_to_file(model_dir, "开始训练")
			# 	train(time_windows, model, sess, graph_save_dir, binary_datas, model_dir)#, options, run_metadata)
			if Flags.retrain is not None:
				sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
				ckpt = tf.train.get_checkpoint_state(graph_save_dir)
				model.saver.restore(sess, ckpt.model_checkpoint_path)
				debug_info.write_debug_info_to_file(model_dir, "开始继续训练")
				train(time_windows, model, sess, graph_save_dir, binary_datas, model_dir)#, options, run_metadata)
			print('开始测试')
			# TODO: two kinds of evaluation — a per-class test here, and a global
			# vote once all classifiers are trained, using prepared_datas.
			test(time_windows[1], model, sess, binary_datas, model_dir,)	# every sample must be run through every per-class model
			test(time_windows[0], model, sess, binary_datas, model_dir, sign='train_time')	# same read-only pass over the training split

def calculate_criteria(y_true, y_pred):
	"""Compute macro-averaged F1/precision/recall and the confusion matrix.

	:param y_true: 1-D array-like of ground-truth label ids.
	:param y_pred: 1-D array-like of predicted label ids.
	:return: tuple ``(f1, precision, recall, confusion)`` — the first three
		are macro averages; ``confusion`` is a developers_size x
		developers_size matrix covering every known label, even those absent
		from this batch of predictions.
	"""
	f1 = f1_score(y_true, y_pred, average='macro')
	p = precision_score(y_true, y_pred, average='macro')
	r = recall_score(y_true, y_pred, average='macro')
	# Pass the label set by keyword: scikit-learn removed the positional form
	# of confusion_matrix's `labels` argument in modern releases.
	confusion = confusion_matrix(y_true, y_pred, labels=list(range(developers_size)))
	return f1, p, r, confusion

def train(time_windows, model, sess, graph_save_dir, binary_datas, model_dir):#, options, run_metadata):
	"""Train the binary model, with a val/test monitoring pass per epoch.

	The two classes (column -4 of ``binary_datas``: 0 = target class,
	1 = others) are balanced by down-sampling the majority class to the
	minority class size before training starts.

	:param time_windows: dataset splits as bug-id collections
		(0 = train, 1 = test, 2 = validation).
	:param model: built DeepModel exposing the tensors fetched below.
	:param sess: active tf.Session.
	:param graph_save_dir: checkpoint directory (saving is currently disabled).
	:param binary_datas: prepared samples with the 0/1 label in column -4.
	:param model_dir: per-class output directory for logs and weight snapshots.
	"""
	min_loss = 1000000		# kept for the (currently disabled) early stopping
	# Row indices of the minority class (0, the target class by convention)
	# and of the majority "others" class (1).
	c_0 = np.reshape(np.argwhere(binary_datas[:,-4] == 0), (-1,))
	c_1 = np.reshape(np.argwhere(binary_datas[:,-4] == 1), (-1,))
	
	c_1 = np.random.choice(c_1, len(c_0))	# down-sample "others" to a 1:1 class ratio
	# Map the row indices back to bug ids; these replace the raw inputs.
	c_0_bugids = np.array(idx2bugid)[c_0]
	c_1_bugids = np.array(idx2bugid)[c_1]
	print(c_0_bugids.shape)
	inputs = np.hstack((c_0_bugids, c_1_bugids))
	for i in range(Flags.epoch):
		logs = []
		all_top1 = []
		all_top5 = []
		all_loss = []
		all_y_predict = []
		all_y_true = []
		train_iterator = data_iterator.generator_batch(inputs, Flags.batch_size, binary_datas, idx2bugid, data_helper, is_shuffle=True)
		for batch in train_iterator:
			feed_dict = {
				model.batch:batch,
				model.batch_size:Flags.batch_size,
				model.keep_prob: Flags.keep_prob,
				model.is_train: True,
			}
			if not config.has_BN:	# without BN there is no update_ops to run
				_, step, summaries, metrics, loss, top_1_indices, w_output, b_output = sess.run(
					[model.train_op, 
					model.global_step, 
					model.train_summary_op, 
					model.metrics, 
					model.loss,
					model.top_1_indices,
					model.w_output,
					model.b_output], 
					feed_dict=feed_dict)
			else:
				# BUGFIX: also fetch top_1_indices / w_output / b_output here.
				# They are consumed unconditionally after this if/else, so the
				# old fetch list raised a NameError for BN-enabled models.
				_, _, step, summaries, metrics, loss, top_1_indices, w_output, b_output = sess.run(
					[model.train_op, 
					model.update_ops,
					model.global_step, 
					model.train_summary_op, 
					model.metrics, 
					model.loss,
					model.top_1_indices,
					model.w_output,
					model.b_output], 
					feed_dict=feed_dict)
			time_str = datetime.datetime.now().isoformat()
			if step % 20 == 0:
				logstr = '{0}_train_time: {1}, top1: {2:.3f}, loss: {3}'.format(str(i), time_str, metrics['top_1'], loss)
				print(logstr)
				logs.append(logstr)
			model.train_summary_writer.add_summary(summaries, step)
			all_top1.append( metrics['top_1'])
			all_top5.append( metrics['top_5'])
			all_loss.append(loss)
			labels = batch[:, Flags.max_sentence]		# the label column follows the word ids
			all_y_true = np.append(all_y_true, labels, axis=0)
			top_1_indices = np.reshape(top_1_indices, (-1,))
			all_y_predict = np.append(all_y_predict, top_1_indices, axis=0)
		f1, p, r, confusion = calculate_criteria(all_y_true, all_y_predict)
		with open(os.path.join(model_dir, 'log.txt'), 'a') as writer:
			writer.write('\n'.join(logs))
			writer.write('\n')
			time_str = datetime.datetime.now().isoformat()
			# Epoch-level training metrics.
			logstr = '{0}: {1}, top1: {2:.3f}, top5: {3:.3f}, loss: {4}'.format('all_train_time', time_str, sum(all_top1)/len(all_top1), sum(all_top5)/len(all_top5), sum(all_loss)/len(all_loss))
			writer.write('{}\n'.format(logstr))
			logstr = '{0}: {1}, f1: {2:.3f}, precision: {3:.3f}, recall: {4:.3f}'.format('train_im', time_str, f1, p, r)
			writer.write('{}\n'.format(logstr))
		# Snapshot the output-layer weights (overwritten each epoch);
		# vote_test() re-injects them into a restored base graph later.
		np.savez(os.path.join(model_dir, 'output_wb.npz'), w_output=w_output, b_output=b_output)

		# FIXME: validation-based early stopping / checkpoint saving is
		# disabled for the 8:2 split; the val/test passes below only monitor.
		c_loss= test(time_windows[2], model, sess, binary_datas, model_dir, sign='val_time')
		test(time_windows[1], model, sess, binary_datas, model_dir, sign='test_time')	
		# if c_loss < min_loss:    # a lower loss means the best fit so far
			# min_loss = c_loss
			# model.saver.save(sess, os.path.join(graph_save_dir, 'model-{}'.format(i)))  # keep only the best model
		# model.saver.save(sess, os.path.join(graph_save_dir, 'model-{}'.format(i)))  # save each epoch; the saver keeps the 10 newest checkpoints
			
def test(inputs, model, sess, binary_datas, model_dir, sign='test_time'):
	'''
	Evaluate the model over one data split without updating parameters.

	Depending on the split passed in, this serves as the test pass, the
	validation pass, or a read-only sweep over the training data.
	:param inputs: bug-id collection of the split to evaluate
	:param model: built DeepModel with restored/initialized variables
	:param sess: active tf.Session
	:param binary_datas: prepared samples with the 0/1 label in column -4
	:param model_dir: per-class output directory for logs and npz dumps
	:param sign: 'test_time' / 'train_time' / 'val_time' — selects the summary
		writer and tags the output files
	:return: mean batch loss over the split
	'''
	if sign == 'test_time' or sign == 'train_time':
		summary_op = model.test_summary_op
		summary_writer = model.test_summary_writer
	elif sign == 'val_time':
		summary_op = model.val_summary_op
		summary_writer = model.val_summary_writer
	# NOTE(review): any other sign leaves summary_op/summary_writer unbound
	# and would raise a NameError below — verify callers.
	# all_top1 = []
	# all_top5 = []
	# all_topk = [[], [], [], [], []]
	all_loss = []
	all_y_predict = []
	all_y_true = []
	all_logits = np.empty(shape=(0, 2))		# raw two-class logits, one row per sample
	all_labels = np.empty(shape=(0,))
	# current_index = 0
	for i in range(1):
		test_iterator = data_iterator.generator_batch(inputs, Flags.batch_size, binary_datas, idx2bugid, data_helper, is_shuffle=False)
		# for b_words, b_labels, b_actives, b_l_sentences, b_l_actives in test_iterator:
		for batch in test_iterator:
			feed_dict = {
				# model.b_features: b_words,
				# model.b_labels: b_labels,
				# model.b_active_features: b_actives,
				# model.b_sequence_lengths: b_l_sentences,
				# model.b_active_actual_lengths: b_l_actives,
				model.batch:batch,
				model.batch_size: Flags.batch_size,     # one batch at a time — GPU memory is limited
				model.keep_prob: 1,
				model.is_train: False,
			}
			# if Flags.hasELM:
				# step, summaries, metrics, loss, logits = sess.run(
					# [model.global_step, summary_op, model.metrics, model.loss, model.logits], feed_dict=feed_dict)
			step, summaries, metrics, loss, top_1_indices, logits = sess.run(
					[model.global_step, summary_op, model.metrics, model.loss, model.top_1_indices, model.logits], feed_dict=feed_dict)

			# all_top1.append(metrics['top_1'])		# one top-k value per batch
			# all_top5.append(metrics['top_5'])
			
			# for k in range(5):
				# all_topk[k].append(metrics['top_{}'.format(k+1)])
			all_loss.append(loss)
			labels = batch[:, Flags.max_sentence]		# the label column follows the word ids
			all_y_true = np.append(all_y_true, labels, axis=0)
			top_1_indices = np.reshape(top_1_indices, (-1,))
			all_y_predict = np.append(all_y_predict, top_1_indices, axis=0)
			# if Flags.hasELM:
				# labels = batch[:, 400]
				# all_logits = np.append(all_logits, logits, axis=0)
				# all_labels = np.append(all_labels, labels, axis=0)
			all_logits = np.append(all_logits, logits, axis=0)
			all_labels = np.append(all_labels, labels, axis=0)
			# if Flags.is_test is not None:
			# 	with open(os.path.join(model_dir, '{}.txt'.format(sign)),  'a') as writer:
			# 		for j in range(Flags.batch_size):
			# 			writer.write('{},{}\n'.format(inputs[current_index + j], ' '.join(list(map(str, all_probs[j])))))	# first column is the bugid, then the probabilities
			# current_index += Flags.batch_size
		f1, p, r, confusion = calculate_criteria(all_y_true, all_y_predict)
		np.savez(os.path.join(model_dir, '{}_predict_true.npz'.format(sign)), y_true=all_y_true, y_predict=all_y_predict)
		# if step % 1 == 0:
		# NOTE(review): `step`/`summaries` are unbound if the iterator yielded
		# no batches — presumably every split is non-empty; verify.
		if Flags.is_test is None:	# only log during training; standalone testing does not need this file
			summary_writer.add_summary(summaries, step)
			time_str = datetime.datetime.now().isoformat()
			# logstr = '{0}: {1}, top1: {2:.3f}, top5: {3:.3f}, loss: {4}'.format(sign, time_str, sum(all_topk[0])/len(all_topk[0]), sum(all_topk[4])/len(all_topk[4]), sum(all_loss)/len(all_loss))
			# print(logstr)
			with open(os.path.join(model_dir, 'log.txt'), 'a') as writer:
				# writer.write(logstr)
				# writer.write('\n')
				logstr = '{0}: {1}, f1: {2:.3f}, precision: {3:.3f}, recall: {4:.3f}'.format('{}_im'.format(sign.split('_')[0]), time_str, f1, p, r)
				writer.write('{}\n'.format(logstr))
			# record top1~top5
			# with open(os.path.join(model_dir, 'topK.txt'), 'a') as writer:
			# 	writer.write('{}:{}'.format(sign, ','.join(list(map(lambda top: '{:.3f}'.format(sum(top)/len(top)), all_topk)))))
			# 	writer.write('\n')
		else:
			with open(os.path.join(model_dir, 'test_log.txt'), 'w') as writer:
				time_str = datetime.datetime.now().isoformat()
				logstr = '{0}: {1}, f1: {2:.3f}, precision: {3:.3f}, recall: {4:.3f}'.format('{}_im'.format(sign.split('_')[0]), time_str, f1, p, r)
				writer.write('{}\n'.format(logstr))
			# also persist the confusion matrix
			# with open(os.path.join(model_dir, 'confusion_matrix.txt'), 'w') as writer:
				# for i in range(len(confusion)):
					# writer.write('{}\n'.format('\t'.join(list(map(str, confusion[i])))))
			with open(os.path.join(model_dir, 'confusion_matrix_{}.txt'.format(sign)), 'w') as writer:
				for i in range(len(confusion)):
					writer.write('{}\n'.format('\t'.join(list(map(str, confusion[i])))))
			
			with open(os.path.join(model_dir, '{}_logits.txt'.format(sign)), 'w') as writer:		# persist the raw probability file
				for j in range(len(all_logits)):
					writer.write('{},{}\n'.format(' '.join(list(map(str, all_logits[j]))), str(all_labels[j])))
	return sum(all_loss)/len(all_loss)#metrics['top_5']#
	# if Flags.hasELM:
		# return sum(all_loss)/len(all_loss), all_logits, all_labels
	
def vote_test(model_dir, time_windows, binary_datas):
	"""Re-run the val/test passes for every class model to regenerate the
	per-class prediction files consumed by vote().

	For each class, the shared base graph is restored from basic_model_dir and
	the class-specific output layer (w/b saved by train() in output_wb.npz) is
	injected via tf.assign before evaluating.
	"""
	# Run after all per-class models finished, to do one centralized vote and
	# compute the overall confusion matrix.  Ideally each class model would
	# record this data right after training.
	for i in range(n_labels):
		# first restore the model for this class
		with tf.Graph().as_default():
			session_conf = tf.ConfigProto(
				log_device_placement=False,  # log op placement per device, helps debugging
				allow_soft_placement=True,
			)
			# session_conf.gpu_options.allow_growth = True
			session_conf.gpu_options.per_process_gpu_memory_fraction = Flags.gpu_percent
			sess = tf.Session(config=session_conf)
			with sess.as_default():
				# options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
				# run_metadata = tf.RunMetadata()
				# if Flags.wv_model_name is not None:		# a pre-trained word-vector model is available
					# model = DeepModel(Flags, n_labels=n_labels, vocabulary_size=vocabulary_size, root_dir=model_dir, lookup_table=lookup_table)
				# else:
					# model = DeepModel(Flags, n_labels=n_labels, vocabulary_size=vocabulary_size, root_dir=model_dir, wv_model=Flags.wv_model_type)
				model = DeepModel(Flags, n_labels=2, vocabulary_size=vocabulary_size, root_dir=model_dir, wv_model=Flags.wv_model_type, lookup_table=lookup_table, config=config)
				model.build()
				graph_save_dir = os.path.join(basic_model_dir, 'checkpoints/')		# restore from the base model's checkpoints
				ckpt = tf.train.get_checkpoint_state(graph_save_dir)
				sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
				model.saver.restore(sess, ckpt.model_checkpoint_path)
				# load the saved output-layer w/b and inject them into the graph,
				# then run the evaluation
				output_wb = np.load(os.path.join(model_dir, 'output_wb.npz'))
				w_output = output_wb['w_output']
				b_output = output_wb['b_output']
				
				update_w = tf.assign(model.w_output, w_output)
				update_b = tf.assign(model.b_output, b_output)

				sess.run([update_w, update_b])		# inject the saved weights

				# Run every sample through this model, producing 0/1 predictions.
				# The main purpose is to create the {sign}_predict_true.npz file
				# in each class folder.
				test(time_windows[2], model, sess, binary_datas, model_dir, sign='val_time')    # validation pass
				test(time_windows[1], model, sess, binary_datas, model_dir, sign='test_time')	

def vote(inputs, sign):
	"""Aggregate the saved per-class binary predictions into a final vote.

	For every class i, the npz written by test() in that class's directory is
	loaded.  A sample predicted as class i (binary label 0) receives one vote
	for i; a sample predicted as "others" receives one vote for every class
	except i.  The plurality winner is the final multi-class prediction.

	:param inputs: bug-id collection of the split being evaluated (fixed
		order, matching the is_shuffle=False order test() iterated in).
	:param sign: split tag ('train_time' / 'val_time' / 'test_time'), used to
		locate the npz files and label the log lines.
	"""
	votes_for_all_samples = np.zeros(shape=(len(inputs), n_labels))
	all_y_true = np.zeros(shape=(len(inputs),))
	for i in range(1, n_labels):
		binary_model_dir = os.path.join(model_dir, str(i))		# per-class model folder
		if not os.path.exists(binary_model_dir):
			raise RuntimeError('类别{}的模型文件未找到'.format(i))
		predict_true = np.load(os.path.join(binary_model_dir, '{}_predict_true.npz'.format(sign)))
		y_predict = predict_true['y_predict']	# binary: 0 = class i, 1 = others
		y_true = predict_true['y_true']		# binary as well; the multi-class truth is recovered below

		for j in range(len(y_true)):
			if y_predict[j] == 0:	# predicted as class i: one vote for i
				votes_for_all_samples[j][i] += 1
			else:					# predicted as "others": one vote for every class except i
				votes_for_all_samples[j] += 1
				votes_for_all_samples[j][i] -= 1
			# BUGFIX: recover the multi-class ground truth from the binary
			# *labels*, not from the predictions.  The old code set
			# all_y_true[j] = i whenever class i was merely *predicted*,
			# corrupting the reported metrics (check_vote already used the
			# label-based recovery below).
			if y_true[j] == 0:
				all_y_true[j] = i
	all_y_predict = np.argmax(votes_for_all_samples, axis=1)	# plurality winner per sample
	f1, p, r, confusion = calculate_criteria(all_y_true, all_y_predict)
	# Log the precision-family metrics of the vote.
	with open(os.path.join(model_dir, 'vote.txt'), 'a') as writer:
		time_str = datetime.datetime.now().isoformat()
		logstr = '{0}: {1}, f1: {2:.3f}, precision: {3:.3f}, recall: {4:.3f}'.format('{}_im'.format(sign.split('_')[0]), time_str, f1, p, r)
		writer.write('{}\n'.format(logstr))
	# Top-1..5 accuracy derived from the vote counts.
	topK = [0, 0, 0, 0 ,0]
	
	for i in range(len(all_y_true)):
		order = np.argsort(votes_for_all_samples[i])
		for k in range(1, 6):
			if all_y_true[i] in order[-k:]:
				topK[k-1] += 1
	topK_acc = np.array(topK) / len(all_y_true)
	with open(os.path.join(model_dir, 'vote.txt'), 'a') as writer:
		logstr = '{0}: {1}, top1: {2:.3f}, top2: {3:.3f}, top3: {4:.3f}, top4: {5:.3f}, top5: {6:.3f}'.format(sign, time_str, topK_acc[0], topK_acc[1], topK_acc[2], topK_acc[3], topK_acc[4],)
		writer.write('{}\n'.format(logstr))

def check_vote(inputs, sign):
	"""Debug variant of vote(): same ballot counting, but the ground truth is
	derived from the saved binary *labels* (not the predictions), the raw vote
	matrix and per-sample (pred, true) pairs are dumped to disk, and metrics
	are printed instead of being appended to vote.txt.
	"""
	votes_for_all_samples = np.zeros(shape=(len(inputs), n_labels))
	all_y_true = np.zeros(shape=(len(inputs),))
	for i in range(1, n_labels):
		binary_model_dir = os.path.join(model_dir, str(i))		# per-class model folder
		if not os.path.exists(binary_model_dir):
			raise RuntimeError('类别{}的模型文件未找到'.format(i))
		predict_true = np.load(os.path.join(binary_model_dir, '{}_predict_true.npz'.format(sign)))
		y_predict = predict_true['y_predict']	# binary: 0 = class i, 1 = others
		y_true = predict_true['y_true']		# binary as well; the multi-class truth is recovered below

		for j in range(len(y_true)):
			if y_predict[j] == 0:	# predicted as class i
				votes_for_all_samples[j][i] += 1
				#all_y_true[j] = i		 # (old, prediction-based truth recovery — intentionally disabled)
			else:					# predicted as "others"
				votes_for_all_samples[j] += 1
				votes_for_all_samples[j][i] -= 1
			if y_true[j] == 0:
				all_y_true[j] = i
	#print(votes_for_all_samples)
	np.savetxt(os.path.join(model_dir, '{}_vote_all_samples.txt'.format(sign)), votes_for_all_samples)	
	all_y_predict = np.argmax(votes_for_all_samples, axis=1)	# plurality winner per sample
	with open(os.path.join(model_dir, '{}_true_pre.txt'.format(sign)), 'w') as writer:
		for i in range(len(all_y_predict)):
			writer.write('{}\t{}\n'.format(all_y_predict[i], all_y_true[i]))
	f1, p, r, confusion = calculate_criteria(all_y_true, all_y_predict)
	# Report the results (printed only; file writes are deliberately commented out).
	with open(os.path.join(model_dir, 'vote.txt'), 'a') as writer:
		time_str = datetime.datetime.now().isoformat()
		logstr = '{0}: {1}, f1: {2:.3f}, precision: {3:.3f}, recall: {4:.3f}'.format('{}_im'.format(sign.split('_')[0]), time_str, f1, p, r)
		print(logstr)
		#writer.write('{}\n'.format(logstr))
	# Top-1..5 accuracy derived from the vote counts.
	topK = [0, 0, 0, 0 ,0]
	
	for i in range(len(all_y_true)):
		#order = np.argsort(all_y_predict[i])
		order = np.argsort(votes_for_all_samples[i])
		for k in range(1, 6):
			if all_y_true[i] in order[-k:]:
				topK[k-1] += 1
	topK_acc = np.array(topK) / len(all_y_true)
	with open(os.path.join(model_dir, 'vote.txt'), 'a') as writer:
		logstr = '{0}: {1}, top1: {2:.3f}, top2: {3:.3f}, top3: {4:.3f}, top4: {5:.3f}, top5: {6:.3f}'.format(sign, time_str, topK_acc[0], topK_acc[1], topK_acc[2], topK_acc[3], topK_acc[4],)
		print(logstr)
		#writer.write('{}\n'.format(logstr))






def write_configuration_info_to_file(root_dir):
	"""Record every command-line flag of this run as ``name=value`` lines.

	:param root_dir: run directory; the record goes to configuration.txt.
	"""
	config_path = os.path.join(root_dir, 'configuration.txt')
	with open(config_path, 'w') as writer:
		# Flags.__flags maps flag names to Flag objects carrying the values.
		for flag_name, flag in Flags.__flags.items():
			writer.write('{}={}\n'.format(flag_name, flag.value))




def add_ELM(train_logits, train_labels, test_logits, test_labels, epoch):
	# NOTE(review): `ELM` is not in scope — the `from util import ELM` import
	# at the top of the file is commented out, so calling this function raises
	# a NameError at the ELM.ELM(...) call; re-enable the import before use.
	with open(os.path.join(model_dir, 'train_logits_epoch={}.txt'.format(epoch)), 'a') as writer:		# save the raw training probabilities
			for j in range(len(train_labels)):
				writer.write('{},{}\n'.format(' '.join(list(map(str, train_logits[j]))), str(train_labels[j])))
	with open(os.path.join(model_dir, 'test_logits_epoch={}.txt'.format(epoch)), 'a') as writer:		# save the raw test probabilities
			for j in range(len(test_labels)):
				writer.write('{},{}\n'.format(' '.join(list(map(str, test_logits[j]))), str(test_labels[j])))
	topK_acc = ELM.ELM(train_logits, train_labels, test_logits, test_labels)		# returns the adjusted top1-top5 accuracies
	with open(os.path.join(model_dir, 'topK.txt'), 'a') as writer:
		writer.write('{}:{:.3f},{:.3f},{:.3f},{:.3f},{:.3f},'.format('ELM_time', topK_acc[0],topK_acc[1],topK_acc[2],topK_acc[3],topK_acc[4], ))
		writer.write('\n')

if __name__ == '__main__':
	# Entry point: full one-vs-rest training followed by voting.  The
	# commented calls below re-run only the voting / inspection stages on
	# previously saved per-class predictions.
	# print(Flags.active_size)
	main()
	#vote_main()
	#check_vote(time_windows[0], sign='train_time')
	#check_vote(time_windows[2], sign='val_time')
	#check_vote(time_windows[1], sign='test_time')
