
import tensorflow as tf
import re
import numpy as np
import random
import datetime
import os
# from sklearn.feature_extraction.text import CountVectorizer

# import util.PATH as PATH

'''
Planned functionality:
1. get_msg_all: read every bug, order by submission time (effectively by
   bug_id) and split into 11 equal piles: pile 1 trains, pile 2 tests, and
   so on — 10 train/test rounds whose averaged results form the final metric;
	1.1. sorting is done by bug_id
2. pad the text data to a uniform length
3. provide the vocabulary
4. feed data batch by batch instead of loading everything into memory,
   via tf.data.Dataset.from_generator()
5. index / one-hot encode the assigned-to developer
	5.1. a developer missing from the developer file gets id -1
'''

# Processing tokens: auxiliary tokens added to the vocabulary.
# <GO> would separate question and reply; <PAD> pads sequences to equal length.
'''Originally 0 was the padding id and 1 stood for unseen words/developers.
It later turned out that a dedicated id 1 was unnecessary and even inflated
accuracy artificially, so unseen words are simply ignored.  Unseen developers
cannot occur, because the developer list contains every developer.'''

class DataHelper():
	
	def __init__(self, PATH):
		self._PAD = b"_PAD"  # padding?, 用_PADK来填充数据至等长
		# _GO = b"_GO"    # <start>
		# _EOS = b"_EOS"  # <end>, end of sentence
		self._UNK = b"_UNK"      # 把没有出现过的词统计为unk(unknown token)
		# _START_VOCAB = [_PAD, _GO, _EOS, _UNK]
		# _START_VOCAB = [_PAD, _UNK]
		self._START_VOCAB = [self._PAD]
		
		self.PATH = PATH
		self.PAD_ID = 0
		# GO_ID = 1
		# EOS_ID = 2
		# UNK_ID = 1

	# max_doc_len = 400
	# mozilla: <=400 covers 99.2% of documents, <=500 covers 99.4%

	# Read all bug records, ordered ascending by bug id.
	# Raw column order: bug_id  assign_to  resolution  dup_id  creation_ts  delta_ts  product  component
	# resolution and dup_id were dropped during preprocessing, so each record becomes:
	# key=bug_id, value=[assign_to, creation_ts, delta_ts, product, component]
	def get_msg_all(self):
		'''
		Read every bug record from PATH.path_bug_msg_all.

		Each tab-separated line is expected to look like:
			bug_id  assign_to  creation_ts  delta_ts  product  component
		Both timestamp columns are converted to POSIX timestamps; the
		trailing UTC-offset token (e.g. "-0400") is dropped before parsing.
		NOTE(review): creation_ts carries no seconds ('%H:%M') while
		delta_ts does ('%H:%M:%S') — this matches the observed data files.

		:return: (bug_msg_all, sorted_bugs) where
			bug_msg_all: dict, key=bug_id (int),
				value=[assign_to, creation_ts, delta_ts, product, component]
			sorted_bugs: the same items as a list sorted by bug_id ascending
		'''
		bug_msg_all = {}
		with open(self.PATH.path_bug_msg_all, 'r') as reader:
			for line in reader:
				fields = line.strip().split('\t')
				# Strip the timezone token, then parse to a POSIX timestamp.
				creation = ' '.join(fields[2].split(' ')[:2])
				fields[2] = datetime.datetime.strptime(creation, '%Y-%m-%d %H:%M').timestamp()
				delta = ' '.join(fields[3].split(' ')[:2])
				fields[3] = datetime.datetime.strptime(delta, '%Y-%m-%d %H:%M:%S').timestamp()
				# The int key matters: downstream code sorts numerically.
				bug_msg_all[int(fields[0])] = fields[1:]
		# Oldest first.  Callers often use only the dict, because the source
		# file is already ordered by bug id.
		sorted_bugs = sorted(bug_msg_all.items(), key=lambda item: item[0])
		return bug_msg_all, sorted_bugs
	#
	def calculate_doc_max_len(self):
		'''
		Count the lines (one token per line) of every document in the
		corpus directory, used to choose the padding length later.
		Best computed once offline rather than on every run.

		:return: dict, key=file name, value=line count of that file
		'''
		lens = {}
		for name in os.listdir(self.PATH.path_corpus):
			# 'with' guarantees the handle is closed (the original version
			# leaked one file descriptor per document); summing over the
			# iterator avoids materialising each file with readlines().
			with open(self.PATH.path_corpus + name, 'r') as reader:
				lens[name] = sum(1 for _ in reader)
		return lens
	
	# Generator used with tf.data.Dataset.from_generator().
	# datas_ids holds the bug ids of one time window; the whole window is returned at once.
	# A window is only ~10k samples, so a true generator is arguably unnecessary.
	def dataset_generator(self, vocabulary, developers_list, bugs_msg_all, datas_ids, max_doc_len, active_size, window_id):
		'''
		Load one time window's samples, converting words and assignees to
		integer indices along the way.

		:param vocabulary: list of words; a word's id is its list index
		:param developers_list: list of developers; id = list index
		:param bugs_msg_all: dict bug_id -> [assign_to, creation_ts, delta_ts, product, component]
		:param datas_ids: bug ids belonging to the current time window
		:param max_doc_len: padding length for the word sequences
		:param active_size: padding length for the developer-activity sequences
		:param window_id: round index of the 10-round incremental experiment
		:return: (sentences, labels, actives, l_sentences, l_actives)
		'''
		sentences = []
		labels = []
		actives = []
		l_sentences = []
		l_actives = []
		for idx in datas_ids:
			words = []
			# One token per line in each corpus file (file name == bug id).
			with open(self.PATH.path_corpus + str(idx), 'r') as reader:
				for line in reader.readlines():
					words.append(line.strip())
			words_to_ids, length_of_sentence = self.data_padding_and_to_ids(words, vocabulary, max_doc_len)
			label = bugs_msg_all[idx][0]       # assign_to
			try:
				label_id = developers_list.index(label)
			except ValueError:
				continue                # assignee not in developers_list -> skip this sample
			active_to_ids, length_of_actives = self.get_active_sequence_vec_by_bug_id(idx, developers_list, active_size, window_id)
			sentences.append(words_to_ids)
			labels.append(label_id)
			actives.append(active_to_ids)
			l_sentences.append(length_of_sentence)
			l_actives.append(length_of_actives)
		return sentences, labels, actives, l_sentences, l_actives
	
	def prepare_tf_input_datas(self, vocabulary, developers_list, bugs_msg_all, max_doc_len, active_size, window_id):
		'''
		Convert every sample up front and keep it in memory, trading space
		for batch-assembly time.

		Each sample is one flat list laid out as
			words_to_ids (max_doc_len) + [label_id] + active_to_ids
			+ [length_of_sentence] + [length_of_actives]
		so that a batch can later be gathered with a single np.take.

		Bugs whose assignee is missing from developers_list are skipped.
		(This version drops the five unused accumulator lists and the dead
		commented-out code of the original.)

		:return: (prepared_datas, idx2bugid); idx2bugid is a list mapping
			row index -> bug_id, usable in both directions.
		'''
		prepared_datas = []
		idx2bugid = []
		for bugid in bugs_msg_all.keys():
			words = []
			with open(self.PATH.path_corpus + str(bugid), 'r') as reader:
				for line in reader:
					words.append(line.strip())
			words_to_ids, length_of_sentence = self.data_padding_and_to_ids(words, vocabulary, max_doc_len)
			label = bugs_msg_all[bugid][0]  # assign_to
			try:
				label_id = developers_list.index(label)
			except ValueError:
				continue  # assignee not in the developer list -> skip the sample
			active_to_ids, length_of_actives = self.get_active_sequence_vec_by_bug_id(bugid, developers_list, active_size, window_id)
			idx2bugid.append(bugid)
			sample = list(words_to_ids)
			sample.append(label_id)
			sample += active_to_ids
			sample.append(length_of_sentence)
			sample.append(length_of_actives)
			prepared_datas.append(sample)
		return prepared_datas, idx2bugid

	def prepare_tf_input_datas_for_singleText(self, vocabulary, developers_list, bugs_msg_all, max_doc_len):
		'''
		Same idea as prepare_tf_input_datas, but without the activity data:
		the singleText model does not consume activity sequences, so leaving
		them out saves preparation time.

		Sample layout: words_to_ids (max_doc_len) + [label_id, length_of_sentence].
		:return: (prepared_datas, idx2bugid)
		'''
		prepared_datas = []
		idx2bugid = []  # row-index <-> bug id mapping (a list gives both directions)
		for bugid in bugs_msg_all.keys():
			with open(self.PATH.path_corpus + str(bugid), 'r') as reader:
				tokens = [line.strip() for line in reader.readlines()]
			words_to_ids, length_of_sentence = self.data_padding_and_to_ids(tokens, vocabulary, max_doc_len)
			try:
				label_id = developers_list.index(bugs_msg_all[bugid][0])  # assign_to
			except ValueError:
				continue  # assignee not in the developer list -> skip
			idx2bugid.append(bugid)
			prepared_datas.append(list(words_to_ids) + [label_id, length_of_sentence])
		return prepared_datas, idx2bugid
	
	def create_pcs(self,):
		'''
		Read the combined "product_component" list from
		<root>/product_components.txt, one entry per line.
		'''
		with open(os.path.join(self.PATH.root, 'product_components.txt'), 'r') as reader:
			return [line.strip() for line in reader]
	
	def create_products(self,):
		'''
		Read the full product list from <root>/products.txt, one per line.
		'''
		with open(os.path.join(self.PATH.root, 'products.txt'), 'r') as reader:
			return [line.strip() for line in reader]
	
	def create_components(self,):
		'''
		Read the full component list from <root>/components.txt, one per line.
		'''
		with open(os.path.join(self.PATH.root, 'components.txt'), 'r') as reader:
			return [line.strip() for line in reader]


	def prepare_tf_input_datas_for_singleText_with_product_components(self, vocabulary, developers_list, bugs_msg_all, max_doc_len):
		'''
		Like prepare_tf_input_datas_for_singleText, but every sample also
		carries the index of its "product_component" combination.

		Sample layout: words_to_ids + [label_id, length_of_sentence, idx_pc].
		:return: (prepared_datas, idx2bugid)
		'''
		pcs = self.create_pcs()
		prepared_datas = []
		idx2bugid = []  # row-index <-> bug id mapping
		for bugid in bugs_msg_all.keys():
			record = bugs_msg_all[bugid]
			# Index of this bug's product/component pair; raises ValueError
			# if the pair is not in the list (same as the original code).
			idx_pc = pcs.index('{}_{}'.format(record[3], record[4]))
			with open(self.PATH.path_corpus + str(bugid), 'r') as reader:
				tokens = [line.strip() for line in reader.readlines()]
			words_to_ids, length_of_sentence = self.data_padding_and_to_ids(tokens, vocabulary, max_doc_len)
			try:
				label_id = developers_list.index(record[0])  # assign_to
			except ValueError:
				continue  # unknown assignee -> skip the sample
			idx2bugid.append(bugid)
			prepared_datas.append(list(words_to_ids) + [label_id, length_of_sentence, idx_pc])
		return prepared_datas, idx2bugid
	
	
	def prepare_tf_input_datas_for_singleText_with_TriBasicText(self, vocabulary, developers_list, bugs_msg_all, max_doc_len):
		'''
		Batch preparation for the TriBasicText model.  Identical to
		prepare_tf_input_datas_for_singleText_with_product_components except
		that product and component are encoded as two separate indices
		instead of one joined "product_component" string.

		Sample layout: words_to_ids + [label_id, length_of_sentence,
			idx_product, idx_component].
		:return: (prepared_datas, idx2bugid)
		'''
		products = self.create_products()
		components = self.create_components()
		prepared_datas = []
		idx2bugid = []  # row-index <-> bug id mapping
		for bugid in bugs_msg_all.keys():
			record = bugs_msg_all[bugid]
			idx_product = products.index(record[3])
			idx_component = components.index(record[4])
			with open(self.PATH.path_corpus + str(bugid), 'r') as reader:
				tokens = [line.strip() for line in reader.readlines()]
			words_to_ids, length_of_sentence = self.data_padding_and_to_ids(tokens, vocabulary, max_doc_len)
			try:
				label_id = developers_list.index(record[0])  # assign_to
			except ValueError:
				continue  # unknown assignee -> skip the sample
			idx2bugid.append(bugid)
			prepared_datas.append(list(words_to_ids) + [label_id, length_of_sentence, idx_product, idx_component])
		return prepared_datas, idx2bugid

	def prepare_tf_input_datas_of_convLSTM_for_singleText(self, vocabulary, developers_list, bugs_msg_all, max_doc_len):
		'''
		Pre-assemble all samples for the convLSTM singleText model.

		The padded word-id sequence of length max_doc_len is expanded into
		max_doc_len * 5 entries via a sliding window (window_size=5,
		stride=1); windows that run past the end are zero-padded.

		Sample layout: window_words_to_ids + [label_id, length_of_sentence].
		:return: (prepared_datas, idx2bugid)
		'''
		prepared_datas = []
		idx2bugid = []  # row-index <-> bug id mapping
		for bugid in bugs_msg_all.keys():
			with open(self.PATH.path_corpus + str(bugid), 'r') as reader:
				tokens = [line.strip() for line in reader.readlines()]
			words_to_ids, length_of_sentence = self.data_padding_and_to_ids(tokens, vocabulary, max_doc_len)
			# Sliding-window expansion: for each position take the next 5
			# ids, zero-padding where the window overruns the sequence.
			window_words_to_ids = []
			for start in range(len(words_to_ids)):
				chunk = words_to_ids[start:start + 5]
				window_words_to_ids += chunk + [0] * (5 - len(chunk))
			try:
				label_id = developers_list.index(bugs_msg_all[bugid][0])  # assign_to
			except ValueError:
				continue  # unknown assignee -> skip the sample
			idx2bugid.append(bugid)
			prepared_datas.append(window_words_to_ids + [label_id, length_of_sentence])
		return prepared_datas, idx2bugid
	
	def prepare_tf_input_datas_for_convLSTM(self, vocabulary, developers_list, bugs_msg_all, max_doc_len, active_size, window_id):
		'''
		Pre-assemble all samples for the convLSTM model (text + activity),
		trading memory for batch-assembly time.

		The padded word-id sequence of length max_doc_len is expanded into
		max_doc_len * 5 entries via a sliding window (window_size=5,
		stride=1, zero padding past the end).

		Sample layout: window_words_to_ids + [label_id] + active_to_ids
			+ [length_of_sentence, length_of_actives].
		(This version drops the five unused accumulator lists and the dead
		commented-out code, and writes the window expansion with slices —
		the boundary case i+5 == len is unchanged: 5 ids, no padding.)

		:return: (prepared_datas, idx2bugid); idx2bugid maps row -> bug_id.
		'''
		prepared_datas = []
		idx2bugid = []
		for bugid in bugs_msg_all.keys():
			words = []
			with open(self.PATH.path_corpus + str(bugid), 'r') as reader:
				for line in reader:
					words.append(line.strip())
			words_to_ids, length_of_sentence = self.data_padding_and_to_ids(words, vocabulary, max_doc_len)
			# Sliding-window expansion, zero-padded at the tail.
			window_words_to_ids = []
			for start in range(len(words_to_ids)):
				chunk = words_to_ids[start:start + 5]
				window_words_to_ids += chunk + [0] * (5 - len(chunk))
			label = bugs_msg_all[bugid][0]  # assign_to
			try:
				label_id = developers_list.index(label)
			except ValueError:
				continue  # unknown assignee -> skip the sample
			active_to_ids, length_of_actives = self.get_active_sequence_vec_by_bug_id(bugid, developers_list, active_size, window_id)
			idx2bugid.append(bugid)
			sample = window_words_to_ids + [label_id]
			sample += active_to_ids
			sample += [length_of_sentence, length_of_actives]
			prepared_datas.append(sample)
		return prepared_datas, idx2bugid

	def data_generator_by_prepared_datas(self, prepared_datas, inputs_ids, idx2bugid):
		'''
		Assemble one batch from the pre-converted samples; compared with the
		I/O-based generator this performs no file reads at all.

		:param prepared_datas: list of flat per-sample rows (see the
			prepare_tf_input_datas* methods for the row layout)
		:param inputs_ids: bug ids of the samples wanted in this batch
		:param idx2bugid: row-index -> bug id list built alongside prepared_datas
		:return: np.ndarray with one row per id in inputs_ids, in order; the
			caller slices the flat rows back into their components
		'''
		# Build the bug id -> row map once (O(n)) instead of calling
		# idx2bugid.index() per sample (O(n) each, O(n*batch) overall).
		# Bug ids in idx2bugid are unique, so this is equivalent to index();
		# an unknown id now raises KeyError instead of ValueError.
		row_of = {bugid: row for row, bugid in enumerate(idx2bugid)}
		rows = [row_of[bugid] for bugid in inputs_ids]
		return np.take(prepared_datas, rows, 0)
	# The index was originally meant to be built from per-word document counts,
	# but that proved cumbersome, so the file order from the previous stage is kept as-is.
	def create_vocabulary(self):
		'''
		Load the vocabulary file, prepending the helper tokens so that the
		padding token always occupies index 0 (== PAD_ID).
		'''
		vocabulary = list(self._START_VOCAB)  # helper tokens come first
		with open(self.PATH.path_vocabulary, 'r') as reader:
			vocabulary.extend(line.strip() for line in reader)
		return vocabulary
	
	def create_vocabulary_freq(self,):
		'''
		Read the vocabulary's (document-)frequency counts from
		<root>/freq.txt and normalise them into relative frequencies that
		sum to 1.
		'''
		with open(os.path.join(self.PATH.root, 'freq.txt'), 'r') as reader:
			counts = [int(line.strip()) for line in reader]
		total = sum(counts)
		return [count / total for count in counts]
	
	# Pad a single document and convert its words to numeric indices.
	def data_padding_and_to_ids(self, feature, vocabulary, max_doc_len):
		'''
		Convert one document's words to vocabulary ids, truncate to
		max_doc_len and pad with PAD_ID.

		:param feature: list of word strings of one document
		:param vocabulary: list of words; a word's id is its list index
		:param max_doc_len: fixed output length
		:return: (ids padded to max_doc_len, true length before padding)
		'''
		# Build the word->id map once: the original called list.index()
		# per word, i.e. an O(V) scan for every token.  setdefault keeps
		# the FIRST occurrence, matching list.index() semantics.
		word_to_id = {}
		for i, word in enumerate(vocabulary):
			word_to_id.setdefault(word, i)
		# Out-of-vocabulary words are dropped (TODO: they could instead be
		# mapped to a dedicated UNK id).
		ids = [word_to_id[word] for word in feature if word in word_to_id]
		ids = ids[:max_doc_len]     # cut documents longer than max_doc_len
		# Pad AFTER the real ids so long zero runs never precede content.
		return ids + [self.PAD_ID] * (max_doc_len - len(ids)), len(ids)
	
	# Split the dataset into 11 piles by time window —
	# i.e. cut the ordered bug_id list into equally sized consecutive slices.
	def split_dataset_by_time_windows(self, bug_msg_all):
		'''
		Cut the ascending (i.e. chronological) bug-id list into consecutive
		slices of size len // 11.
		NOTE(review): when the total is not divisible by 11 this also yields
		a shorter 12th remainder slice — preserved from the original code;
		confirm callers expect it.
		'''
		ordered_ids = sorted(bug_msg_all.keys())
		step = len(ordered_ids) // 11
		return [ordered_ids[start:start + step] for start in range(0, len(ordered_ids), step)]
	
	def create_developers_list(self):
		'''
		Load the developer list, prepending the helper tokens so that
		index 0 stays reserved for padding (i.e. "no developer").
		'''
		developers_list = list(self._START_VOCAB)
		with open(self.PATH.path_developer, 'r') as reader:
			developers_list.extend(line.strip() for line in reader)
		return developers_list
	
	# Look up and read the pre-extracted developer-activity file for a bug id.
	# The per-bug activity sequences were saved to files during preprocessing:
	# faster at run time than computing them on the fly, at the cost of
	# flexibility and a more bloated project layout — a trade-off.
	def get_active_sequence_vec_by_bug_id(self, current_id, developers_list, active_size, window_id):
		'''
		Read the pre-extracted developer-activity sequence of one bug and
		convert it to developer indices.

		:param current_id: bug id; also the activity file name
		:param developers_list: list of developers; id = list index
		:param active_size: fixed output length
		:param window_id: round of the 10-round incremental experiment; each
			round has its own pre-built activity directory
		:return: (activity sequence as developer ids, padded/truncated to
			exactly active_size, and its padded length, i.e. active_size)
		'''
		active_vec = []         # activity sequence as numeric indices
		with open(self.PATH.path_active_list + str(window_id) + '/' + str(current_id), 'r') as reader:
			for line in reader.readlines():
				w = line.strip()
				try:
					active_vec.append(developers_list.index(w))     # developer -> index in developers_list
				except ValueError:
					continue            # developer unknown -> skip
		# Truncate sequences longer than active_size: downstream batch rows
		# are flat lists sliced at fixed offsets, so every sample must
		# contribute exactly active_size entries.  The original only padded
		# and could overflow, corrupting the fixed layout (compare the
		# truncation in data_padding_and_to_ids).
		active_vec = active_vec[:active_size]
		padding_active_vec = active_vec + [self.PAD_ID] * (active_size - len(active_vec))
		return padding_active_vec, len(padding_active_vec)
		# return padding_active_vec, len(active_vec)
	
	def extract_small_balance_val_set(self, bug_ids, bug_msg_all):
		'''
		From the original training set (bug_ids) extract a small "balanced"
		validation set — one that contains every developer with a fixed
		number of samples each — and return the remaining ids as the new
		training set.

		NOTE(review): the original plan mentions 5 samples per developer,
		but the code below takes the LAST 10 bugs of each developer for the
		validation set.  Developers with fewer than 10 bugs contribute ALL
		of their bugs to BOTH sets, i.e. train/validation overlap — confirm
		this is intentional.

		:param bug_ids: bug ids of the original training set
		:param bug_msg_all: key=bug_id, value=[assign_to, creation_ts, delta_ts, product, component]
		:return: (developers, trainset_ids, valset_ids)
		'''
		# Group the bug ids by the developer who fixed them.
		fixed_by_developers = {}
		for i in range(len(bug_ids)):       # collect every bug id per developer
			devr = bug_msg_all[bug_ids[i]][0]
			if devr not in fixed_by_developers.keys():
				fixed_by_developers[devr] = []
			fixed_by_developers[devr].append(bug_ids[i])
		developers = [] + self._START_VOCAB     # developer list incl. helper tokens (index 0 = padding)
		trainset_ids = []   # training ids after the extraction
		valset_ids = []     # "balanced" validation set (<=10 samples per class)
		for devr, ids in fixed_by_developers.items():
			if len(ids) >= 10:
				developers.append(devr)
				trainset_ids += ids[0:-10]
				valset_ids += ids[-10:]     # last 10 go to validation, the rest to training
			else:
				# Fewer than 10 bugs: the very same ids land in both sets.
				developers.append(devr)
				trainset_ids += ids
				valset_ids += ids
	
		# TODO: persist developers / train ids / val ids; the vocabulary is
		# reused unchanged, the activity lists would need re-deriving.
		return developers, trainset_ids, valset_ids
	
	def split_dataset_by_six_two_two(self, bug_msg_all):
		'''
		Chronological 6:2:2 split into training / validation / test sets
		(introduced 2019-10-16).

		:return: [train_set, eval_set, val_set] — NOTE the return order:
			training set, TEST set, validation set.
		'''
		ordered = sorted(bug_msg_all.keys())  # ascending == chronological
		tenth = int(len(ordered) / 10)
		train_set = ordered[:tenth * 6]
		val_set = ordered[tenth * 6:tenth * 8]
		eval_set = ordered[tenth * 8:]
		return [train_set, eval_set, val_set]
	
	def split_dataset_by_eight_one_one(self, bug_msg_all):
		'''
		Chronological 8:1:1 split into training / validation / test sets.

		:return: [train_set, eval_set, val_set] — NOTE the return order:
			training set, TEST set, validation set.
		'''
		ordered = sorted(bug_msg_all.keys())  # ascending == chronological
		tenth = int(len(ordered) / 10)
		train_set = ordered[:tenth * 8]
		val_set = ordered[tenth * 8:tenth * 9]
		eval_set = ordered[tenth * 9:]
		return [train_set, eval_set, val_set]
	
	def split_dataset_by_eight_to_two(self, bug_msg_all):
		'''
		Chronological 8:2 split into training and test sets; the validation
		set is simply the test set.

		:return: [train_set, eval_set, val_set] — training, test, validation
			(the last two are equal).
		'''
		ordered = sorted(bug_msg_all.keys())  # ascending == chronological
		cut = int(len(ordered) * 0.8)         # first 80% becomes the training set
		train_set = ordered[:cut]
		val_set = ordered[cut:]
		eval_set = ordered[cut:]
		return [train_set, eval_set, val_set]
	
	def read_dataset_by_category(self,):
		'''
		Load the persisted per-category 8:2 split: train.txt / test.txt /
		val.txt under PATH.root, one bug id per line.

		:return: (train_set, test_set, val_set) as lists of int bug ids
		'''
		def read_ids(sign):
			# Each split file holds one integer bug id per line.
			with open(os.path.join(self.PATH.root, '{}.txt'.format(sign)), 'r') as reader:
				return [int(line.strip()) for line in reader]
		return read_ids('train'), read_ids('test'), read_ids('val')
	
	def __split_dataset_by_category(self, bug_msg_all):
		'''
		按照类别划分，每个类别取80%做训练集，20%做测试集。
		:param bug_msg_all:
		:return:
		'''
		fixed_by_developers = {}
		# for i in range(len(bug_msg_all)):  # 统计每个开发者修复的所有bug的id
		for bugid, value in bug_msg_all.items():
			devr = value[0]
			if devr not in fixed_by_developers.keys():
				fixed_by_developers[devr] = []
			fixed_by_developers[devr].append(bugid)
		train_ids = []
		eval_ids = []
		for devr, value in fixed_by_developers.items():
			# value = sorted(value)       # 将每类按照bugid排序，就算没有这句，value其实也是具有顺序的。dict虽然是无序的，但是它每次无序都一样
			delta = int(len(value)*0.8)
			train_ids += value[:delta]
			eval_ids += value[delta:]
		# 按bugid分别排序训练集和测试集，这样做有个好处，就是按照固定形式将类别数据打散
		return [sorted(train_ids), sorted(eval_ids)]



if __name__ == '__main__':
	# Ad-hoc experiments (corpus length statistics, split sanity checks)
	# used to live here as commented-out calls to module-level functions
	# that no longer exist — they are DataHelper methods now, so the dead
	# code was removed.  This module is meant to be imported.
	pass
