from gensim.models import FastText
import sys
sys.path.append('./')
import os
import numpy as np
import argparse
import datetime

from util.PATH import PATH
from src.data_helper import DataHelper

# root_path = '/document/Bug_msg_new'
root_path = '/home/dataset/Bug_msg_new'  # root directory of the bug-report corpora (Mozilla/Eclipse/Gnome)
save_path = 'data/fasttext/'  # all trained/fine-tuned fasttext models are saved here
if not os.path.exists(save_path):
	os.makedirs(save_path)

# NOTE(review): the triple-quoted string below is a stray author note (translated):
# "That is, by comparing fasttext with Word2vec, can we see the effect of OOV on accuracy?"
'''
也就是说，通过fasttext和Word2vec的对比，我们能看出来，OOV对正确率的影响？
'''

def pretrain_char_embedding(model_name, sg=0, hs=1):
	'''
	Pre-train a FastText embedding model on the full bug-report corpora
	(Mozilla, Eclipse, Gnome) and persist it under `save_path`.

	:param model_name: file name used when saving the trained model
	:param sg: 0 for CBOW, 1 for skip-gram; string values such as '0' are accepted
	:param hs: 1 for hierarchical softmax, 0 for negative sampling; strings accepted
	:raises TypeError: if sg is neither 0 nor 1
	:return: None (the model is written to disk)
	'''
	# argparse may hand these in as strings; normalise once so gensim also
	# receives real ints (previously the raw string was passed to FastText).
	sg = int(sg)
	hs = int(hs)
	if sg == 0:
		window = 5   # CBOW: smaller context window
	elif sg == 1:
		window = 10  # skip-gram: larger context window
	else:
		raise TypeError('sg must be 0 (CBOW) or 1 (skip-gram), got {!r}'.format(sg))

	sentences = []
	for foldername in ['Mozilla', 'Eclipse', 'Gnome']:
		print('{}:数据集{}读取开始'.format(datetime.datetime.now().isoformat(), foldername))
		corpus_path = '{}/{}/keywords_stemed'.format(root_path, foldername)
		for filename in os.listdir(corpus_path):
			# each corpus file is one document, one token per line
			with open(os.path.join(corpus_path, filename), 'r') as reader:
				sentences.append([line.strip() for line in reader])

	print('{}:开始词向量训练'.format(datetime.datetime.now().isoformat()))
	# old gensim API (size/iter); min_n/max_n bound the char n-gram lengths
	model = FastText(sentences, size=100, window=window, min_count=5, iter=20,
	                 min_n=3, max_n=6, word_ngrams=1, sg=sg, hs=hs)
	model.save(os.path.join(save_path, model_name))
	# NOTE: fasttext does not store the char-n-gram strings themselves; it only
	# keeps their hash buckets and looks vectors up by bucket.


def incremental_training(dataset, model_name):
	'''
	Incrementally fine-tune a pre-trained FastText model on one dataset.

	Only the training split (first window of the 8/1/1 split) is used as the
	fine-tuning corpus. The updated model is saved next to the original under
	the name '<dataset>_new_<model_name>'.

	:param dataset: name of the dataset providing the fine-tuning corpus
	:param model_name: file name of the pre-trained model under `save_path`
	:return: None (the fine-tuned model is written to disk)
	'''
	path_conf = PATH(dataset)
	helper = DataHelper(path_conf)
	bug_msg_all, _ = helper.get_msg_all()
	# index 0 of the 8/1/1 split is the training window
	train_ids = helper.split_dataset_by_eight_one_one(bug_msg_all)[0]

	corpus = '/home/dataset/PartFive/{}/corpus_modified/'.format(dataset)
	sentences = []  # fine-tuning corpus: one token list per training bug report
	for bugid in train_ids:
		with open(os.path.join(corpus, str(bugid)), 'r') as reader:
			sentences.append([line.strip() for line in reader])

	model = FastText.load(os.path.join(save_path, model_name))
	model.build_vocab(sentences=sentences, update=True)  # extend the existing vocab
	model.train(sentences=sentences, total_examples=model.corpus_count, epochs=model.iter)
	model.save(os.path.join(save_path, '{}_new_{}'.format(dataset, model_name)))


def read_embedding_model(model_name, dataset):
	'''
	Build a vocabulary and lookup table restricted to the words that actually
	occur in the dataset's train/validation/test splits.

	The pre-trained vocabulary is too large to assemble batches from, so only
	words seen in the three splits are kept. OOV words are resolved through
	fasttext's char-n-gram mechanism; words none of whose n-grams appeared in
	the pre-training corpus raise KeyError and are skipped.

	:param model_name: file name of the saved FastText model under `save_path`
	:param dataset: dataset name whose corpus files provide the word list
	:return: (index2word, vectors) — word list and row-aligned float32 matrix
	'''
	Path = PATH(dataset)
	data_helper = DataHelper(Path)
	bug_msg_all, _ = data_helper.get_msg_all()
	time_windows = data_helper.split_dataset_by_eight_one_one(bug_msg_all)

	fasttext = FastText.load(os.path.join(save_path, model_name))
	dim = len(fasttext.wv.vectors[0])  # embedding dimensionality

	index2word = []
	seen = set()   # O(1) membership test (the old list scan was O(n) per word)
	rows = []      # collect vectors and stack once (np.append copied the whole matrix each time)
	skipped = 0    # words whose n-grams never appeared in the pre-training corpus
	for i in range(3):  # train / validation / test splits
		for idx in time_windows[i]:
			with open(Path.path_corpus + str(idx), 'r') as reader:
				for line in reader:
					word = line.strip()
					if word in seen:
						continue
					seen.add(word)
					try:
						# fasttext synthesizes OOV vectors from char n-grams;
						# raises KeyError when none of the n-grams are known
						rows.append(fasttext.wv.word_vec(word))
						index2word.append(word)
					except KeyError:
						skipped += 1
	# stack once; downstream code expects float32 (np.append produced float64)
	if rows:
		vectors = np.asarray(rows, dtype=np.float32)
	else:
		vectors = np.empty((0, dim), dtype=np.float32)  # keep the original empty shape
	return index2word, vectors

if __name__ == '__main__':
	parser = argparse.ArgumentParser(description='helloworld')
	parser.add_argument('name', help='模型名字')
	# type=int so downstream code (and gensim) receives real ints, not strings
	parser.add_argument('-sg', dest='sg', type=int, default=0, help="启用skip-gram模型")
	parser.add_argument('-hs', dest='hs', type=int, default=1, help="启用Hierarchical Softmax方法")
	parser.add_argument('--ft', dest='train', action='store_false', help='微调训练')
	parser.add_argument('--pt', dest='train', action='store_true', help='预训练')
	parser.add_argument('-dataset', dest='dataset', help='微调所需的数据集的名字')
	parser.set_defaults(train=True)  # default mode: pre-training

	args = parser.parse_args()
	if args.train:  # pre-train on the full corpora
		pretrain_char_embedding(args.name, args.sg, args.hs)
	else:  # fine-tuning needs a dataset; fail fast with a clear message
		if args.dataset is None:
			parser.error('--ft (fine-tuning) requires -dataset')
		incremental_training(args.dataset, args.name)
