import os
from gensim.models import Word2Vec
import sys
import argparse

sys.path.append('./')

from util.PATH import PATH
from src.data_helper import DataHelper

'''
Word-level embedding pre-training / fine-tuning utilities (word2vec).
'''
# Example: the fine-tuned Mozilla dataset alone contributes 50k+ distinct words.
# corpus_path = '/document/Bug_msg/Mozilla/keywords_stemed'

# save_path = '../data/'
# Directory where trained word2vec models are stored.
save_path = 'data/word2vec'
# exist_ok avoids the check-then-create race of `if not exists: makedirs`.
os.makedirs(save_path, exist_ok=True)

def pretrain_char_embedding(model_name, sg=0, hs=1):
	'''
	Pre-train a word-level embedding table over the Mozilla/Eclipse/Gnome corpora.

	:param model_name: file name under ``save_path`` to store the trained model
	:param sg: 0 for CBOW, 1 for skip-gram (may arrive as a string from argparse)
	:param hs: 1 for hierarchical softmax, 0 for negative sampling
	:raises TypeError: if ``sg`` is neither 0 nor 1
	:return: None; the model is saved to ``save_path``/``model_name``
	'''
	# argparse delivers strings; gensim expects real ints for sg/hs, so
	# normalize once here (the original only converted inside the comparison).
	sg = int(sg)
	hs = int(hs)
	if sg == 0:
		window = 5      # CBOW: narrower context window
	elif sg == 1:
		window = 10     # skip-gram: wider context window
	else:
		raise TypeError('sg must be 0 (CBOW) or 1 (skip-gram), got {!r}'.format(sg))

	sentences = []
	for foldername in ['Mozilla', 'Eclipse', 'Gnome']:
		corpus_path = '/home/dataset/Bug_msg_new/{}/keywords_stemed'.format(foldername)
		for filename in os.listdir(corpus_path):
			# Each corpus file becomes one "sentence": one token per line.
			with open(os.path.join(corpus_path, filename), 'r') as reader:
				sentences.append([line.strip() for line in reader])
	print(len(sentences))

	# word2vec expects a 2-D list: each row is a sentence, each column a word.
	# sg=0: CBOW, sg=1: skip-gram
	# hs=0: negative sampling, hs=1: hierarchical softmax
	model = Word2Vec(sentences, size=100, iter=20, min_count=5, sg=sg, hs=hs, window=window)
	model.save(os.path.join(save_path, model_name))



def incremental_training(dataset, model_name):
	'''
	Incrementally fine-tune a pre-trained word2vec model on one dataset.

	:param dataset: name of the dataset supplying the fine-tuning corpus
	:param model_name: name of the pre-trained model file under ``save_path``
	:return: None; the fine-tuned model is saved as ``<dataset>_new_<model_name>``
	'''
	# Extract the training split of the target dataset — only training
	# bug reports feed the fine-tuning corpus.
	helper = DataHelper(PATH(dataset))
	bug_msg_all, _ = helper.get_msg_all()
	train_windows = helper.split_dataset_by_eight_to_two(bug_msg_all)[0]

	corpus = '/home/dataset/PartFive/{}/corpus_modified/'.format(dataset)
	# corpus = '/document/Bug_msg/PartFive/{}/corpus_modified/'.format(dataset)
	# Each bug's corpus file becomes one sentence: one token per line.
	sentences = []
	for bugid in train_windows:
		with open(os.path.join(corpus, str(bugid)), 'r') as fh:
			sentences.append([line.strip() for line in fh])

	model = Word2Vec.load(os.path.join(save_path, model_name))
	model.build_vocab(sentences=sentences, update=True)
	model.train(sentences=sentences, total_examples=model.corpus_count, epochs=model.iter)
	model.save(os.path.join(save_path, '{}_new_{}'.format(dataset, model_name)))


def read_embedding_model(model_name, datasetname=None):
	'''
	Load a saved word2vec model and expose its vocabulary and lookup table.

	:param model_name: model file name under ``save_path``
	:param datasetname: unused; kept (now optional) for backward compatibility
	                    with existing positional callers
	:return: ``(vocabulary, lookup_table)`` where ``vocabulary`` is the list of
	         words (index -> word) and ``lookup_table`` is the ndarray of vectors
	'''
	word_vecs = Word2Vec.load(os.path.join(save_path, model_name))
	# wv.vectors is the ndarray lookup table; wv.index2word maps each row
	# index back to its word, i.e. the vocabulary in index order.
	return word_vecs.wv.index2word, word_vecs.wv.vectors
	


def pre_train_model(model_name):
	'''Load and return the pre-trained word2vec model stored under ``save_path``.'''
	model_path = os.path.join(save_path, model_name)
	return Word2Vec.load(model_path)


if __name__ == '__main__':
	parser = argparse.ArgumentParser(description='helloworld')
	parser.add_argument('name', help='模型名字')
	# type=int so sg/hs reach gensim as integers (they were previously
	# forwarded as raw strings, which Word2Vec does not expect).
	parser.add_argument('-sg', dest='sg', type=int, help="启用skip-gram模型", default=0)
	parser.add_argument('-hs', dest='hs', type=int, help="启用Hierarchical Softmax方法", default=1)
	# --pt selects pre-training (default); --ft selects fine-tuning.
	parser.add_argument('--ft', dest='train', action='store_false', help='微调训练')
	parser.add_argument('--pt', dest='train', action='store_true', help='预训练')
	parser.add_argument('-dataset', dest='dataset', help='微调所需的数据集的名字')
	parser.set_defaults(train=True)

	args = parser.parse_args()
	if args.train:      # pre-train on the full corpus
		pretrain_char_embedding(args.name, args.sg, args.hs)
	else:               # incremental fine-tuning on one dataset
		incremental_training(args.dataset, args.name)