from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
import lda
import numpy as np
import os
from sklearn.externals import joblib  #也可以选择pickle等保存模型，请随意
import time
import random
import scipy.stats as stats

import util.PATH as PATH

# MODEL_PATH = '../data/1LDA/{}/'.format(str(time.time()))

# T = len(get_developers())

random.seed(1)  # fix the global RNG so the train/eval split and doc shuffling are reproducible across runs
class GibbsLDA(object):
	'''LDA (collapsed Gibbs sampling) pipeline for bug-report assignment.

	Loads bug documents from disk, splits them into train/eval sets grouped by
	assignee label, fits a shared CountVectorizer vocabulary, trains or reloads
	an ``lda.LDA`` model, and computes accuracy / top-K metrics.
	'''

	def __init__(self, T, n_iteration, alpha, beta, name):
		'''
		:param T: number of LDA topics
		:param n_iteration: number of Gibbs-sampling iterations
		:param alpha: document-topic Dirichlet prior
		:param beta: topic-word Dirichlet prior (forwarded to lda.LDA as ``eta``)
		:param name: dataset name, for example 'Eclipse', 'Mozilla', ...
		'''
		self.T = T
		self.n_iteration = n_iteration
		self.alpha = alpha
		self.beta = beta
		self.name = name
		# Per-dataset directory where models / vectorizers are persisted.
		self.MODEL_PATH = PATH.project_root + 'data/' + name
		if not os.path.exists(self.MODEL_PATH):
			os.makedirs(self.MODEL_PATH)

	def _get_developers(self):
		'''Return the developer names, one per stripped line of the developer file.'''
		with open(PATH.path_developer, 'r') as reader:
			return [line.strip() for line in reader]

	def _get_bugs_msg(self):
		'''Return {bug_id: assignee} parsed from the tab-separated bug-info file.

		Field 0 is the bug id and field 1 the assign_to string; field 7
		(component) can be substituted to sanity-check the pipeline.
		'''
		bugs_msg = {}     # key: bug id, value: assign_to
		with open(PATH.path_small_bug_msg_all, 'r') as reader:
		# with open(PATH.path_bug_msg_all, 'r') as reader:
			for line in reader:
				words = line.strip().split('\t')
				# bugs_msg[words[0]] = words[7]
				bugs_msg[words[0]] = words[1]
		return bugs_msg

	def _group_by_label(self, labels):
		'''Group bug ids by label.

		:param labels: {bug_id: label}
		:return: {label: [bug_id, ...]}, i.e. all bugs fixed by each developer,
		         preserving the input's insertion order within each group
		'''
		labels_groups = {}
		for bug_id, label in labels.items():
			labels_groups.setdefault(label, []).append(bug_id)
		return labels_groups

	def _get_docs_list(self, label_to_bugids, all_labels):
		'''Build the shuffled document list and the matching label list.

		:param label_to_bugids: {label: [bug_id]} subset of bugs to load
		:param all_labels: {bug_id: label} for every known bug
		:return: (docs_list, label_list) aligned by index; docs_list[i] is the
		         space-joined word list of the document whose true label is
		         label_list[i]
		'''
		path_doc = '/home/wanglinhui/Desktop/二师兄/small_docs/'   # small debug corpus; TODO switch to PATH.path_filtered_corpus for the full run
		# path_doc = PATH.path_filtered_corpus

		bugids = []
		for ids in label_to_bugids.values():
			bugids += ids
		random.shuffle(bugids)

		# Labels in shuffled order, still raw strings (not indexed).
		label_list = [all_labels[bug_id] for bug_id in bugids]

		docs_list = []
		for bug_id in bugids:
			with open(os.path.join(path_doc, bug_id), 'r') as reader:
				words = [line.strip() for line in reader]
			# CountVectorizer expects a 1-D list of documents, each document a
			# single string with words separated by spaces.
			docs_list.append(' '.join(words))
		return docs_list, label_list

	def _create_vectorizer_and_save(self, docs_list):
		'''Fit a fresh CountVectorizer on ``docs_list``.

		NOTE: train and eval MUST share the same fitted vectorizer — it holds
		the vocabulary learned during ``fit``; using two differently-fitted
		vectorizers silently breaks the feature mapping and tanks accuracy.
		:return: (tf_vectorizer, tf) where tf is the (n_docs, n_terms) sparse
		         term-count matrix
		'''
		tf_vectorizer = CountVectorizer()
		tf = tf_vectorizer.fit_transform(docs_list)

		# Persistence currently disabled; re-enable if fitting gets expensive:
		# joblib.dump(tf, os.path.join(self.MODEL_PATH, 'tf'))
		# joblib.dump(tf_vectorizer, os.path.join(self.MODEL_PATH, 'tf_vectorizer'))
		return tf_vectorizer, tf

	def _load_vectorizer_from_save(self):
		'''Load a previously persisted (tf_vectorizer, tf) pair from MODEL_PATH.'''
		tf_vectorizer = joblib.load(os.path.join(self.MODEL_PATH, 'tf_vectorizer'))
		tf = joblib.load(os.path.join(self.MODEL_PATH, 'tf'))
		return tf_vectorizer, tf

	def split_train_and_eval_set(self):
		'''Split bugs into train/eval sets, sampling 20% of each label for eval.

		:return: (train_docs_list, train_label_list, eval_docs_list,
		          eval_label_list, eval_label_to_bugids); labels are the raw
		          assignee strings, not indexed
		'''
		bugs_msg = self._get_bugs_msg()
		label_to_bugids = self._group_by_label(bugs_msg)
		train_label_to_bugids = {}  # key=label, value=[bugids]
		eval_label_to_bugids = {}
		for label, ids in label_to_bugids.items():
			size = int(len(ids) * 0.2)
			evals = random.sample(ids, size)    # sample without replacement
			eval_label_to_bugids[label] = evals
			# set lookup instead of list scan: O(n) instead of O(n*m) per label
			eval_set = set(evals)
			train_label_to_bugids[label] = [x for x in ids if x not in eval_set]

		train_docs_list, train_label_list = self._get_docs_list(train_label_to_bugids, bugs_msg)
		eval_docs_list, eval_label_list = self._get_docs_list(eval_label_to_bugids, bugs_msg)

		return train_docs_list, train_label_list, eval_docs_list, eval_label_list, eval_label_to_bugids

	def create_LDA_model(self, sign, docs_list):
		'''Train a new LDA model (sign == 0) or reload a persisted one (sign == 1).

		:param sign: 0 -> fit and persist a new model; 1 -> load the saved one
		:param docs_list: documents used to (re)fit the CountVectorizer
		:return: (model, tf_vectorizer); the vectorizer carries the vocabulary
		:raises ValueError: if ``sign`` is neither 0 nor 1 (previously this
		        crashed with a NameError on the unbound ``tf_vectorizer``)
		'''
		model_file = os.path.join(
			self.MODEL_PATH,
			'model_{}_T{}_N{}_a{}_b{}'.format(self.name, self.T, self.n_iteration, self.alpha, self.beta))
		if sign == 0:       # first run: train and persist the model
			tf_vectorizer, tf = self._create_vectorizer_and_save(docs_list)
			model = lda.LDA(n_topics=self.T, n_iter=self.n_iteration, alpha=self.alpha, eta=self.beta, random_state=None, refresh=5)
			# fit expects shape (n_samples, n_features), which matches tf;
			# tf stays sparse (tf.toarray() would be the dense form).
			# Initializing every token assignment makes this step slow.
			model.fit(tf)
			joblib.dump(model, model_file)      # persist the trained model
		elif sign == 1:     # later runs: refit the vectorizer, reload the model
			# Refitting on the same corpus yields the same vocabulary, so the
			# reloaded model stays consistent with this vectorizer (verified
			# empirically: repeated runs give identical accuracy).
			tf_vectorizer, _ = self._create_vectorizer_and_save(docs_list)
			model = joblib.load(model_file)
		else:
			raise ValueError('sign must be 0 (train) or 1 (load), got {}'.format(sign))
		return model, tf_vectorizer

	def _calculate_metrics(self, eval_label_list, predictions, eval_label_to_bugids, fn):
		'''Count predictions accepted by ``fn`` and print the overall accuracy.

		:param fn: fn(true_label, prediction) -> bool, the match criterion
		:param eval_label_to_bugids: accepted for per-label accuracy reporting
		        (currently not emitted)
		:return: overall accuracy in [0, 1]; 0.0 for an empty eval set
		        (previously an empty set raised ZeroDivisionError)
		'''
		n_categorized_correctly = 0
		n_s = {}        # per-label correct counts, kept for future reporting
		for label, prediction in zip(eval_label_list, predictions):
			if fn(label, prediction):
				n_categorized_correctly += 1
				n_s[label] = n_s.get(label, 0) + 1

		acc = n_categorized_correctly / len(eval_label_list) if eval_label_list else 0.0
		print('n_categorized_correctly: {}'.format(n_categorized_correctly))
		print('最终的正确率为: {}'.format(acc))
		return acc

	def calculate_acc(self, eval_label_list, predictions, eval_label_to_bugids=None):
		'''Exact-match accuracy: predictions[i] must equal the true label.'''
		fn = lambda a, b: a == b
		return self._calculate_metrics(eval_label_list, predictions, eval_label_to_bugids, fn)

	def calculate_topK(self, eval_label_list, predictions, K, eval_label_to_bugids=None,  reverse=False):
		'''Top-K accuracy for dict-shaped predictions.

		:param predictions: list of {label: score} dicts, one per eval sample
		:param K: number of candidates kept per sample
		:param reverse: False -> keep the K smallest scores (e.g. KL divergence);
		        True -> keep the K largest (e.g. probabilities)
		'''
		# sort the labels by their score and check membership in the first K
		fn = lambda a, b: a in sorted(b, key=b.get, reverse=reverse)[:K]
		return self._calculate_metrics(eval_label_list, predictions, eval_label_to_bugids, fn)

	def calculate_svm_topK(self, eval_label_list, predictions, K, index_to_class, eval_label_to_bugids=None):
		'''Top-K accuracy for SVM-style predictions (one score sequence per sample).

		:param index_to_class: maps class index -> label string
		'''
		def fn(a, b):
			scores = dict(enumerate(b))     # index -> score
			# translate the K highest-scoring indices back to label strings
			return a in [index_to_class[i] for i in sorted(scores, key=scores.get, reverse=True)[:K]]
		return self._calculate_metrics(eval_label_list, predictions, eval_label_to_bugids, fn)

	def write_predictions_to_file(self, name, eval_label_list, predictions, index_to_class=None, reverse=False):
		'''Write each sample's top-10 predicted labels plus its true label as CSV.

		:param index_to_class: when given (SVM case) predictions[i] is a score
		        sequence whose indices are mapped to labels; otherwise
		        predictions[i] is a {label: score} dict
		:param reverse: sort direction for the dict case (see calculate_topK)
		'''
		file = os.path.join(self.MODEL_PATH, 'predict_{}_T{}_N{}_a{}_b{}.csv'.format(name, self.T, self.n_iteration, self.alpha, self.beta))
		with open(file, 'w') as writer:
			for i in range(len(eval_label_list)):
				if index_to_class is not None:      # SVM: score sequence per sample
					prediction = dict(enumerate(predictions[i]))
					top10 = [index_to_class[index] for index in sorted(prediction, key=prediction.get, reverse=True)[:10]]
				else:                               # dict of label -> score
					top10 = sorted(predictions[i], key=predictions[i].get, reverse=reverse)[:10]
				writer.write(' '.join(top10))
				writer.write(',{}\n'.format(eval_label_list[i]))
