from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
import lda
import numpy as np
import os
from sklearn.externals import joblib  #也可以选择pickle等保存模型，请随意
import time
from sklearn.metrics import classification_report
import random
import scipy.stats as stats

from baseline.basic.LDA_model import GibbsLDA
# import util.PATH as PATH
from baseline.basic.dataset import Dataset as Dataset

import util.PATH as PATH


def main(name, train_ids, test_ids, data_helper):
	"""Train an LDA topic model on the training bugs and triage each test bug.

	Each test bug is assigned to the label (assignee or component) whose
	centroid doc-topic distribution has the smallest KL divergence from the
	bug's own doc-topic distribution. Top-1..top-5 accuracies are printed and
	the per-bug KL scores are written to file.

	Args:
		name: dataset name, forwarded to Dataset and GibbsLDA.
		train_ids: bug ids used for training.
		test_ids: bug ids used for evaluation.
		data_helper: project helper exposing create_developers_list() and
			get_msg_all().
	"""
	T = 100             # number of topics
	n_iteration = 100   # Gibbs sampling iterations
	alpha = 50 / T      # common LDA heuristic: alpha = 50 / n_topics
	beta = 0.1
	developers_list = data_helper.create_developers_list()
	bug_msg_all, _ = data_helper.get_msg_all()
	dataset = Dataset(name)
	mLDA = GibbsLDA(T, n_iteration, alpha, beta, name)
	train_docs_list, train_label_list = dataset.get_features_and_labels(developers_list, bug_msg_all, train_ids)
	eval_docs_list, eval_label_list = dataset.get_features_and_labels(developers_list, bug_msg_all, test_ids)
	model, tf_vectorizer = mLDA.create_LDA_model(1, train_docs_list)

	# Per-document topic distribution, shape = (n_samples, n_topics), ndarray.
	train_doc_topic = model.doc_topic_

	# Group the training documents' doc-topic rows by their label
	# (label = assignee field or component field).
	label_to_doctopics = {}
	for doc_topic, label in zip(train_doc_topic, train_label_list):
		label_to_doctopics.setdefault(label, []).append(doc_topic)

	# Average the doc-topic rows of each label to get one centroid per label,
	# used as the training set's P(D|z); each centroid has shape (n_topics,).
	print('正在计算每个标注的平均doc-topic分布:')
	print('len(label_to_doctopics.keys()): {}'.format(len(label_to_doctopics.keys())))	# 488正常
	each_label_centroids = {
		label: list(np.mean(rows, axis=0))
		for label, rows in label_to_doctopics.items()
	}

	tf_eval = tf_vectorizer.transform(eval_docs_list)  # vectorize with the trained vocabulary
	eval_doc_topic = model.transform(tf_eval, max_iter=n_iteration)       # array-like required

	KLs = []            # per test bug: {label: KL(px || centroid)}
	predictions = []    # label with the minimal KL divergence per test bug
	print('正在计算KL散度:')
	for px in eval_doc_topic:
		KL_with_each_label = {}
		# inf sentinel: a fixed constant (e.g. 1e6) can be exceeded by the KL
		# divergence of near-disjoint distributions and silently miss the minimum.
		min_KL = float('inf')
		min_key = None
		# KL divergence between this test bug and every label centroid.
		for key, qx in each_label_centroids.items():
			kl = stats.entropy(px, qx)     # D_KL(px || qx)
			KL_with_each_label[key] = kl
			if kl < min_KL:     # track the running minimum and its label
				min_KL = kl
				min_key = key
		KLs.append(KL_with_each_label)
		predictions.append(min_key)

	# Top-K accuracy for K = 1..5.
	results = [mLDA.calculate_topK(eval_label_list, KLs, k) for k in range(1, 6)]
	print('\t'.join(map(str, results)))
	mLDA.write_predictions_to_file('{}_{}'.format(name, 'LDAKL'), eval_label_list, KLs, reverse=False)






if __name__ == '__main__':
	# NOTE(review): the driver below is commented out and `data_helper` is not
	# defined in this file — presumably it must be constructed/imported before
	# main() can run; confirm against the project's other entry points.
	# NOTE(review): time.clock() was removed in Python 3.8; use
	# time.perf_counter() if this driver is revived.
	pass
	# start_time = time.clock()
	# bug_msg_all, _ = data_helper.get_msg_all()
	# vocabulary = data_helper.create_vocabulary()
	# developers_list = data_helper.create_developers_list()
	# time_windows = data_helper.split_dataset_by_eight_to_two(bug_msg_all)

	# T = 100
	# n_iteration = 100
	# alpha = 50 / T
	# beta = 0.1

	# # parser = argparse.ArgumentParser()
	# # parser.add_argument('index', help='specify the index of the dataset')
	# # args = parser.parse_args()

	# name = PATH.root.split('/')[-2]

	# main(name, time_windows[0], time_windows[1])
	# print('elapsed time: {}'.format(time.clock() - start_time))
