import os
import time
from sklearn.feature_extraction.text import CountVectorizer

from baseline.LDA_model import GibbsLDA
import util.PATH as PATH
import baseline.dataset as dataset
import util.data_helper as data_helper

# Unique per-run output directory, stamped with the launch timestamp.
MODEL_PATH = '../data/1LDA/{}/'.format(str(time.time()))
# exist_ok=True creates the directory atomically and avoids the
# check-then-create race of `if not exists: makedirs(...)`.
os.makedirs(MODEL_PATH, exist_ok=True)

# T = 20          # 100 topics corresponded to 10000 samples; in this paper the
#                 # number of topics is roughly derived from the sample count.
# n_iteration = 100
# # alpha = 50/T
# alpha = 0.01
# beta = 0.01

# theta = 0.1     # Parameter of P(dev | topic): a weighted average of the
# developer's interest and expertise. The smaller the candidate developer
# set, the larger theta should be.

def main(name, train_ids, test_ids):
	"""Train a Gibbs-LDA topic model on the training bugs and score every
	known developer against every evaluation bug via
	P(dev|bug) = sum_topic P(topic|bug) * P(dev|topic).

	Relies on module-level globals set in ``__main__``: T, n_iteration,
	alpha, beta, theta, developers_list, bug_msg_all.

	:param name: dataset name (used for model naming / output files)
	:param train_ids: bug ids making up the training window
	:param test_ids: bug ids making up the evaluation window
	"""
	mLDA = GibbsLDA(T, n_iteration, alpha, beta, name)
	train_docs_list, train_label_list = dataset.get_features_and_labels(developers_list, bug_msg_all, train_ids)
	eval_docs_list, eval_label_list = dataset.get_features_and_labels(developers_list, bug_msg_all, test_ids)
	model, tf_vectorizer = mLDA.create_LDA_model(1, train_docs_list)
	print('验证集的size={}'.format(len(eval_label_list)))

	train_doc_topic = model.doc_topic_

	# Re-use the fitted vectorizer so eval docs share the training vocabulary.
	tf_eval = tf_vectorizer.transform(eval_docs_list)
	eval_doc_topic = model.transform(tf_eval, max_iter=n_iteration)  # array, shape (n_eval_docs, T)

	# Dominant topic of each training bug (argmax over its topic distribution).
	train_doc_belong_topic = [row.argmax() for row in train_doc_topic]

	# Indices of the training bugs filed under each topic.
	docs_under_each_topic = [[] for _ in range(T)]
	for doc_idx, topic in enumerate(train_doc_belong_topic):
		docs_under_each_topic[topic].append(doc_idx)

	# Indices of the training bugs each developer fixed.
	# key = dev's name, value = [doc_index_1, doc_index_2, ...]
	docs_under_each_dev = {}
	for doc_idx, dev in enumerate(train_label_list):
		docs_under_each_dev.setdefault(dev, []).append(doc_idx)

	# N_{dev, topic}: number of topic-t bugs fixed by each dev, obtained by
	# intersecting the per-dev and per-topic index sets.
	# Build each topic's set once (hoisted: the original rebuilt both sets
	# inside the devs x topics double loop).
	topic_doc_sets = [set(docs) for docs in docs_under_each_topic]
	N_dev_topic = {}    # key = dev, value = [count for topic 0, 1, ...]
	for dev, docs in docs_under_each_dev.items():
		dev_docs = set(docs)
		N_dev_topic[dev] = [len(dev_docs & topic_doc_sets[t]) for t in range(T)]

	# P(dev->topic), i.e. P_dev_interest: the dev's interest in fixing bugs
	# of this topic. N_dev (bugs fixed by this dev) is loop-invariant, so it
	# is computed once per dev instead of once per (dev, topic).
	P_dev_interest = {}     # key = dev's name, value = [T floats]
	for dev, counts in N_dev_topic.items():
		N_dev = len(docs_under_each_dev[dev])
		P_dev_interest[dev] = [c / N_dev for c in counts]

	# P(topic->dev), i.e. P_dev_expertise: the dev's expertise on this topic.
	# +1 smooths topics that attracted no training bugs (avoids divide-by-zero).
	P_dev_expertise = {}    # key = dev's name, value = [T floats]
	for dev, counts in N_dev_topic.items():
		P_dev_expertise[dev] = [counts[t] / (len(docs_under_each_topic[t]) + 1) for t in range(T)]

	# P(dev | topic) = theta * P(dev->topic) + (1-theta) * P(topic->dev)
	# TODO: sweep theta in a loop (the paper tunes it per dataset).
	P_dev_topic = {}    # key = dev's name, value = [T floats]
	for dev, interests in P_dev_interest.items():
		expertises = P_dev_expertise[dev]
		P_dev_topic[dev] = [theta * i + (1 - theta) * e for i, e in zip(interests, expertises)]

	# P(dev | bug): probability that this dev is a candidate for this bug.
	# P(dev|bug) = \sum_{topic} P(topic|bug) * P(dev|topic)
	P_devs_bugs = []        # one {dev: score} dict per evaluation bug
	for bug_topic_dist in eval_doc_topic:
		P_devs_bug = {
			dev: sum(dev_topics[t] * bug_topic_dist[t] for t in range(T))
			for dev, dev_topics in P_dev_topic.items()
		}
		P_devs_bugs.append(P_devs_bug)

	# Top-K evaluation and prediction dump.
	mLDA.calculate_topK(eval_label_list, P_devs_bugs, 5,  reverse=True)
	mLDA.write_predictions_to_file('{}_{}'.format(name, 'DERTOM'), eval_label_list, P_devs_bugs, reverse=True)

if __name__ == '__main__':
	# time.clock() was removed in Python 3.8; perf_counter() is the
	# recommended replacement for elapsed-time measurement.
	start_time = time.perf_counter()

	bug_msg_all, _ = data_helper.get_msg_all()
	vocabulary = data_helper.create_vocabulary()
	developers_list = data_helper.create_developers_list()
	time_windows = data_helper.split_dataset_by_time_windows(bug_msg_all)

	# Per the original paper, roughly one topic per 100 samples.
	# max(1, ...) guards against a zero-topic model on datasets < 100 bugs.
	T = max(1, len(bug_msg_all) // 100)
	n_iteration = 100
	alpha = 0.01
	beta = 0.01
	theta = 0.1     # The paper sweeps theta over 0..1 in 0.1 steps; the optimum differs per dataset.

	name = PATH.root.split('/')[-2]
	print('数据集:{}'.format(name))
	main(name, time_windows[0], time_windows[1])
	print('花费时间: {}'.format(time.perf_counter() - start_time))