from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
import time
import numpy as np
import argparse
# import sys
# sys.path.append('/home/wanglinhui/code/WorkSpace_CODE/WorkSpace_Python/contrast_algorithm_new') # 声明模块的搜索路径, 用于cmd运行

from baseline.LDA_model import GibbsLDA
import util.PATH as PATH
import baseline.dataset as dataset
import util.data_helper as data_helper



'''
SVM-LDA: use the doc-topic matrix produced by LDA as the feature vectors
fed into an SVM classifier; in this pipeline LDA effectively serves as the
feature-extraction step.
'''
def svm_main(name, T, n_iteration, train_ids, test_ids, top_k=5):
	'''
	Train an LDA model on the training bugs, use the resulting doc-topic
	distributions as feature vectors for a linear SVM, then evaluate
	top-K developer recommendation on the held-out bugs and write the
	predictions to file.

	@:parameter name         dataset name; only used to label the output files
	@:parameter T            number of LDA topics
	@:parameter n_iteration  number of sampling iterations (training and inference)
	@:parameter train_ids    bug ids forming the training split
	@:parameter test_ids     bug ids forming the evaluation split
	@:parameter top_k        number of top-ranked developers to score (default 5,
	                         matching the original hard-coded value)

	NOTE(review): relies on module-level globals `alpha`, `beta`,
	`developers_list` and `bug_msg_all` defined in the __main__ block.
	'''
	mLDA = GibbsLDA(T, n_iteration, alpha, beta, name)
	# Build (document, label) pairs for both splits from the shared bug corpus.
	train_docs_list, train_label_list = dataset.get_features_and_labels(developers_list, bug_msg_all, train_ids)
	eval_docs_list, eval_label_list = dataset.get_features_and_labels(developers_list, bug_msg_all, test_ids)
	model, tf_vectorizer = mLDA.create_LDA_model(0, train_docs_list)

	train_doc_topic = model.doc_topic_  # presumably shape [n_train_samples, T] — TODO confirm

	# Reuse the fitted vectorizer so eval docs share the training vocabulary.
	tf_eval = tf_vectorizer.transform(eval_docs_list)
	eval_doc_topic = model.transform(tf_eval, max_iter=n_iteration)  # must be an ndarray

	clf = svm.LinearSVC()
	print('train_doc_topic: {}'.format(train_doc_topic.shape))
	# Print the shape, consistent with the other three debug prints
	# (the original dumped the whole label array here).
	print('train_label_list: {}'.format(np.array(train_label_list).shape))
	print('eval_doc_topic: {}'.format(eval_doc_topic.shape))
	print('eval_label_list: {}'.format(np.array(eval_label_list).shape))

	clf.fit(train_doc_topic, np.array(train_label_list))

	# One score per class; shape=[n_eval_samples, n_labels]. Column order
	# follows clf.classes_, which maps column index -> label name.
	all_probability = clf.decision_function(eval_doc_topic)

	mLDA.calculate_svm_topK(eval_label_list, all_probability, top_k, clf.classes_)
	mLDA.write_predictions_to_file('{}_{}'.format(name, 'LDASVM'), eval_label_list, all_probability, clf.classes_)


if __name__ == '__main__':
	# time.clock() was deprecated in 3.3 and removed in Python 3.8;
	# perf_counter() is the documented replacement for elapsed-time measurement.
	start_time = time.perf_counter()

	# Load the corpus and metadata from the project data helpers.
	bug_msg_all, _ = data_helper.get_msg_all()
	vocabulary = data_helper.create_vocabulary()  # not read below; kept for any loading side effects — TODO confirm
	developers_list = data_helper.create_developers_list()
	# Presumably an 80/20 split: index 0 = train ids, index 1 = test ids — verify against data_helper.
	time_windows = data_helper.split_dataset_by_eight_to_two(bug_msg_all)

	# LDA hyper-parameters; alpha = 50/T, beta = 0.1 is a common symmetric-prior heuristic.
	T = 100
	n_iteration = 100
	alpha = 50 / T
	beta = 0.1

	# Dataset name is derived from the second-to-last path component of the data root.
	name = PATH.root.split('/')[-2]
	print('数据集:{}'.format(name))
	svm_main(name, T, n_iteration, time_windows[0], time_windows[1])
	print('花费时间: {}'.format(time.perf_counter() - start_time))