from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
import time
import numpy as np

from KL_LDA.LDA_model import GibbsLDA

# LDA hyperparameters.
T = 50              # number of topics
n_iteration = 1     # Gibbs-sampling iterations (also passed to model.transform below)
alpha = 50 / T      # document-topic Dirichlet prior; common heuristic alpha = 50 / n_topics
beta = 0.1          # topic-word Dirichlet prior
# SVM-LDA: the doc-topic matrix produced by LDA is used as the feature
# vector fed into the SVM classifier, so in this pipeline LDA effectively
# serves as a feature-extraction step.
def svm_main():
	"""Train an SVM on LDA doc-topic features and evaluate top-1 / top-K accuracy.

	Pipeline: split the corpus into train/eval sets, fit a Gibbs LDA model on
	the training docs, use the resulting doc-topic distributions as feature
	vectors for a LinearSVC, then score predictions on the eval set via the
	GibbsLDA helper methods. All results are reported by printing / by the
	mLDA.calculate_* calls; nothing is returned.
	"""
	mLDA = GibbsLDA(T, n_iteration, alpha, beta)
	train_docs_list, train_label_list, eval_docs_list, eval_label_list, eval_label_to_bugids = mLDA.split_train_and_eval_set()
	model = mLDA.create_LDA_model(0, train_docs_list)

	# Doc-topic distribution of the training docs; used directly as SVM features.
	train_doc_topic = model.doc_topic_

	# NOTE(review): this CountVectorizer is fit on the eval docs only, so its
	# vocabulary / column order need not match whatever vocabulary the LDA
	# model was trained with — verify inside create_LDA_model that the same
	# vectorizer (or its vocabulary) is reused here, otherwise transform()
	# receives misaligned term counts.
	tf_eval = CountVectorizer().fit_transform(eval_docs_list)  # term-frequency matrix for eval docs
	eval_doc_topic = model.transform(tf_eval, max_iter=n_iteration)  # must be array-like

	clf = svm.LinearSVC()       # the classifier
	print('train_doc_topic: {}'.format(train_doc_topic.shape))
	print('train_label_list: {}'.format(np.array(train_label_list)))
	print('eval_doc_topic: {}'.format(eval_doc_topic.shape))
	print('eval_label_list: {}'.format(np.array(eval_label_list).shape))

	clf.fit(train_doc_topic, np.array(train_label_list))      # train the model


	predictions = clf.predict(eval_doc_topic)
	# print('predictions: {}'.format(predictions))
	print('eval_label_list: {}'.format(eval_label_list))
	mLDA.calculate_acc(eval_label_list, predictions, eval_label_to_bugids)
	# Per-class decision scores, shape=[n_eval_samples, n_labels]; column order
	# follows clf.classes_ (printed below), which is why classes_ is passed on
	# to calculate_svm_topK.
	all_probability = clf.decision_function(eval_doc_topic)

	print('all_probability: {}'.format(all_probability))
	print('clf.classes_: {}'.format(clf.classes_))        #
	# mLDA.calculate_topK(eval_label_list, all_probability, eval_label_to_bugids, 3)
	mLDA.calculate_svm_topK(eval_label_list, all_probability, eval_label_to_bugids, 3, clf.classes_)    # clf.classes_, shape=[n_labels], maps label names to column indices



if __name__ == '__main__':
	# time.clock() was removed in Python 3.8; perf_counter() is the
	# recommended monotonic wall-clock replacement for timing code.
	start_time = time.perf_counter()
	svm_main()
	print('花费时间: {}'.format(time.perf_counter() - start_time))