from tools import *

import csv
import traceback

from sklearn import metrics
import sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
# Module-level accumulators: each grid-search function appends one row of
# [hyperparameters..., precision, recall, f1] per configuration, then dumps
# its list to a CSV under csv_compare/.
nb=[]
knn=[]
rfc=[]
dtc=[]
def NB(bunch_obj1, bunch_obj2):
    """Grid-search a Multinomial Naive Bayes classifier and log the results.

    Trains one ``MultinomialNB`` per (alpha, fit_prior) combination on the
    training bunch, evaluates it on the test bunch, prints a classification
    report plus weighted precision/recall/F1, appends every result row to the
    module-level ``nb`` list, and finally writes all rows to
    ``csv_compare/nb.csv``.

    Args:
        bunch_obj1: training bunch; reads ``.tdm`` (tf-idf matrix),
            ``.label`` and ``.target_name`` — assumed sklearn-Bunch-like,
            produced elsewhere in this project (TODO confirm).
        bunch_obj2: test bunch; reads ``.tdm`` and ``.label``.
    """
    # Per the original author's note: alpha=0.12 was empirically best;
    # smaller alpha means more iterations and higher precision.
    alpha_test = [1, 0.11, 0.12, 0.1, 0.09, 0.08, 0.01, 0.001]
    fit_test = [True, False]
    for al in alpha_test:
        for ft in fit_test:
            # Train on bag-of-words vectors and class labels.
            clf_nb = MultinomialNB(alpha=al, fit_prior=ft).fit(bunch_obj1.tdm, bunch_obj1.label)
            # Predict labels for the test set.
            predict_nb = clf_nb.predict(bunch_obj2.tdm)
            print("alpha:" + str(al) + '\tfit_prior:' + str(ft))
            print(classification_report(bunch_obj2.label, predict_nb, target_names=bunch_obj1.target_name))
            Precision = metrics.precision_score(bunch_obj2.label, predict_nb, average='weighted')
            Recall = metrics.recall_score(bunch_obj2.label, predict_nb, average='weighted')
            F1_score = metrics.f1_score(bunch_obj2.label, predict_nb, average='weighted')
            nb.append([str(al), str(ft), str(Precision), str(Recall), str(F1_score)])
            print('NB模型准确率：', Precision)
            print('NB模型召回率：', Recall)
            print('NB模型F1值：', F1_score)
            print('\n')
    # The with-statement closes the file; the original's trailing f.close()
    # after the block was a redundant no-op and has been removed.
    with open('csv_compare/nb.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['alpha', 'fit_prior', 'precision', 'recall', 'f1_score'])
        writer.writerows(nb)

def KNN(bunch_obj1, bunch_obj2):
    """Grid-search a K-Nearest-Neighbors classifier and log the results.

    Trains one ``KNeighborsClassifier`` per (p, n_neighbors, weights)
    combination on the training bunch, evaluates it on the test bunch,
    prints a classification report plus weighted precision/recall/F1,
    appends every result row to the module-level ``knn`` list, and finally
    writes all rows to ``csv_compare/knn.csv``.

    Args:
        bunch_obj1: training bunch; reads ``.tdm``, ``.label``, ``.target_name``.
        bunch_obj2: test bunch; reads ``.tdm`` and ``.label``.
    """
    p = [1, 2]  # distance metric: 1 = Manhattan, 2 = Euclidean (Minkowski p)
    # Neighbor counts: full 1-9 sweep is too slow per the original author,
    # so only the default 5 plus its immediate neighbors are tried.
    k = [4, 5, 6]
    w = ['uniform', 'distance']  # neighbor weighting scheme
    for p_ in p:
        for k_ in k:
            for w_ in w:
                clf_knn = KNeighborsClassifier(p=p_, n_neighbors=k_, weights=w_).fit(bunch_obj1.tdm, bunch_obj1.label)
                predict_knn = clf_knn.predict(bunch_obj2.tdm)
                print("p:" + str(p_) + '\tn_neighbor:' + str(k_) + '\tweights:' + w_)
                print(classification_report(bunch_obj2.label, predict_knn, target_names=bunch_obj1.target_name))
                Precision = metrics.precision_score(bunch_obj2.label, predict_knn, average='weighted')
                Recall = metrics.recall_score(bunch_obj2.label, predict_knn, average='weighted')
                F1_score = metrics.f1_score(bunch_obj2.label, predict_knn, average='weighted')
                knn.append([str(p_), str(k_), w_, str(Precision), str(Recall), str(F1_score)])
                print('KNN模型准确率：', Precision)
                print('KNN模型召回率：', Recall)
                print('KNN模型F1值：', F1_score)
                print('\n')
    # The with-statement closes the file; the original's trailing f.close()
    # after the block was a redundant no-op and has been removed.
    with open('csv_compare/knn.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['p', 'n_neighbor', 'weights', 'precision', 'recall', 'f1_score'])
        writer.writerows(knn)

def RFC(bunch_obj1, bunch_obj2):
    """Grid-search a Random Forest classifier and log the results.

    Trains one ``RandomForestClassifier`` per (n_estimators, max_features,
    bootstrap) combination on the training bunch, evaluates it on the test
    bunch, prints a classification report plus weighted precision/recall/F1,
    appends every result row to the module-level ``rfc`` list, and finally
    writes all rows to ``csv_compare/rfc.csv``.

    Args:
        bunch_obj1: training bunch; reads ``.tdm``, ``.label``, ``.target_name``.
        bunch_obj2: test bunch; reads ``.tdm`` and ``.label``.
    """
    deep = [10, 100]  # number of trees in the forest (n_estimators)
    # Features considered at each split; ``None`` dropped by the original
    # author because it was too expensive.
    features = ["sqrt", "log2"]
    boot_strap = [True, False]  # sample with replacement (default True)
    for d_ in deep:
        for f_ in features:
            for b_ in boot_strap:
                clf_rfc = RandomForestClassifier(n_estimators=d_, max_features=f_, bootstrap=b_).fit(bunch_obj1.tdm, bunch_obj1.label)
                predict_rfc = clf_rfc.predict(bunch_obj2.tdm)
                print("n_estimators:" + str(d_) + '\tmax_features:' + f_ + '\tbootstrap:' + str(b_))
                print(classification_report(bunch_obj2.label, predict_rfc, target_names=bunch_obj1.target_name))
                Precision = metrics.precision_score(bunch_obj2.label, predict_rfc, average='weighted')
                Recall = metrics.recall_score(bunch_obj2.label, predict_rfc, average='weighted')
                F1_score = metrics.f1_score(bunch_obj2.label, predict_rfc, average='weighted')
                rfc.append([str(d_), f_, str(b_), str(Precision), str(Recall), str(F1_score)])
                print('随机森林模型准确率：', Precision)
                print('随机森林模型召回率：', Recall)
                print('随机森林模型F1值：', F1_score)
                print('\n')
    # The with-statement closes the file; the original's trailing f.close()
    # after the block was a redundant no-op and has been removed.
    with open('csv_compare/rfc.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['n_estimators', 'max_features', 'bootstrap', 'precision', 'recall', 'f1_score'])
        writer.writerows(rfc)

def DTC(bunch_obj1, bunch_obj2):
    """Grid-search a Decision Tree classifier and log the results.

    Trains one ``DecisionTreeClassifier`` per (criterion, splitter)
    combination on the training bunch, evaluates it on the test bunch,
    prints a classification report plus weighted precision/recall/F1,
    appends every result row to the module-level ``dtc`` list, and finally
    writes all rows to ``csv_compare/dtc.csv``.

    Args:
        bunch_obj1: training bunch; reads ``.tdm``, ``.label``, ``.target_name``.
        bunch_obj2: test bunch; reads ``.tdm`` and ``.label``.
    """
    # Split-quality measure: "gini" = Gini impurity, "entropy" = information gain.
    cri = ["gini", "entropy"]
    # Node split strategy: "best" still prefers the most informative feature;
    # "random" splits randomly and may overfit (deeper, larger trees).
    spl = ["best", "random"]
    for c_ in cri:
        for s_ in spl:
            clf_dtc = DecisionTreeClassifier(criterion=c_, splitter=s_).fit(bunch_obj1.tdm, bunch_obj1.label)
            predict_dtc = clf_dtc.predict(bunch_obj2.tdm)
            print("criterion:" + c_ + '\tsplitter:' + s_)
            print(classification_report(bunch_obj2.label, predict_dtc, target_names=bunch_obj1.target_name))
            Precision = metrics.precision_score(bunch_obj2.label, predict_dtc, average='weighted')
            Recall = metrics.recall_score(bunch_obj2.label, predict_dtc, average='weighted')
            F1_score = metrics.f1_score(bunch_obj2.label, predict_dtc, average='weighted')
            dtc.append([c_, s_, str(Precision), str(Recall), str(F1_score)])
            print('决策树模型准确率：', Precision)
            print('决策树模型召回率：', Recall)
            print('决策树模型F1值：', F1_score)
            print('\n')
    # The with-statement closes the file; the original's trailing f.close()
    # after the block was a redundant no-op and has been removed.
    with open('csv_compare/dtc.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['criterion', 'splitter', 'precision', 'recall', 'f1_score'])
        writer.writerows(dtc)

def result_show():
    """Load the pickled train/test tf-idf bunches and run the model comparisons.

    Only the Naive Bayes sweep is currently enabled; the KNN / RFC / DTC
    sweeps are kept as commented-out calls so they can be re-enabled by
    uncommenting. Errors during prediction are reported with a traceback
    instead of being silently swallowed.
    """
    # Load training set (pickled bunch produced earlier in the pipeline).
    trainpath = 'tfidf_bunch/train_tfidfspace.dat'
    train_set = readbunchobj(trainpath)
    print('train ready')
    # Load test set.
    testpath = 'tfidf_bunch/test_tfidfspace.dat'
    test_set = readbunchobj(testpath)
    print('test ready')
    try:
        print("Begin predict")

        NB(train_set, test_set)
        print("NB over")

        # KNN(train_set, test_set)
        # print("KNN over")

        # RFC(train_set, test_set)
        # print('RFC over')

        # DTC(train_set, test_set)
        # print('DTC over')
    except Exception:
        # The original bare ``except:`` hid every failure (even SystemExit /
        # KeyboardInterrupt) behind a bare 'error!'; narrow the catch and
        # show the actual traceback so failures are diagnosable.
        traceback.print_exc()
        print('error!')
    print('Finish! Congratulations!')