import joblib
from imblearn.over_sampling import RandomOverSampler
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn import tree
import numpy as np
from sklearn.model_selection import KFold, cross_val_score
from sklearn.ensemble import GradientBoostingClassifier
from utils import draw
from sklearn.model_selection import train_test_split

def decision_tree_train(datasets, labels, max_depth):
    """Train a decision-tree classifier, report test metrics, and persist the model.

    The data is split 80/20 into train/test (fixed seed for reproducibility),
    the training portion is rebalanced by random oversampling, and the fitted
    model is saved to ./files/decision.pkl.

    Parameters
    ----------
    datasets : array-like of shape (n_samples, n_features)
        Feature matrix.
    labels : array-like of shape (n_samples,)
        Class labels.
    max_depth : int
        Maximum depth of the decision tree.

    Returns
    -------
    tuple of (float, float)
        (accuracy, weighted F1-score) on the held-out test set.
    """
    # Hold out 20% of the data as a test set.
    X_train, X_test, y_train, y_test = train_test_split(datasets, labels, test_size=0.2, random_state=40)

    # Oversample the training portion only, so the test set stays untouched.
    oversampler = RandomOverSampler()
    X_train_oversampled, y_train_oversampled = oversampler.fit_resample(X_train, y_train)

    # Hyper-parameter notes (tuned on the PDE dataset):
    # - criterion: 'entropy' gave higher average accuracy than 'gini'.
    # - splitter: 'random' suits larger samples; with 'best' + 'entropy',
    #   max_depth=1 was more stable and performed better.
    # - cross-validation usually favored max_depth=2 (see accuracy-vs-depth plot).
    clf = tree.DecisionTreeClassifier(max_depth=max_depth, criterion='entropy', splitter='best')
    clf = clf.fit(X_train_oversampled, y_train_oversampled)

    # Save under ./files so decision_tree_test (which loads
    # "./files/decision.pkl") can find the model; the previous
    # "../files/decision.pkl" path did not match the loader.
    joblib.dump(clf, "./files/decision.pkl")
    y_predict = clf.predict(X_test)  # predict on the held-out test set

    # NOTE: despite the historical name, the first value is plain accuracy,
    # not ROC-AUC.
    accuracy = metrics.accuracy_score(y_test, y_predict)
    macro = metrics.precision_score(y_test, y_predict, average='macro')
    micro = metrics.precision_score(y_test, y_predict, average='micro')
    macro_recall = metrics.recall_score(y_test, y_predict, average='macro')
    weighted = metrics.f1_score(y_test, y_predict, average='weighted')
    print('准确率:', accuracy)  # accuracy
    print('宏平均精确率:', macro)  # macro-averaged precision
    print('微平均精确率:', micro)  # micro-averaged precision
    print('宏平均召回率:', macro_recall)  # macro-averaged recall
    print('平均F1-score:', weighted)  # weighted F1-score
    print('混淆矩阵输出:\n', metrics.confusion_matrix(y_test, y_predict))  # confusion matrix
    print('分类报告:', metrics.classification_report(y_test, y_predict))  # classification report

    return accuracy, weighted  # recorded by the caller

def cal_depth_score(datasets, labels):
    """Score decision-tree depths 1..10 via 10-fold cross-validation.

    For each candidate depth a fresh DecisionTreeClassifier is evaluated
    with shuffled 10-fold CV (fixed seed), the mean fold score is printed,
    and all ten means are returned in depth order.

    Parameters
    ----------
    datasets : array-like of shape (n_samples, n_features)
        Feature matrix.
    labels : array-like of shape (n_samples,)
        Class labels.

    Returns
    -------
    list of float
        Mean cross-validation score for depths 1 through 10.
    """
    # One shared splitter; random_state keeps the folds reproducible.
    splitter = KFold(n_splits=10, shuffle=True, random_state=42)

    score_list = []
    for depth in range(1, 11):
        model = tree.DecisionTreeClassifier(max_depth=depth)
        mean_score = cross_val_score(model, datasets, labels, cv=splitter).mean()
        print(f"决策树高度 {depth}: 平均得分 {mean_score}")
        score_list.append(mean_score)

    return score_list



def decision_tree_test(datasets, labels):
    """Run the saved decision-tree model on a dataset and report accuracy.

    Loads the pickled model from ./files/decision.pkl, writes the
    predictions to ./files/decision_result.txt, and prints the percentage
    of predictions matching *labels*.

    Parameters
    ----------
    datasets : array-like of shape (n_samples, n_features)
        Feature matrix to predict on.
    labels : array-like of shape (n_samples,)
        Ground-truth labels used only for the accuracy printout.
    """
    model = joblib.load("./files/decision.pkl")
    predictions = model.predict(datasets[:])  # predict on every row
    np.savetxt('./files/decision_result.txt', predictions)

    # Fraction of exact matches, expressed as a percentage.
    pct_correct = np.mean(predictions == labels) * 100
    print(f"预测正确的百分比: {pct_correct}%")


def gbdt_train(datasets, labels):
    """Train a gradient-boosting classifier, report test metrics, and persist it.

    The data is split 80/20 into train/test, the training portion is
    rebalanced by random oversampling, and the fitted model is saved to
    ./files/gbdt.pkl.

    Parameters
    ----------
    datasets : array-like of shape (n_samples, n_features)
        Feature matrix.
    labels : array-like of shape (n_samples,)
        Class labels.

    Returns
    -------
    tuple of (float, float)
        (accuracy, weighted F1-score) on the held-out test set.
    """
    # BUG FIX: the original slices (train = datasets[:115], test = datasets[90:])
    # overlapped on rows 90-114, leaking test samples into training and
    # inflating every reported metric. Use the same disjoint 80/20 split as
    # decision_tree_train instead.
    X_train, X_test, y_train, y_test = train_test_split(datasets, labels, test_size=0.2, random_state=40)

    # Oversample the training portion only, so the test set stays untouched.
    oversampler = RandomOverSampler()
    X_train_oversampled, y_train_oversampled = oversampler.fit_resample(X_train, y_train)

    clf = GradientBoostingClassifier()
    clf.fit(X_train_oversampled, y_train_oversampled)

    joblib.dump(clf, "./files/gbdt.pkl")
    y_predict = clf.predict(X_test)  # predict on the held-out test set

    # NOTE: despite the historical name, the first value is plain accuracy,
    # not ROC-AUC.
    accuracy = metrics.accuracy_score(y_test, y_predict)
    macro = metrics.precision_score(y_test, y_predict, average='macro')
    micro = metrics.precision_score(y_test, y_predict, average='micro')
    macro_recall = metrics.recall_score(y_test, y_predict, average='macro')
    weighted = metrics.f1_score(y_test, y_predict, average='weighted')
    print('准确率:', accuracy)  # accuracy
    print('宏平均精确率:', macro)  # macro-averaged precision
    print('微平均精确率:', micro)  # micro-averaged precision
    print('宏平均召回率:', macro_recall)  # macro-averaged recall
    print('平均F1-score:', weighted)  # weighted F1-score
    print('混淆矩阵输出:\n', metrics.confusion_matrix(y_test, y_predict))  # confusion matrix
    print('分类报告:', metrics.classification_report(y_test, y_predict))  # classification report
    draw.plot_roc(y_test, y_predict, accuracy, macro, macro_recall, weighted)  # ROC curve + AUC
    return accuracy, weighted  # recorded by the caller

def gbdt_test(datasets, labels):
    """Run the saved GBDT model on a dataset and report accuracy.

    Loads the pickled model from ./files/gbdt.pkl, writes the predictions
    to ./files/gbdt_result.txt, and prints the percentage of predictions
    matching *labels*.

    Parameters
    ----------
    datasets : array-like of shape (n_samples, n_features)
        Feature matrix to predict on.
    labels : array-like of shape (n_samples,)
        Ground-truth labels used only for the accuracy printout.
    """
    model = joblib.load("./files/gbdt.pkl")
    predictions = model.predict(datasets[:])  # predict on every row
    np.savetxt('./files/gbdt_result.txt', predictions)

    # Fraction of exact matches, expressed as a percentage.
    pct_correct = np.mean(predictions == labels) * 100
    print(f"预测正确的百分比: {pct_correct}%")