# -*- coding: utf-8 -*-

from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
from data_format import x_train,y_train,x_cross,y_cross,x_test,y_test
from sklearn.preprocessing import StandardScaler  # feature scaling
import joblib as jlb  # save/load trained models
# Use the KaiTi font so Chinese labels render correctly in matplotlib figures
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['KaiTi']
from test_result_show import plot_roc_random_forest,pie_chart
import warnings
warnings.filterwarnings('ignore')  # "error", "ignore", "always", "default", "module" or "once"

def random_forest_train(x_train, y_train, x_cross, y_cross, x_test, y_test):
    """Tune a random forest on the cross-validation split and save the best model.

    First tunes ``n_estimators`` (1..100), then tunes ``max_features``
    (1..10) with ``n_estimators`` fixed at the best value found; the
    winning configuration is refit on the training data and dumped to
    ``./models/random_forest.pkl``.

    Args:
        x_train, y_train: training features/labels.
        x_cross, y_cross: cross-validation features/labels used for tuning.
        x_test, y_test:   test features/labels (only scaled here; evaluated
                          later by ``RF_result``).

    Returns:
        (c_list, accurancy_list): the ``n_estimators`` values tried and the
        corresponding cross-validation accuracies, for plotting.
    """
    # Feature scaling: fit the scaler on the training split ONLY, then apply
    # the same transform to the other splits.
    # BUG FIX: the original called fit_transform on each split, leaking the
    # cross/test statistics and scaling each split differently.
    sc = StandardScaler()
    x_train = sc.fit_transform(x_train)
    x_cross = sc.transform(x_cross)
    x_test = sc.transform(x_test)

    def _cross_accuracy(classfier):
        # Fit on the training split and return the fraction of
        # cross-validation samples predicted correctly.
        classfier.fit(x_train, y_train)
        y_pred = classfier.predict(x_cross).tolist()
        y_true = y_cross.tolist()
        hits = sum(1 for p, t in zip(y_pred, y_true) if p == t)
        return hits / len(y_true)

    # --- Tune n_estimators (C = number of bootstrapped sub-samples/trees) ---
    best_C = 1
    best_accurancy = 0.0
    c_list = []
    accurancy_list = []
    for c in range(1, 101):
        accuracy = _cross_accuracy(
            RandomForestClassifier(n_estimators=c, random_state=10))
        if accuracy > best_accurancy:
            best_accurancy = accuracy
            best_C = c
        c_list.append(c)
        accurancy_list.append(accuracy)

    # --- Tune max_features with n_estimators fixed at best_C ---
    # NOTE(review): assumes the data has at least 10 features — confirm,
    # otherwise max_features > n_features raises in sklearn.
    best_feature = 1
    best_accurancy_feature = 0.0
    feature_list = []
    accurancy_feature_list = []
    for f in range(1, 11):
        accuracy = _cross_accuracy(
            RandomForestClassifier(n_estimators=best_C, max_features=f,
                                   random_state=10))
        # BUG FIX: the original compared against best_accurancy (from the
        # first loop) and swapped the assignments, storing the accuracy into
        # best_feature and the index into best_accurancy_feature — the
        # accuracy float was then passed to max_features below.
        if accuracy > best_accurancy_feature:
            best_accurancy_feature = accuracy
            best_feature = f
        feature_list.append(f)
        accurancy_feature_list.append(accuracy)

    print('best_C:{}'.format(best_C))
    print('best_accurancy:{}'.format(best_accurancy))

    print('best_feature:{}'.format(best_feature))
    print('best_accurancy_feature:{}'.format(best_accurancy_feature))

    # Refit the winning configuration on the training data and persist it.
    classfier = RandomForestClassifier(n_estimators=best_C,
                                       max_features=best_feature,
                                       random_state=10)
    classfier.fit(x_train, y_train)
    jlb.dump(classfier, './models/random_forest.pkl')

    return c_list, accurancy_list

# Plot how cross-validation accuracy changes with the number of trees (C)
def show_change_random_forest(x, y):
    """Draw the accuracy-vs-number-of-trees curve.

    Args:
        x: the tried ``n_estimators`` values (C).
        y: the matching cross-validation accuracies.
    """
    # Single green curve; axis labels and title, then show the figure.
    plt.plot(x, y, c='g', label='accurancy')
    plt.title('决策树的个数与准确率的关系')
    plt.xlabel('C')
    plt.ylabel('accurancy')
    plt.legend()
    plt.show()


def RF_result(x_test, y_test):
    """Evaluate the persisted random-forest model on the test set.

    Loads ``./models/random_forest.pkl``, predicts on ``x_test``, draws a
    pie chart of the predicted classes and the ROC curve (with AUC) via the
    project helpers, then prints the raw predictions.
    """
    # Restore the model saved by random_forest_train.
    model = jlb.load('./models/random_forest.pkl')

    # Predict the test split.
    predictions = model.predict(x_test)

    # Class-distribution pie chart of the predictions.
    pie_chart(predictions, '随机森林模型(RF)')

    # ROC curve + AUC value and the remaining evaluation output.
    plot_roc_random_forest(x_test, y_test, predictions, model, '随机森林模型(RF)')

    print('y_pred:{}'.format(predictions))



if __name__ == '__main__':
    # Uncomment to retrain the model and plot the n_estimators/accuracy curve:
    # c_list, accurancy_list = random_forest_train(x_train,y_train,x_cross,y_cross,x_test,y_test)
    # show_change_random_forest(c_list,accurancy_list)
    # Evaluate the previously saved model on the test set.
    RF_result(x_test,y_test)

