import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier
from data_format import x_train, y_train, x_cross, y_cross, x_test, y_test
import matplotlib
from sklearn.model_selection import cross_val_score

matplotlib.rcParams['font.sans-serif'] = ['KaiTi']
from sklearn.preprocessing import StandardScaler  # 特征缩放
import joblib   # 保存模型
from test_result_show import plot_roc, plot_roc_knn, pie_chart
from data_format import x_train, y_train, x_cross, y_cross, x_test, y_test
import warnings

warnings.filterwarnings('ignore')  # "error", "ignore", "always", "default", "module" or "once"
import numpy as np
from sklearn import svm


def knn_train(x_train, y_train, x_cross, y_cross, x_test, y_test):
    """Sweep odd K in [1, 99], pick the best K on the cross-validation set,
    persist the best model to ./models/knn.pkl, and return the history.

    Args:
        x_train, y_train: training features/labels (array-like).
        x_cross, y_cross: cross-validation features/labels used to select K.
        x_test, y_test: test split; x_test is scaled here for consistency
            (y_test is unused by this function).

    Returns:
        (k_list, accuracy_list): the K values tried and the cross-validation
        accuracy achieved at each K.
    """
    # Min-max feature scaling. All splits are scaled with the *training*
    # statistics so they share one feature space. (The original scaled
    # train and test each with their own min/max and left x_cross — the
    # split that actually drives model selection — unscaled.)
    train_min = np.min(x_train, axis=0)
    train_range = np.max(x_train, axis=0) - train_min
    x_train = (x_train - train_min) / train_range
    x_cross = (x_cross - train_min) / train_range
    x_test = (x_test - train_min) / train_range

    best_K = 1
    best_accurancy = 0.0
    k_list = []
    accurancy_list = []

    # KNN classifier: try every odd K from 1 to 99.
    for k in range(1, 100, 2):
        # Fit on the training split.
        classfier = KNeighborsClassifier(n_neighbors=k)
        classfier.fit(x_train, y_train)

        # Evaluate on the cross-validation split.
        y_pred = classfier.predict(x_cross)
        print(classification_report(y_cross, y_pred))

        # Fraction of correct predictions (replaces the manual count loop).
        accuracy = float(np.mean(np.asarray(y_pred) == np.asarray(y_cross)))
        if accuracy > best_accurancy:
            best_K = k
            best_accurancy = accuracy

        k_list.append(k)
        accurancy_list.append(accuracy)

    # Original printed 'best_C' (copy-paste from an SVM script); the value
    # reported is the best number of neighbours.
    print('best_K:{}'.format(best_K))
    print('best_accurancy:{}'.format(best_accurancy))

    # Retrain with the best K and persist. The original hard-coded
    # n_neighbors=1 and called the undefined name `jlb` (NameError).
    classfier = KNeighborsClassifier(n_neighbors=best_K)
    classfier.fit(x_train, y_train)
    joblib.dump(classfier, './models/knn.pkl')

    return k_list, accurancy_list


# Plot how accuracy varies with the number of neighbours K.
def show_change_knn(x, y):
    """Draw a single green line chart of accuracy (y) against K (x)."""
    plt.plot(x, y, color='g', label='accurancy')
    plt.ylabel('Accuracy')
    plt.xlabel('K')
    plt.title('K近邻与准确率的关系')
    plt.legend()
    plt.show()


def KNN_result(x_train, y_train, x_test, y_test):
    """Load the persisted KNN model, predict the test set, and plot results.

    Shows a pie chart of the predicted classes and an ROC curve (via
    draw_roc), then prints the raw predictions.
    """
    # Restore the model saved by knn_train.
    model = joblib.load('./models/knn.pkl')

    # Predict the held-out test set.
    y_pred = model.predict(x_test)

    # Pie chart of predicted class distribution.
    pie_chart(y_pred, 'K近邻预测模型(KNN)')

    # Alternative: plot_roc_knn(x_test, y_test, y_pred, model, 'K近邻预测模型(KNN)')
    draw_roc(x_train, y_train, x_test, y_test)

    print('y_pred:{}'.format(y_pred))


def draw_roc(x_train, y_train, x_test, y_test):
    """Fit a linear SVM on the training split and plot its ROC curve on the
    test split, annotated with the AUC.

    NOTE(review): despite living in the KNN script, this trains a fresh SVM
    for the ROC rather than scoring the saved KNN model — confirm intent.
    Assumes a binary classification task (roc_curve default pos_label).
    """
    random_state = np.random.RandomState(0)
    svm_new = svm.SVC(kernel='linear', probability=True, random_state=random_state)

    # decision_function scores are the inputs roc_curve expects.
    y_score = svm_new.fit(x_train, y_train).decision_function(x_test)

    # Compute true/false positive rates and the area under the curve.
    fpr, tpr, threshold = roc_curve(y_test, y_score)
    roc_auc = auc(fpr, tpr)

    lw = 2
    # The original called plt.figure() and then plt.figure(figsize=...),
    # leaking a stray empty figure window; create the sized figure once.
    plt.figure(figsize=(10, 10))
    plt.plot(fpr, tpr, color='darkorange',
             lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()


if __name__ == '__main__':
    # Sweep K on the cross-validation set, plot the K/accuracy curve,
    # then evaluate the persisted best model on the test set.
    k_values, accuracies = knn_train(x_train, y_train, x_cross, y_cross, x_test, y_test)
    show_change_knn(k_values, accuracies)
    KNN_result(x_train, y_train, x_test, y_test)