from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, label_binarize
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, average_precision_score
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC


def data_preprocessing():
    """Load the digits dataset, pad it with noise features, standardize,
    one-hot encode the labels, and return a stratified train/test split.

    Returns:
        (X_train, X_test, y_train, y_test): 70/30 split with a fixed seed.
    """
    digits = datasets.load_digits()
    X, y = digits.data, digits.target
    # Append random noise columns (10x the original feature count) to make
    # the classification problem harder and the P-R curves more informative.
    rng = np.random.RandomState(0)
    n_samples, n_features = X.shape
    noise = rng.randn(n_samples, 10 * n_features)
    X = np.hstack([X, noise])
    # Scale every feature to zero mean / unit variance.
    X = StandardScaler().fit_transform(X)
    # One-hot encode the labels — required for per-class P-R computation.
    y = label_binarize(y, classes=np.unique(y))
    # shuffle: randomize sample order; stratify: preserve class proportions.
    return train_test_split(X, y, test_size=0.3, random_state=0,
                            shuffle=True, stratify=y)


def train_model(model, X_train, X_test, y_train, y_test):
    """Fit *model* in a one-vs-rest scheme and score the test set.

    Because the labels are one-hot encoded, the estimator is wrapped in
    OneVsRestClassifier so one binary classifier is fit per class.

    Note: ``y_test`` is accepted for interface symmetry but never used here.

    Returns:
        ndarray of shape (n_samples, n_classes): decision-function scores.
    """
    ovr = OneVsRestClassifier(model)
    ovr.fit(X_train, y_train)
    return ovr.decision_function(X_test)


def micro_PR(y_score, y_true=None):
    """Compute per-class and micro-averaged precision/recall curves.

    Args:
        y_score: ndarray of shape (n_samples, n_classes) — decision scores.
        y_true: optional ndarray of the same shape — one-hot ground truth.
            Defaults to the module-level ``y_test`` set in ``__main__``,
            preserving the original (global-reading) behaviour while making
            the function usable and testable without that global.

    Returns:
        (precision, recall, average_precision): three dicts keyed by class
        index 0..n_classes-1 plus the key ``"micro"``.
    """
    if y_true is None:
        # Backward-compatible fallback: the original implementation read
        # the global y_test directly.
        y_true = y_test
    precision = dict()
    recall = dict()
    average_precision = dict()
    # .shape is (n_rows, n_cols); the column count is the number of classes.
    n_classes = y_score.shape[1]
    for i in range(n_classes):
        precision[i], recall[i], _ = precision_recall_curve(y_true[:, i], y_score[:, i])
        average_precision[i] = average_precision_score(y_true[:, i], y_score[:, i])
    # Micro-average: .ravel() flattens all classes into one binary problem.
    precision["micro"], recall["micro"], _ = precision_recall_curve(y_true.ravel(), y_score.ravel())
    average_precision["micro"] = average_precision_score(y_true, y_score, average="micro")
    return precision, recall, average_precision


def plt_show(precision, recall, average_precision, model_name):
    """Add one model's micro-averaged P-R step curve to the current figure.

    Args:
        precision, recall, average_precision: dicts from micro_PR(); only
            the "micro" entries are used here.
        model_name: text prefix for the legend entry.
    """
    # Bug fix: the original concatenated model_name directly onto "AP",
    # producing legend labels like "LogisticRegressionAP = 0.92".
    label = "{} AP = {:0.2f}".format(model_name, average_precision["micro"])
    plt.step(recall["micro"], precision["micro"], where='post', lw=2, label=label)


if __name__ == "__main__":
    # Create the shared figure that all P-R curves are drawn into.
    plt.figure("P-R曲线")
    # NOTE: this assignment is at module scope on purpose — micro_PR()
    # falls back to the global y_test when no labels are passed.
    X_train, X_test, y_train, y_test = data_preprocessing()
    # Train each classifier one-vs-rest and overlay its micro-averaged
    # P-R curve (original code repeated this triplet per model).
    models = [
        (LogisticRegression(), "LogisticRegression"),
        (SVC(), "SVM"),
        (LinearDiscriminantAnalysis(), "LinearDiscriminantAnalysis"),
    ]
    for model, model_name in models:
        y_score = train_model(model, X_train, X_test, y_train, y_test)
        precision, recall, average_precision = micro_PR(y_score)
        plt_show(precision, recall, average_precision, model_name)
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.grid()
    plt.plot([0, 1.05], [0, 1.05], color="navy", ls="--")
    plt.legend(fontsize=8)
    plt.xlim(0, 1.05)
    plt.ylim(0, 1.05)
    plt.title("Average precision score, micro-average over all classes")

    plt.show()
