
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score, classification_report, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from sklearn.preprocessing import label_binarize

# Load the iris dataset: 150 samples, 4 features, 3 balanced classes.
iris = datasets.load_iris()
X = iris.data
y = iris.target

# One-hot (one-vs-rest) encoding of the labels; its column count gives
# the number of classes used by the ROC plotting further down.
y_bin = label_binarize(y, classes=[0, 1, 2])
n_classes = y_bin.shape[1]

# Accumulators for per-experiment performance metrics.
all_accuracies, all_precisions = [], []
all_recalls, all_f1s = [], []
all_conf_mats = []

# How many independent train/test splits to evaluate.
n_experiments = 5

# Run n_experiments independent evaluations, each with its own 50/50 split.
for i in range(n_experiments):
    # random_state=i makes each split reproducible yet distinct per run.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=i)

    # One-hot test targets for the ROC computation after the loop.
    # NOTE(review): only the LAST iteration's y_test_bin / y_score survive
    # the loop and feed the ROC plot below — confirm that is intended.
    y_test_bin = label_binarize(y_test, classes=[0, 1, 2])

    # SVC with probability estimates enabled; seeded for reproducibility.
    clf = SVC(probability=True, random_state=i)
    clf.fit(X_train, y_train)

    # Hard predictions and one-vs-rest decision scores on the test set.
    y_pred = clf.predict(X_test)
    y_score = clf.decision_function(X_test)

    # Per-experiment metrics, macro-averaged over the three classes.
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='macro')
    recall = recall_score(y_test, y_pred, average='macro')
    f1 = f1_score(y_test, y_pred, average='macro')
    # Compute the confusion matrix once; the original recomputed it for printing.
    conf_mat = confusion_matrix(y_test, y_pred)

    # Accumulate for the averages computed after the loop.
    all_accuracies.append(accuracy)
    all_precisions.append(precision)
    all_recalls.append(recall)
    all_f1s.append(f1)
    all_conf_mats.append(conf_mat)

    # Report this experiment's results.
    print(f'Experiment {i + 1}:')
    print(f'Accuracy: {accuracy:.4f}')
    print(f'Precision: {precision:.4f}')
    print(f'Recall: {recall:.4f}')
    print(f'F1 Score: {f1:.4f}')
    print('Confusion Matrix:')
    print(conf_mat)
    print('\n')

# Average each metric over all experiments.
average_accuracy = np.mean(all_accuracies)
average_precision = np.mean(all_precisions)
average_recall = np.mean(all_recalls)
average_f1 = np.mean(all_f1s)

# Report the averaged performance metrics.
print(f'Average Accuracy: {average_accuracy:.4f}')
print(f'Average Precision: {average_precision:.4f}')
print(f'Average Recall: {average_recall:.4f}')
print(f'Average F1 Score: {average_f1:.4f}')

# 绘制平均 ROC-AUC 曲线
plt.figure()
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

plt.plot(fpr[0], tpr[0], label=\'Class 0 (AUC = {:.2f})\'.format(roc_auc[0]))
plt.plot(fpr[1], tpr[1], label=\'Class 1 (AUC = {:.2f})\'.format(roc_auc[1]))
plt.plot(fpr[2], tpr[2], label=\'Class 2 (AUC = {:.2f})\'.format(roc_auc[2]))
plt.plot([0, 1], [0, 1], \'k--\')
plt.xlabel(\'False Positive Rate\')
plt.ylabel(\'True Positive Rate\')
plt.title(\'ROC Curve\')
plt.legend(loc=\

