import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.preprocessing import StandardScaler
import matplotlib

# Configure matplotlib so Chinese (CJK) text in titles/labels renders
# correctly instead of showing tofu boxes.
plt.rcParams["font.family"] = "SimHei"      # SimHei supports CJK glyphs
plt.rcParams["axes.unicode_minus"] = False  # render the minus sign with this font

# 1. Load the Iris dataset (150 samples, 4 features, 3 classes).
iris = load_iris()
X, y = iris.data, iris.target  # feature matrix and integer class labels

# 2./3. Split FIRST, then standardize.
# Fitting the scaler on the whole dataset before splitting leaks the test
# set's mean/std into the training pipeline; fit on the training split only
# and apply the same transform to the test split.
X_train_raw, X_test_raw, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train_raw)  # statistics from training data only
X_test = scaler.transform(X_test_raw)        # reuse the training statistics
X_scaled = scaler.transform(X)               # full-data view for later sections (2D plot, CV)

# 4. Hyper-parameter tuning: exhaustive search over C and gamma for an
# RBF-kernel SVM, scored by 5-fold cross-validated accuracy.
search_space = {
    'C': [0.1, 1, 10, 100],
    'gamma': [0.001, 0.01, 0.1, 1],
    'kernel': ['rbf'],
}
grid_search = GridSearchCV(SVC(), search_space, cv=5, scoring='accuracy', n_jobs=-1)
grid_search.fit(X_train, y_train)  # search runs on the training split only

# Report the winning combination and its cross-validated accuracy
print("最佳参数:", grid_search.best_params_)
print("最佳交叉验证得分:", grid_search.best_score_)

# 5. Final model with the best parameters.
# GridSearchCV(refit=True, the default) has already refit best_estimator_
# on the entire training set, so an extra fit() call here is redundant.
best_model = grid_search.best_estimator_

# 6. Predict on the held-out test set
y_pred = best_model.predict(X_test)

# 7. Evaluate on the held-out test set: overall accuracy plus per-class
# precision/recall/F1.
test_accuracy = accuracy_score(y_test, y_pred)
print("\n测试集准确率:", test_accuracy)
report = classification_report(y_test, y_pred, target_names=iris.target_names)
print("\n分类报告:\n", report)

# 8. Confusion-matrix heat map (rows = true class, columns = predicted class)
conf_mat = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(
    conf_mat,
    annot=True,
    fmt='d',
    cmap='Blues',
    xticklabels=iris.target_names,
    yticklabels=iris.target_names,
)
plt.title('混淆矩阵')
plt.xlabel('预测类别')
plt.ylabel('真实类别')
plt.show()

# 9. Cross-validation on the TRAINING data only.
# Running CV over the full dataset would include the held-out test samples
# in the folds, contaminating the reported score with test data.
cv_scores = cross_val_score(best_model, X_train, y_train, cv=5)  # 5-fold CV
print("\n5折交叉验证得分:", cv_scores)
print("平均交叉验证得分:", cv_scores.mean())
print("交叉验证得分标准差:", cv_scores.std())

# 10. Decision-boundary visualization uses only the first two features,
# so a dedicated 2-feature SVM is trained with the tuned C and gamma.
X_2d = X_scaled[:, :2]  # first two (standardized) features
y_2d = y
X_train_2d, X_test_2d, y_train_2d, y_test_2d = train_test_split(
    X_2d, y_2d, test_size=0.2, random_state=42
)
best_params = grid_search.best_params_
svm_2d = SVC(kernel='rbf', C=best_params['C'], gamma=best_params['gamma'])
svm_2d.fit(X_train_2d, y_train_2d)

# Classify every point of a dense grid to paint the decision regions.
step = 0.02  # grid resolution
x0_min, x0_max = X_2d[:, 0].min() - 1, X_2d[:, 0].max() + 1
x1_min, x1_max = X_2d[:, 1].min() - 1, X_2d[:, 1].max() + 1
grid_x, grid_y = np.meshgrid(
    np.arange(x0_min, x0_max, step),
    np.arange(x1_min, x1_max, step),
)
grid_points = np.c_[grid_x.ravel(), grid_y.ravel()]
regions = svm_2d.predict(grid_points).reshape(grid_x.shape)

plt.figure(figsize=(10, 8))
plt.contourf(grid_x, grid_y, regions, cmap=plt.cm.RdYlBu, alpha=0.4)  # decision regions
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdYlBu, edgecolor='k')  # samples
plt.xlabel('花萼长度（标准化）')
plt.ylabel('花萼宽度（标准化）')
plt.title('SVM 决策边界（2D特征）')
plt.colorbar(label='类别')
plt.show()

# 11. Learning curve: train on growing prefixes of the training set and
# record accuracy on both the prefix itself and the fixed test set.
train_sizes, train_scores, test_scores = [], [], []
for fraction in np.linspace(0.1, 1.0, 10):
    n = int(fraction * len(X_train))
    X_sub, y_sub = X_train[:n], y_train[:n]
    model = SVC(**grid_search.best_params_).fit(X_sub, y_sub)
    train_sizes.append(n)
    train_scores.append(accuracy_score(y_sub, model.predict(X_sub)))
    test_scores.append(accuracy_score(y_test, model.predict(X_test)))

# Plot train vs. test accuracy as a function of training-set size.
plt.figure(figsize=(10, 6))
for scores, label, mark in (
    (train_scores, '训练集准确率', 'o'),
    (test_scores, '测试集准确率', 's'),
):
    plt.plot(train_sizes, scores, label=label, marker=mark)
plt.title('学习曲线')
plt.xlabel('训练样本数')
plt.ylabel('准确率')
plt.grid(True)
plt.legend()
plt.show()
