import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay

# Configure matplotlib for CJK text: render labels with Microsoft YaHei and
# keep the minus sign displayable under a non-ASCII default font.
plt.rcParams.update({
    'font.sans-serif': ['Microsoft YaHei'],
    'axes.unicode_minus': False,
})

# 1. Load the iris dataset: 150 samples, 4 numeric features, 3 classes
#    (0: setosa, 1: versicolor, 2: virginica).
iris = load_iris()
X, y = iris.data, iris.target  # feature matrix / class labels
feature_names = iris.feature_names
class_names = iris.target_names

print("数据集信息:")
print(f"特征: {feature_names}")
print(f"类别: {class_names}")
print(f"数据形状: {X.shape}, 目标形状: {y.shape}")

# 2. Hold out 20% as a test set; stratify so class proportions match,
#    and fix the seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)

print("\n训练集大小:", X_train.shape)
print("测试集大小:", X_test.shape)

# 3. Fit a CART classifier with default hyper-parameters (no pruning),
#    so the tree grows until the leaves are pure.
cart_classifier = DecisionTreeClassifier(random_state=42)
cart_classifier.fit(X_train, y_train)

# 4. Score the fully-grown tree on the held-out test set.
y_pred = cart_classifier.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("\n未剪枝模型测试准确率:", accuracy)

# 5. Render the unpruned tree (wide canvas — the full tree is large).
plt.figure(figsize=(20, 10))
plot_tree(
    cart_classifier,
    feature_names=feature_names,
    class_names=class_names,
    filled=True,
    rounded=True,
)
plt.title("未剪枝的CART决策树")
plt.show()

# 6. Cost-complexity pruning (CCP): compute the pruning path on the
#    training data to get every "effective" alpha and the total leaf
#    impurity of the corresponding pruned subtree.
path = cart_classifier.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas, impurities = path.ccp_alphas, path.impurities

# The largest alpha prunes the tree down to a single root node — a trivial
# model that is never worth selecting. Drop it (as the scikit-learn CCP
# example does), keeping impurities aligned with the remaining alphas.
ccp_alphas = ccp_alphas[:-1]
impurities = impurities[:-1]

print("\n有效的alpha值数量:", len(ccp_alphas))

# Fit one pruned subtree per candidate alpha.
classifiers = [
    DecisionTreeClassifier(random_state=42, ccp_alpha=ccp_alpha).fit(X_train, y_train)
    for ccp_alpha in ccp_alphas
]

# 7. Score every pruned subtree on both the training and the test split.
train_acc = [accuracy_score(y_train, clf.predict(X_train)) for clf in classifiers]
test_acc = [accuracy_score(y_test, clf.predict(X_test)) for clf in classifiers]

# Visualize how accuracy responds to increasing pruning strength.
plt.figure(figsize=(12, 6))
plt.plot(ccp_alphas, train_acc, 'o-', label="训练集准确率")
plt.plot(ccp_alphas, test_acc, 's-', label="测试集准确率")
plt.xlabel("alpha (剪枝强度)")
plt.ylabel("准确率")
plt.title("准确率 vs. alpha")
plt.legend()
plt.grid(True)
plt.show()

# 8. Choose alpha by 5-fold cross-validation on the training set:
#    for each candidate alpha, average the fold accuracies.
alpha_scores = [
    cross_val_score(
        DecisionTreeClassifier(random_state=42, ccp_alpha=ccp_alpha),
        X_train,
        y_train,
        cv=5,
    ).mean()
    for ccp_alpha in ccp_alphas
]

# Highest mean CV accuracy wins (argmax keeps the smallest alpha on ties).
best_alpha_index = np.argmax(alpha_scores)
best_alpha = ccp_alphas[best_alpha_index]

print(f"\n最佳alpha值: {best_alpha:.6f}")
print(f"交叉验证平均准确率: {alpha_scores[best_alpha_index]:.4f}")

# 9. Refit a single tree on the full training set with the selected alpha.
best_clf = DecisionTreeClassifier(random_state=42, ccp_alpha=best_alpha).fit(
    X_train, y_train
)

# 10. Report final train/test accuracy of the pruned model.
y_train_pred = best_clf.predict(X_train)
y_test_pred = best_clf.predict(X_test)
train_accuracy = accuracy_score(y_train, y_train_pred)
test_accuracy = accuracy_score(y_test, y_test_pred)

print("\n==== 最终模型性能 ====")
print(f"训练集准确率: {train_accuracy:.4f}")
print(f"测试集准确率: {test_accuracy:.4f}")

# 11. Render the pruned tree — much smaller than the unpruned one.
plt.figure(figsize=(12, 8))
plot_tree(
    best_clf,
    feature_names=feature_names,
    class_names=class_names,
    filled=True,
    rounded=True,
)
plt.title(f"剪枝后的CART决策树 (alpha={best_alpha:.6f})")
plt.show()

# 12. Confusion matrix of the pruned model on the test set.
cm = confusion_matrix(y_test, y_test_pred)
ConfusionMatrixDisplay(cm, display_labels=class_names).plot(cmap=plt.cm.Blues)
plt.title("测试集混淆矩阵")
plt.show()

# 13. Feature importances of the pruned tree, printed largest-first.
importances = best_clf.feature_importances_
indices = np.argsort(importances)[::-1]  # feature indices, descending importance

print("\n特征重要性:")
for idx in indices:
    print(f"{feature_names[idx]}: {importances[idx]:.4f}")

# Bar chart of the ranked importances.
plt.figure(figsize=(10, 6))
plt.title("特征重要性")
plt.bar(range(X_train.shape[1]), importances[indices], align="center")
plt.xticks(range(X_train.shape[1]), [feature_names[i] for i in indices])
plt.ylabel("重要性得分")
plt.tight_layout()
plt.show()