import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA  # 用于降维以可视化决策边界

# Load the iris dataset (150 samples, 4 features, 3 classes).
iris = datasets.load_iris()
X = iris.data
y = iris.target
feature_names = iris.feature_names
target_names = iris.target_names

# Reduce the 4-D feature space to 2-D with PCA so the decision
# boundary can be drawn on a plane.
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)

# Fit a decision tree on the PCA-projected features (fixed seed for
# a reproducible tree/boundary).
clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_pca, y)

# Build a dense grid spanning the projected data (1-unit margin on each
# side) and classify every grid point to obtain the decision regions.
x_min, x_max = X_pca[:, 0].min() - 1, X_pca[:, 0].max() + 1
y_min, y_max = X_pca[:, 1].min() - 1, X_pca[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                     np.arange(y_min, y_max, 0.01))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.figure(figsize=(10, 6))
# Draw the decision regions and boundary lines FIRST so the sample
# points are rendered on top of them, not hidden underneath.
plt.contourf(xx, yy, Z, alpha=0.3, cmap='viridis')
plt.contour(xx, yy, Z, cmap='viridis', linewidths=1)
scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap='viridis',
                      edgecolor='k', s=40)
plt.title('Iris Dataset (PCA-reduced features)')
plt.xlabel('PCA Feature 1')
plt.ylabel('PCA Feature 2')
# Use the species names for the legend rather than the bare class
# indices that legend_elements() would produce on its own.
handles, _ = scatter.legend_elements()
plt.legend(handles, target_names, title="Classes")

# Show the combined scatter + decision-boundary figure.
plt.show()