import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt

# Load the breast-cancer dataset as a labelled DataFrame / Series pair.
data = load_breast_cancer()
X = pd.DataFrame(data=data.data, columns=data.feature_names)
y = pd.Series(data=data.target)

# Hold out 20% of the samples as a test set; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
    random_state=42,
)

# Decision-tree feature-selection strategy 1: split on information gain (entropy).
# random_state is pinned so the chosen splits (and hence the plots and test
# accuracies below) are reproducible — consistent with train_test_split and
# RandomForestClassifier elsewhere in this script, which already use seed 42.
dt1 = DecisionTreeClassifier(criterion='entropy', random_state=42)
dt1.fit(X_train, y_train)

# Decision-tree feature-selection strategy 2: split on Gini impurity.
dt2 = DecisionTreeClassifier(criterion='gini', random_state=42)
dt2.fit(X_train, y_train)

# Visualize both fitted trees (nodes coloured by majority class).
plt.figure(figsize=(10, 6))
plot_tree(dt1, feature_names=data.feature_names, filled=True)
plt.show()

plt.figure(figsize=(10, 6))
plot_tree(dt2, feature_names=data.feature_names, filled=True)
plt.show()

# Fit a random forest for comparison and for feature-importance ranking.
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)

# Rank all features by their forest-derived importance, most important first.
importance = rf.feature_importances_
indices = np.argsort(importance)[::-1]
sorted_features = X.columns[indices]

# Bar chart of the ranked importances, one bar per feature.
n_features = X.shape[1]
plt.figure()
plt.title("Feature Importance")
plt.bar(range(n_features), importance[indices], align='center')
plt.xticks(range(n_features), sorted_features, rotation=90)
plt.tight_layout()
plt.show()

# Evaluate every fitted model on the held-out test set.
dt1_accuracy = dt1.score(X_test, y_test)
dt2_accuracy = dt2.score(X_test, y_test)
rf_accuracy = rf.score(X_test, y_test)

# Report each accuracy with its label (print inserts the same separating
# space the original per-line print calls produced).
for label, acc in (
    ("决策树（基于信息增益）准确率：", dt1_accuracy),
    ("决策树（基于基尼不纯度）准确率：", dt2_accuracy),
    ("随机森林准确率：", rf_accuracy),
):
    print(label, acc)