import numpy as np
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

# Load the wine dataset and pull out the pieces used below.
wine = load_wine()
X = wine.data
y = wine.target
feature_names = wine.feature_names
class_names = wine.target_names

# Hold out 30% of the rows as a test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# 1. A hand-rolled random forest implementation
class MyRandomForest:
    """Minimal random-forest classifier built on sklearn decision trees.

    Each tree is trained on a bootstrap sample of the rows and a random
    subset of the columns; prediction is a majority vote over all trees.
    """

    def __init__(self, n_estimators=100, max_depth=None, max_features='sqrt', random_state=None):
        # n_estimators: number of trees in the ensemble.
        # max_depth: depth cap forwarded to each DecisionTreeClassifier.
        # max_features: 'sqrt', 'log2', an int count, or None/other (all features).
        # random_state: seed for reproducible sampling.
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.max_features = max_features
        self.random_state = random_state
        self.trees = []  # list of (fitted tree, feature-index array) pairs
        self.feature_importances_ = None
        # Private RNG instead of np.random.seed(): seeding the global numpy
        # state from a constructor is a surprising side effect for callers.
        self._rng = np.random.RandomState(random_state)

    def _n_subset_features(self, n_features):
        """Resolve self.max_features into a concrete per-tree feature count."""
        if self.max_features == 'sqrt':
            return max(1, int(np.sqrt(n_features)))
        if self.max_features == 'log2':
            return max(1, int(np.log2(n_features)))
        if isinstance(self.max_features, (int, np.integer)):
            return min(int(self.max_features), n_features)
        return n_features

    def fit(self, X, y):
        """Fit the ensemble on X of shape (n_samples, n_features) and labels y.

        Returns self so calls can be chained (sklearn convention).
        """
        n_samples, n_features = X.shape
        # Reset state so refitting does not accumulate trees from a prior fit.
        self.trees = []
        self.feature_importances_ = np.zeros(n_features)
        max_features = self._n_subset_features(n_features)

        for _ in range(self.n_estimators):
            # Bootstrap sample: n_samples rows drawn with replacement.
            indices = self._rng.choice(n_samples, n_samples, replace=True)
            # Feature subsample: max_features distinct columns.
            feature_indices = self._rng.choice(n_features, max_features, replace=False)
            X_sample = X[indices][:, feature_indices]
            y_sample = y[indices]

            tree = DecisionTreeClassifier(max_depth=self.max_depth)
            tree.fit(X_sample, y_sample)
            self.trees.append((tree, feature_indices))

            # Map each tree's subset-local importances back onto the full
            # feature axis before accumulating.
            for i, idx in enumerate(feature_indices):
                self.feature_importances_[idx] += tree.feature_importances_[i]

        # Normalize; guard against an all-zero importance vector
        # (e.g. n_estimators == 0 or degenerate single-node trees).
        total = self.feature_importances_.sum()
        if total > 0:
            self.feature_importances_ /= total
        return self

    def predict(self, X):
        """Predict class labels for X by majority vote across all trees."""
        predictions = np.zeros((len(self.trees), len(X)))
        for i, (tree, feature_indices) in enumerate(self.trees):
            # Each tree only sees the columns it was trained on.
            predictions[i] = tree.predict(X[:, feature_indices])
        # Per-sample vote: the most frequent predicted label wins.
        return np.array([np.bincount(predictions[:, i].astype(int)).argmax()
                         for i in range(len(X))])

# 2. Train the three models and record their test-set accuracy.

def _fit_eval(model):
    """Fit *model* on the training split; return (model, predictions, accuracy)."""
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    return model, preds, accuracy_score(y_test, preds)

# Hand-rolled random forest
my_rf, my_rf_pred, my_rf_acc = _fit_eval(
    MyRandomForest(n_estimators=100, max_depth=3, random_state=42))

# sklearn random forest
sklearn_rf, sklearn_rf_pred, sklearn_rf_acc = _fit_eval(
    RandomForestClassifier(n_estimators=100, max_depth=3, random_state=42))

# Single decision tree baseline
dt, dt_pred, dt_acc = _fit_eval(DecisionTreeClassifier(max_depth=3, random_state=42))

# 3. Visual comparison of the three models.
plt.figure(figsize=(12, 6))

# Left panel: bar chart of test accuracies, value annotated above each bar.
plt.subplot(1, 2, 1)
model_labels = ['My RF', 'Sklearn RF', 'Decision Tree']
model_scores = [my_rf_acc, sklearn_rf_acc, dt_acc]
palette = ['#1f77b4', '#ff7f0e', '#2ca02c']
for rect in plt.bar(model_labels, model_scores, color=palette):
    top = rect.get_height()
    plt.text(rect.get_x() + rect.get_width()/2., top,
            f'{top:.3f}', ha='center', va='bottom')
plt.title('Model Accuracy Comparison')
plt.ylabel('Accuracy')
plt.ylim(0, 1.1)

# Right panel: grouped bars of per-feature importance for each model.
plt.subplot(1, 2, 2)
positions = np.arange(len(feature_names))
bar_width = 0.25
importance_series = [
    ('My RF', my_rf.feature_importances_, '#1f77b4', -bar_width),
    ('Sklearn RF', sklearn_rf.feature_importances_, '#ff7f0e', 0.0),
    ('Decision Tree', dt.feature_importances_, '#2ca02c', bar_width),
]
for label, importances, color, offset in importance_series:
    plt.bar(positions + offset, importances, bar_width, label=label, color=color)
plt.xticks(positions, feature_names, rotation=90)
plt.title('Feature Importance Comparison')
plt.legend()
plt.tight_layout()
plt.show()

# 4. Tree-structure visualization example (shows the forest's first tree).
tree_fig = plt.figure(figsize=(20, 10))
plt.title("First Tree in My Random Forest", fontsize=16)

# Simplified tree-structure plot (sklearn's plot_tree is preferred in practice).
def plot_tree_simple(tree, feature_names, class_names, ax, x=0.5, y=0.9, width=0.4, height=0.1, node=0):
    """Recursively draw the sub-tree rooted at *node* of a fitted sklearn tree.

    The previous version always inspected node index 0, so any tree whose
    root was a split node recursed forever (RecursionError). The new *node*
    parameter (default 0 = root, backward-compatible) tracks the current
    position in the flat sklearn tree arrays.

    feature_names must be indexed the same way the tree was trained
    (i.e. pass the feature subset used for this tree).
    """
    t = tree.tree_
    if t.children_left[node] == -1:  # leaf node (-1 marks "no child" in sklearn)
        value = t.value[node]
        class_idx = np.argmax(value)
        ax.add_patch(Rectangle((x-0.05, y-0.02), 0.1, 0.04, color='lightgreen'))
        ax.text(x, y, f"Class: {class_names[class_idx]}\nSamples: {t.n_node_samples[node]}", 
                ha='center', va='center', bbox=dict(facecolor='white', alpha=0.8))
    else:  # decision node
        feature = feature_names[t.feature[node]]
        threshold = t.threshold[node]
        ax.add_patch(Rectangle((x-0.05, y-0.02), 0.1, 0.04, color='skyblue'))
        ax.text(x, y, f"{feature} <= {threshold:.2f}\nSamples: {t.n_node_samples[node]}", 
                ha='center', va='center', bbox=dict(facecolor='white', alpha=0.8))
        
        # Child positions: spread horizontally, step down one level.
        left_x = x - width/2
        right_x = x + width/2
        new_y = y - height
        
        ax.annotate("", xy=(left_x, new_y), xytext=(x, y), 
                    arrowprops=dict(arrowstyle="->", lw=1.5, color='gray'))
        ax.annotate("", xy=(right_x, new_y), xytext=(x, y), 
                    arrowprops=dict(arrowstyle="->", lw=1.5, color='gray'))
        
        # Recurse into the actual children instead of re-visiting the root.
        plot_tree_simple(tree, feature_names, class_names, ax, left_x, new_y,
                         width/2, height, node=t.children_left[node])
        plot_tree_simple(tree, feature_names, class_names, ax, right_x, new_y,
                         width/2, height, node=t.children_right[node])

# Render the first (tree, feature subset) pair from the hand-rolled forest.
ax = plt.gca()
ax.axis('off')
first_tree, first_features = my_rf.trees[0]
plot_tree_simple(first_tree, np.array(feature_names)[first_features], class_names, ax)
plt.show()

# 5. Performance summary table (tabulate renders a grid-style ASCII table).
from tabulate import tabulate

summary_rows = [
    ["My Random Forest", my_rf_acc],
    ["Sklearn Random Forest", sklearn_rf_acc],
    ["Decision Tree", dt_acc],
]
print("\n模型性能对比:")
print(tabulate(summary_rows, headers=["Model", "Accuracy"], tablefmt="grid"))