import random
from typing import Dict, List, Optional, Tuple, Union

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
    ConfusionMatrixDisplay,
    accuracy_score,
    classification_report,
    confusion_matrix,
)
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier

# Seed both NumPy's and the stdlib's RNGs so results are reproducible.
np.random.seed(42)
random.seed(42)

# Configure matplotlib to use fonts that can render the CJK text in plot titles.
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts


def load_and_preprocess_data(test_size: float = 0.3) -> Tuple[
    np.ndarray, np.ndarray, np.ndarray, np.ndarray, List[str]]:
    """Load the wine dataset, split it, and standardize the features.

    Args:
        test_size: Fraction of samples held out for the test split.

    Returns:
        (X_train, X_test, y_train, y_test, feature_names) with features
        standardized to zero mean / unit variance.
    """
    wine = load_wine()
    X, y = wine.data, wine.target
    feature_names = wine.feature_names

    # Split first, stratified so class proportions match in both splits.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, stratify=y, random_state=42
    )

    # Fit the scaler on the TRAINING split only and apply it to both splits.
    # Fitting on the full dataset (as before) leaks test-set statistics
    # (mean/std) into training — a classic data-leakage bug.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    return X_train, X_test, y_train, y_test, feature_names


class MyRandomForest:
    """A from-scratch random forest classifier.

    Each tree is an sklearn ``DecisionTreeClassifier`` trained on a bootstrap
    sample of the rows and a fixed random subset of the columns; prediction is
    by majority vote.  Implements ``get_params``/``set_params``/``score`` so
    sklearn utilities such as ``clone`` and ``cross_val_score`` can use it.
    """

    def __init__(
            self,
            n_estimators: int = 100,
            max_features: Union[str, int] = 'sqrt',
            max_depth: Optional[int] = None,
            min_samples_split: int = 2,
            bootstrap: bool = True,
            random_state: Optional[int] = None
    ):
        """
        Initialize the random forest classifier.

        Args:
            n_estimators: Number of trees in the forest.
            max_features: Features considered per tree ('sqrt', 'log2', an int, or None).
            max_depth: Maximum depth of each tree (None = unlimited).
            min_samples_split: Minimum samples required to split an internal node.
            bootstrap: Whether to draw bootstrap samples of the rows.
            random_state: Seed for reproducibility.
        """
        self.n_estimators = n_estimators
        self.max_features = max_features
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.bootstrap = bootstrap
        self.random_state = random_state
        self.trees = []                   # list of (fitted tree, column indices)
        self.feature_importances_ = None  # set by fit()
        self.classes_ = None              # class labels seen by fit()

        if random_state is not None:
            np.random.seed(random_state)
            random.seed(random_state)

    def get_params(self, deep: bool = True) -> dict:
        """Return constructor parameters (sklearn estimator API).

        Required by ``sklearn.base.clone`` and therefore by ``cross_val_score``.
        """
        return {
            'n_estimators': self.n_estimators,
            'max_features': self.max_features,
            'max_depth': self.max_depth,
            'min_samples_split': self.min_samples_split,
            'bootstrap': self.bootstrap,
            'random_state': self.random_state,
        }

    def set_params(self, **params) -> 'MyRandomForest':
        """Set constructor parameters in place (sklearn estimator API)."""
        valid = self.get_params()
        for name, value in params.items():
            if name not in valid:
                raise ValueError(f"Invalid parameter {name!r} for MyRandomForest")
            setattr(self, name, value)
        return self

    def _get_max_features(self, n_features: int) -> int:
        """Resolve ``self.max_features`` into a concrete feature count."""
        if self.max_features == 'sqrt':
            return max(1, int(np.sqrt(n_features)))
        elif self.max_features == 'log2':
            return max(1, int(np.log2(n_features)))
        elif isinstance(self.max_features, int):
            return min(n_features, self.max_features)
        else:  # None: use every feature
            return n_features

    def fit(self, X: np.ndarray, y: np.ndarray) -> 'MyRandomForest':
        """Train the forest on (X, y).  Refitting discards previous trees."""
        n_samples, n_features = X.shape
        max_features = self._get_max_features(n_features)

        # Reset state: the original accumulated trees across fit() calls,
        # which broke predict()'s assumption that there are exactly
        # n_estimators trees after a second fit().
        self.trees = []
        self.classes_ = np.unique(y)
        self.feature_importances_ = np.zeros(n_features)

        for _ in range(self.n_estimators):
            # Bootstrap rows (sampling with replacement) unless disabled.
            if self.bootstrap:
                indices = np.random.choice(n_samples, size=n_samples, replace=True)
                X_bootstrap, y_bootstrap = X[indices], y[indices]
            else:
                X_bootstrap, y_bootstrap = X, y

            # Random column subset: this tree only ever sees these features.
            feature_indices = np.random.choice(n_features, size=max_features, replace=False)

            tree = DecisionTreeClassifier(
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                random_state=random.randint(0, 1000)
            )
            tree.fit(X_bootstrap[:, feature_indices], y_bootstrap)

            # Keep the tree together with its column indices for prediction.
            self.trees.append((tree, feature_indices))

            # Scatter the tree's importances back onto the global feature axis.
            for i, idx in enumerate(feature_indices):
                self.feature_importances_[idx] += tree.feature_importances_[i]

        # Average importances over the forest.
        if self.n_estimators > 0:
            self.feature_importances_ /= self.n_estimators

        return self

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict class labels by majority vote over all trees."""
        if not self.trees:
            raise ValueError("模型尚未训练，请先调用fit方法")

        # One column of predictions per tree (len(self.trees), not
        # n_estimators, so a partially-configured forest cannot misindex).
        predictions = np.zeros((X.shape[0], len(self.trees)))
        for i, (tree, feature_indices) in enumerate(self.trees):
            predictions[:, i] = tree.predict(X[:, feature_indices])

        # Majority vote per sample (labels are assumed non-negative ints).
        final_predictions = np.zeros(X.shape[0], dtype=int)
        for i in range(X.shape[0]):
            final_predictions[i] = np.bincount(predictions[i].astype(int)).argmax()

        return final_predictions

    def score(self, X: np.ndarray, y: np.ndarray) -> float:
        """Mean accuracy on (X, y) — the sklearn default scorer."""
        return float(np.mean(self.predict(X) == y))

    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        """Predict class-membership probabilities, averaged over the trees."""
        if not self.trees:
            raise ValueError("模型尚未训练，请先调用fit方法")

        # Use the label set recorded by fit().  The original recomputed it
        # from each tree's classes_, which builds a ragged array (and fails)
        # when a bootstrap sample misses a class, and it indexed columns by
        # raw label, silently assuming labels are exactly 0..k-1.
        col_of = {cls: j for j, cls in enumerate(self.classes_)}
        probas = np.zeros((X.shape[0], len(self.classes_)))

        for tree, feature_indices in self.trees:
            tree_proba = tree.predict_proba(X[:, feature_indices])
            # A tree may have seen only a subset of the classes; scatter its
            # probability columns into the right global positions.
            for i, cls in enumerate(tree.classes_):
                probas[:, col_of[cls]] += tree_proba[:, i]

        return probas / len(self.trees)


def _plot_confusion_matrices(
        y_test: np.ndarray,
        entries: List[Tuple[str, np.ndarray, float, str]],
        target_names: List[str]
) -> None:
    """Plot one confusion matrix per (name, predictions, accuracy, cmap) entry and save the figure."""
    fig, axes = plt.subplots(1, len(entries), figsize=(18, 5))
    for ax, (name, pred, accuracy, cmap) in zip(axes, entries):
        cm = confusion_matrix(y_test, pred)
        ConfusionMatrixDisplay(cm, display_labels=target_names).plot(ax=ax, cmap=cmap)
        ax.set_title(f'{name} (准确率: {accuracy:.4f})')
    plt.tight_layout()
    plt.savefig('confusion_matrices.png', dpi=300)
    plt.show()


def _plot_feature_importances(
        entries: List[Tuple[str, np.ndarray]],
        feature_names: List[str]
) -> None:
    """Plot sorted feature-importance bar charts, one per (title, importances) entry, and save the figure."""
    fig, axes = plt.subplots(1, len(entries), figsize=(18, 6))
    for ax, (title, importances) in zip(axes, entries):
        order = np.argsort(importances)
        ax.barh(range(len(order)), importances[order])
        ax.set_yticks(range(len(order)))
        ax.set_yticklabels([feature_names[i] for i in order])
        ax.set_title(title)
    plt.tight_layout()
    plt.savefig('feature_importance.png', dpi=300)
    plt.show()


def evaluate_models(
        X_train: np.ndarray,
        X_test: np.ndarray,
        y_train: np.ndarray,
        y_test: np.ndarray,
        feature_names: List[str],
        target_names: List[str]
) -> Dict[str, float]:
    """Train and compare a decision tree, sklearn's random forest, and MyRandomForest.

    Prints test accuracies, classification reports, and 5-fold CV scores, and
    saves confusion-matrix and feature-importance figures to PNG files.

    Returns:
        Mapping from model display name to test-set accuracy.  (The original
        annotation said ``None`` but the function has always returned this dict.)
    """
    # 1. Single decision tree (baseline).
    dt = DecisionTreeClassifier(random_state=42)
    dt.fit(X_train, y_train)
    dt_pred = dt.predict(X_test)
    dt_accuracy = accuracy_score(y_test, dt_pred)

    # 2. sklearn's random forest (reference implementation).
    rf_sklearn = RandomForestClassifier(n_estimators=100, random_state=42)
    rf_sklearn.fit(X_train, y_train)
    rf_sklearn_pred = rf_sklearn.predict(X_test)
    rf_sklearn_accuracy = accuracy_score(y_test, rf_sklearn_pred)

    # 3. Our own random forest.
    rf_mine = MyRandomForest(n_estimators=100, random_state=42)
    rf_mine.fit(X_train, y_train)
    rf_mine_pred = rf_mine.predict(X_test)
    rf_mine_accuracy = accuracy_score(y_test, rf_mine_pred)

    # Accuracy comparison.
    print("模型性能比较:")
    print(f"决策树准确率: {dt_accuracy:.4f}")
    print(f"sklearn随机森林准确率: {rf_sklearn_accuracy:.4f}")
    print(f"自己实现的随机森林准确率: {rf_mine_accuracy:.4f}")

    # Per-class precision/recall/F1 reports.
    print("\n决策树分类报告:")
    print(classification_report(y_test, dt_pred, target_names=target_names))

    print("\nsklearn随机森林分类报告:")
    print(classification_report(y_test, rf_sklearn_pred, target_names=target_names))

    print("\n自己实现的随机森林分类报告:")
    print(classification_report(y_test, rf_mine_pred, target_names=target_names))

    # Confusion matrices, side by side.
    _plot_confusion_matrices(
        y_test,
        [
            ('决策树', dt_pred, dt_accuracy, 'Blues'),
            ('sklearn随机森林', rf_sklearn_pred, rf_sklearn_accuracy, 'Greens'),
            ('自己实现的随机森林', rf_mine_pred, rf_mine_accuracy, 'Oranges'),
        ],
        target_names,
    )

    # Feature importances, sorted ascending so the largest bar is on top.
    _plot_feature_importances(
        [
            ('决策树特征重要性', dt.feature_importances_),
            ('sklearn随机森林特征重要性', rf_sklearn.feature_importances_),
            ('自己实现的随机森林特征重要性', rf_mine.feature_importances_),
        ],
        feature_names,
    )

    # 5-fold cross-validation on the training split.
    print("\n模型交叉验证评估 (5折):")

    dt_cv_scores = cross_val_score(dt, X_train, y_train, cv=5)
    rf_sklearn_cv_scores = cross_val_score(rf_sklearn, X_train, y_train, cv=5)
    # NOTE: fit_params=None was dropped — it was a no-op and the keyword has
    # been deprecated/removed in recent sklearn (replaced by `params`).
    rf_mine_cv_scores = cross_val_score(
        rf_mine, X_train, y_train, cv=5, scoring='accuracy'
    )

    print(f"决策树交叉验证准确率: {np.mean(dt_cv_scores):.4f} ± {np.std(dt_cv_scores):.4f}")
    print(f"sklearn随机森林交叉验证准确率: {np.mean(rf_sklearn_cv_scores):.4f} ± {np.std(rf_sklearn_cv_scores):.4f}")
    print(f"自己实现的随机森林交叉验证准确率: {np.mean(rf_mine_cv_scores):.4f} ± {np.std(rf_mine_cv_scores):.4f}")

    return {
        'Decision Tree': dt_accuracy,
        'sklearn Random Forest': rf_sklearn_accuracy,
        'My Random Forest': rf_mine_accuracy
    }


def main():
    """Entry point: prepare the wine data, run the model comparison, report results."""
    print("加载和预处理葡萄酒数据集...")
    X_train, X_test, y_train, y_test, feature_names = load_and_preprocess_data(test_size=0.3)

    # Human-readable class names for reports and plots.
    target_names = load_wine().target_names

    print("\n开始评估模型性能...")
    results = evaluate_models(X_train, X_test, y_train, y_test, feature_names, target_names)

    print("\n评估完成!")
    print("模型性能排序 (准确率从高到低):")
    ranked = sorted(results.items(), key=lambda item: item[1], reverse=True)
    for model, accuracy in ranked:
        print(f"{model}: {accuracy:.4f}")


if __name__ == "__main__":
    main()