import numpy as np
import matplotlib.pyplot as plt

# Seed the global NumPy RNG so every run is reproducible
np.random.seed(42)


class DecisionTree:
    """CART-style decision tree classifier using Gini impurity and binary
    axis-aligned splits.

    The fitted tree is a nested dict structure: internal nodes are dicts
    with keys 'feature', 'threshold', 'left', 'right'; leaves are plain
    integer class labels.
    """

    def __init__(self, max_depth=10, min_samples_split=2, min_samples_leaf=1):
        self.max_depth = max_depth  # maximum recursion depth of the tree
        self.min_samples_split = min_samples_split  # min samples needed to attempt a split
        self.min_samples_leaf = min_samples_leaf  # min samples required in each child
        self.tree = None  # root node, populated by fit()

    def _gini_impurity(self, y):
        """Return the Gini impurity of label array ``y`` (0 for empty or pure)."""
        if len(y) == 0:
            return 0
        _, counts = np.unique(y, return_counts=True)
        probabilities = counts / len(y)
        return 1 - np.sum(probabilities ** 2)

    def _leaf_value(self, y):
        """Return the majority class of ``y`` — the prediction at a leaf.

        Extracted helper: this bincount/argmax expression was repeated at
        three separate stopping points in _build_tree.
        """
        return np.bincount(y.astype(int)).argmax()

    def _best_split(self, X, y):
        """Exhaustively search every (feature, threshold) pair for the split
        minimizing the weighted Gini impurity of the two children.

        Returns:
            (feature_index, threshold), or (None, None) when no split can
            separate the samples (e.g. all feature values are identical).
        """
        best_gini = float('inf')
        best_feature = None
        best_threshold = None

        n_total = len(y)
        n_features = X.shape[1]

        for feature_idx in range(n_features):
            feature_values = X[:, feature_idx]
            # np.unique is sorted ascending; drop the largest value because
            # "value <= max" sends every sample left (empty right child) —
            # the original scanned that threshold and then skipped it anyway.
            thresholds = np.unique(feature_values)[:-1]

            for threshold in thresholds:
                left_mask = feature_values <= threshold
                right_mask = ~left_mask

                n_left = np.sum(left_mask)
                n_right = n_total - n_left

                # Weighted average of the children's impurities.
                weighted_gini = ((n_left / n_total) * self._gini_impurity(y[left_mask])
                                 + (n_right / n_total) * self._gini_impurity(y[right_mask]))

                # Strict < keeps the first-found split on ties, matching the
                # original scan order.
                if weighted_gini < best_gini:
                    best_gini = weighted_gini
                    best_feature = feature_idx
                    best_threshold = threshold

        return best_feature, best_threshold

    def _build_tree(self, X, y, depth=0):
        """Recursively grow the tree; returns a dict node or an int leaf label."""
        n_samples = X.shape[0]

        # Stop when the depth limit is hit, the node is too small to split,
        # or the node is already pure.
        if (depth >= self.max_depth or
                n_samples < self.min_samples_split or
                len(np.unique(y)) == 1):
            return self._leaf_value(y)

        best_feature, best_threshold = self._best_split(X, y)

        if best_feature is None:
            # No usable split exists; emit a leaf instead.
            return self._leaf_value(y)

        left_mask = X[:, best_feature] <= best_threshold
        right_mask = ~left_mask

        # Reject splits that would create an undersized child.
        if (np.sum(left_mask) < self.min_samples_leaf or
                np.sum(right_mask) < self.min_samples_leaf):
            return self._leaf_value(y)

        return {
            'feature': best_feature,
            'threshold': best_threshold,
            'left': self._build_tree(X[left_mask], y[left_mask], depth + 1),
            'right': self._build_tree(X[right_mask], y[right_mask], depth + 1),
        }

    def fit(self, X, y):
        """Fit the tree on feature matrix ``X`` and integer labels ``y``."""
        self.tree = self._build_tree(X, y)

    def _predict_sample(self, sample):
        """Route a single sample down the tree and return its leaf label."""
        node = self.tree
        # Internal nodes are dicts; a leaf is the bare class label.
        while isinstance(node, dict):
            node = node['left'] if sample[node['feature']] <= node['threshold'] else node['right']
        return node

    def predict(self, X):
        """Return predicted class labels for every row of ``X``."""
        return np.array([self._predict_sample(sample) for sample in X])


class RandomForest:
    """Random forest classifier built from bagged ``DecisionTree`` instances.

    NOTE: features are sub-sampled once per tree rather than per split (the
    textbook algorithm sub-samples at every split); kept as-is to preserve
    the existing behavior.
    """

    def __init__(self, n_trees=10, max_depth=10, min_samples_split=2,
                 min_samples_leaf=1, max_features='sqrt', bootstrap=True):
        self.n_trees = n_trees  # number of trees in the ensemble
        self.max_depth = max_depth  # max depth passed to every tree
        self.min_samples_split = min_samples_split  # min samples to split, per tree
        self.min_samples_leaf = min_samples_leaf  # min samples per leaf, per tree
        self.max_features = max_features  # 'sqrt', 'log2', an int, or anything else => all features
        self.bootstrap = bootstrap  # resample with replacement before fitting each tree
        self.trees = []  # fitted DecisionTree objects
        self.feature_indices = []  # feature subset used by each tree
        self.classes_ = None  # sorted unique training labels, set by fit()

    def _bootstrap_sample(self, X, y):
        """Draw ``len(X)`` rows with replacement (bootstrap resampling).

        Sampling with replacement yields subsets whose statistics estimate
        those of the full population; each tree sees a different subset.
        """
        n_samples = X.shape[0]
        indices = np.random.choice(n_samples, n_samples, replace=True)
        return X[indices], y[indices]

    def _get_random_features(self, n_features):
        """Pick a random feature subset sized per ``self.max_features``."""
        if self.max_features == 'sqrt':
            max_features = int(np.sqrt(n_features))
        elif self.max_features == 'log2':
            max_features = int(np.log2(n_features))
        elif isinstance(self.max_features, int):
            max_features = self.max_features
        else:
            max_features = n_features

        # Clamp to [1, n_features]: 'sqrt'/'log2' round down to 0 for tiny
        # feature counts (e.g. log2 of 1 feature), which previously produced
        # an unusable empty feature subset.
        max_features = min(max(1, max_features), n_features)
        return np.random.choice(n_features, max_features, replace=False)

    def fit(self, X, y):
        """Fit ``n_trees`` trees, each on a bootstrap sample and feature subset."""
        self.trees = []
        self.feature_indices = []
        # Remember the full training label set so predict_proba can emit one
        # column per class even for classes no tree ever predicts.
        self.classes_ = np.unique(y)

        for _ in range(self.n_trees):
            tree = DecisionTree(
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf
            )

            if self.bootstrap:
                X_sample, y_sample = self._bootstrap_sample(X, y)
            else:
                X_sample, y_sample = X, y

            # Random feature subset, fixed for this tree's lifetime.
            feature_indices = self._get_random_features(X.shape[1])
            tree.fit(X_sample[:, feature_indices], y_sample)

            self.trees.append(tree)
            self.feature_indices.append(feature_indices)

    def _tree_votes(self, X):
        """Return an (n_trees, n_samples) array of per-tree predictions.

        Shared by predict and predict_proba, which previously duplicated
        this collection loop.
        """
        return np.array([
            tree.predict(X[:, idx])
            for tree, idx in zip(self.trees, self.feature_indices)
        ])

    def predict(self, X):
        """Predict class labels by majority vote across the trees."""
        votes = self._tree_votes(X)

        final_predictions = []
        for j in range(X.shape[0]):
            labels, counts = np.unique(votes[:, j], return_counts=True)
            # argmax breaks ties in favor of the smallest label.
            final_predictions.append(labels[np.argmax(counts)])

        return np.array(final_predictions)

    def predict_proba(self, X):
        """Return an (n_samples, n_classes) array of vote fractions.

        Columns are ordered by ``self.classes_`` (all classes seen in fit).
        BUGFIX: the class set used to be derived from the trees' predictions,
        so a class never predicted by any tree silently shortened and
        misaligned the probability vectors.
        """
        votes = self._tree_votes(X)

        probabilities = []
        for j in range(X.shape[0]):
            column = votes[:, j]
            # Fraction of trees voting for each known class.
            class_probs = np.array([np.mean(column == c) for c in self.classes_])
            probabilities.append(class_probs)

        return np.array(probabilities)


def generate_dataset(n_per_class=100, seed=42):
    """Generate a 3-class, 2-feature toy dataset of Gaussian blobs.

    Each class is an isotropic Gaussian (std 1) around a distinct center:
    class 0 at (2, 2), class 1 at (6, 6), class 2 at (2, 6). The draw order
    matches the original implementation, so the default arguments reproduce
    the original dataset exactly.

    Args:
        n_per_class: samples drawn per class (default 100, the original size).
        seed: seed for the global NumPy RNG; pass None to leave RNG state alone.

    Returns:
        (X, y): X of shape (3 * n_per_class, 2), y the matching float labels.
    """
    if seed is not None:
        np.random.seed(seed)

    # Class 0: Gaussian around (2, 2)
    class0_x1 = np.random.normal(2, 1, n_per_class)
    class0_x2 = np.random.normal(2, 1, n_per_class)
    class0_labels = np.zeros(n_per_class)

    # Class 1: Gaussian around (6, 6)
    class1_x1 = np.random.normal(6, 1, n_per_class)
    class1_x2 = np.random.normal(6, 1, n_per_class)
    class1_labels = np.ones(n_per_class)

    # Class 2: Gaussian around (2, 6)
    class2_x1 = np.random.normal(2, 1, n_per_class)
    class2_x2 = np.random.normal(6, 1, n_per_class)
    class2_labels = np.full(n_per_class, 2)

    # Stack the per-class draws into one (N, 2) matrix and label vector.
    X = np.column_stack([
        np.concatenate([class0_x1, class1_x1, class2_x1]),
        np.concatenate([class0_x2, class1_x2, class2_x2])
    ])
    y = np.concatenate([class0_labels, class1_labels, class2_labels])

    return X, y


def train_test_split(X, y, test_size=0.3):
    """Randomly partition (X, y) into train and test sets.

    Uses the global NumPy RNG; seed it beforehand for reproducibility.

    Args:
        X: feature matrix, indexable by row.
        y: label array aligned with X.
        test_size: fraction of samples assigned to the test set.

    Returns:
        (X_train, X_test, y_train, y_test); train rows keep ascending
        original order, test rows follow the random draw order.
    """
    n_samples = len(X)
    n_test = int(n_samples * test_size)

    # One draw without replacement selects the test rows.
    test_indices = np.random.choice(n_samples, n_test, replace=False)

    # Boolean mask instead of the original O(n^2) list comprehension
    # ("i not in test_indices" is a linear ndarray scan per element).
    # np.flatnonzero returns the same ascending index order.
    train_mask = np.ones(n_samples, dtype=bool)
    train_mask[test_indices] = False
    train_indices = np.flatnonzero(train_mask)

    X_train, X_test = X[train_indices], X[test_indices]
    y_train, y_test = y[train_indices], y[test_indices]

    return X_train, X_test, y_train, y_test


def calculate_accuracy(y_true, y_pred):
    """Return the fraction of predictions that match the true labels.

    Inputs are coerced to NumPy arrays first: with plain Python lists the
    bare ``y_true == y_pred`` comparison collapses to a single bool
    (whole-list equality), yielding only 0.0 or 1.0 instead of the
    element-wise accuracy.

    Args:
        y_true: ground-truth labels.
        y_pred: predicted labels, same length as ``y_true``.

    Returns:
        Accuracy as a float in [0, 1].
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return np.mean(y_true == y_pred)


# Main program: build the dataset, train the forest, and report metrics.
if __name__ == "__main__":
    print("开始生成数据集...")
    X, y = generate_dataset()
    print(f"数据集生成完成！总样本数：{len(X)}")
    print(f"特征维度：{X.shape[1]}")
    print(f"类别分布：{np.bincount(y.astype(int))}")

    print("\n划分训练集和测试集...")
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    print(f"训练集大小：{len(X_train)}")
    print(f"测试集大小：{len(X_test)}")

    print("\n开始训练随机森林...")
    rf = RandomForest(n_trees=20, max_depth=10, max_features='sqrt')
    rf.fit(X_train, y_train)
    print("随机森林训练完成！")

    print("\n进行预测...")
    y_pred = rf.predict(X_test)

    accuracy = calculate_accuracy(y_test, y_pred)
    print(f"测试集准确率：{accuracy:.4f}")

    # Show per-class vote fractions for the first few test samples.
    y_proba = rf.predict_proba(X_test)
    print(f"\n前5个样本的预测概率：")
    for i in range(min(5, len(y_test))):
        print(f"样本{i + 1}: 真实标签={int(y_test[i])}, 预测标签={int(y_pred[i])}")
        print(f"  各类别概率: {y_proba[i]}")

    print(f"\n预测结果统计：")
    print(f"真实标签分布：{np.bincount(y_test.astype(int))}")
    print(f"预测标签分布：{np.bincount(y_pred.astype(int))}")

    # Confusion matrix sized from the data instead of a hard-coded 3, so the
    # report stays correct if the dataset's class count ever changes; int
    # dtype makes the per-cell int() casts unnecessary.
    n_classes = int(max(y.astype(int))) + 1
    print(f"\n混淆矩阵 (行为真实标签，列为预测标签)：")
    confusion_matrix = np.zeros((n_classes, n_classes), dtype=int)
    for true_label, pred_label in zip(y_test, y_pred):
        confusion_matrix[int(true_label), int(pred_label)] += 1

    # Header reproduces the original "    预测: 0   1   2" layout.
    print("    预测: " + "   ".join(str(j) for j in range(n_classes)))
    for i in range(n_classes):
        print(f"真实{i}:  ", end="")
        for j in range(n_classes):
            print(f"{confusion_matrix[i, j]:3d} ", end="")
        print()

    print(f"\n随机森林实现完成！使用了{rf.n_trees}棵决策树。")
