# **************************************
# --*-- coding: utf-8 --*--
# @Author  : white
# @FileName: 决策树.py
# @Time    : 2025-08-29
# **************************************
import numpy as np
import pandas as pd

# Step 1: Build a synthetic classification dataset (100 samples, 5 classes,
# 2 numerical features) so the tree below has something to learn.
np.random.seed(42)  # fixed seed -> reproducible data
n_samples = 100
n_features = 2
n_classes = 5

# Features drawn uniformly from [0, 10); labels drawn uniformly from {0..4}.
X = np.random.rand(n_samples, n_features) * 10
y = np.random.randint(0, n_classes, n_samples)

# Wrap everything in a DataFrame for convenient inspection.
feature_names = [f'feature_{i}' for i in range(n_features)]
data = pd.DataFrame(X, columns=feature_names)
data['label'] = y

print("Sample data:")
print(data.head())


# Step 2: Implement Decision Tree from scratch using only NumPy and Pandas

class DecisionTree:
    """Binary decision-tree classifier trained with entropy / information gain.

    Accepts NumPy arrays or pandas DataFrame/Series inputs. Labels must be
    non-negative integers because leaves use ``np.bincount`` majority voting.
    The fitted tree is stored in ``self.tree`` as nested dicts
    ``{'feature', 'threshold', 'left', 'right'}`` with integer class labels
    at the leaves.
    """

    def __init__(self, max_depth=None, min_samples_split=2):
        """
        :param max_depth: maximum depth of the tree; ``None`` means unbounded.
        :param min_samples_split: minimum number of samples a node and each
            candidate child must contain for a split to be considered.
        """
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.tree = None  # set by fit(): dict for internal nodes, int for a leaf

    def _entropy(self, y):
        """Return the Shannon entropy (in bits) of the label array ``y``."""
        _, counts = np.unique(y, return_counts=True)
        probabilities = counts / len(y)
        # np.unique only reports classes that actually occur, so every
        # probability is strictly positive and log2 is well defined
        # (the previous +1e-10 epsilon was unnecessary and slightly biased
        # the result).
        return -np.sum(probabilities * np.log2(probabilities))

    def _information_gain(self, y, y_left, y_right):
        """Entropy reduction achieved by splitting ``y`` into the two children."""
        n = len(y)
        # Children's entropies weighted by their share of the parent's samples.
        weighted_entropy = (len(y_left) / n) * self._entropy(y_left) \
                           + (len(y_right) / n) * self._entropy(y_right)
        return self._entropy(y) - weighted_entropy

    def _best_split(self, X, y):
        """Scan every (feature, threshold) pair and return the best split.

        Returns ``(None, None)`` when no candidate split yields a strictly
        positive information gain or none satisfies the child-size constraint.
        """
        # Require strictly positive gain: a zero-gain split cannot improve
        # the tree and previously led to pointless recursion.
        best_gain = 0.0
        best_feature = None
        best_threshold = None

        n_samples, n_features = X.shape
        for feature in range(n_features):
            # Every distinct observed value is a candidate threshold.
            for threshold in np.unique(X[:, feature]):
                left_mask = X[:, feature] <= threshold
                right_mask = ~left_mask

                # Reject splits that would make either child too small.
                if np.sum(left_mask) < self.min_samples_split or np.sum(right_mask) < self.min_samples_split:
                    continue

                gain = self._information_gain(y, y[left_mask], y[right_mask])
                if gain > best_gain:
                    best_gain = gain
                    best_feature = feature
                    best_threshold = threshold

        return best_feature, best_threshold

    def _build_tree(self, X, y, depth=0):
        """Recursively grow the tree; returns a dict node or an int leaf label."""
        n_samples = len(y)
        # Stop when the node is too small, already pure, or max depth is
        # reached. (The purity check is new: without it, single-class nodes
        # were still split, bloating the tree for no predictive benefit.)
        if (n_samples < self.min_samples_split
                or len(np.unique(y)) == 1
                or (self.max_depth is not None and depth >= self.max_depth)):
            return np.bincount(y).argmax()  # leaf: majority class

        feature, threshold = self._best_split(X, y)
        if feature is None:
            # No split improves on the parent: make this node a leaf.
            return np.bincount(y).argmax()

        left_mask = X[:, feature] <= threshold
        right_mask = ~left_mask

        # Internal node: recurse into both children.
        return {
            'feature'  : feature,
            'threshold': threshold,
            'left'     : self._build_tree(X[left_mask], y[left_mask], depth + 1),
            'right'    : self._build_tree(X[right_mask], y[right_mask], depth + 1)
        }

    def fit(self, X, y):
        """Fit the tree on features ``X`` and non-negative integer labels ``y``."""
        if isinstance(X, pd.DataFrame):
            X = X.values
        if isinstance(y, pd.Series):
            y = y.values
        self.tree = self._build_tree(X, y)

    def _predict_one(self, x, node):
        """Walk one sample down the tree until an int leaf label is reached."""
        if not isinstance(node, dict):
            return node  # leaf node
        if x[node['feature']] <= node['threshold']:
            return self._predict_one(x, node['left'])
        return self._predict_one(x, node['right'])

    def predict(self, X):
        """Return an array of predicted class labels, one per row of ``X``."""
        if isinstance(X, pd.DataFrame):
            X = X.values
        return np.array([self._predict_one(x, self.tree) for x in X])


# Step 3: Train on the first 80% of the samples and evaluate on the rest.
train_size = int(0.8 * n_samples)
X_train, y_train = X[:train_size], y[:train_size]
X_test, y_test = X[train_size:], y[train_size:]

# A shallow tree keeps the printed structure readable.
dt = DecisionTree(max_depth=5)
dt.fit(X_train, y_train)

# Simple accuracy on the held-out 20%.
predictions = dt.predict(X_test)
accuracy = np.mean(predictions == y_test)
print(f"Test Accuracy: {accuracy:.2f}")


# Print tree structure (for visualization)
def print_tree(node, depth=0):
    """Recursively pretty-print a fitted tree, indenting two spaces per level."""
    indent = "  " * depth
    if isinstance(node, dict):
        print(indent + f"Feature {node['feature']} <= {node['threshold']:.2f}")
        print_tree(node['left'], depth + 1)
        print_tree(node['right'], depth + 1)
    else:
        print(indent + f"Leaf: {node}")


# Visualize the splits and leaves of the tree fitted above.
print("Decision Tree Structure:")
print_tree(dt.tree)
