import numpy as np
import pandas as pd

class mDecisionTree:
    """A simple decision-tree classifier (CART-style splits, entropy criterion).

    Attributes:
        min_samples_split (int): Minimum number of samples required to split
            an internal node.
        max_depth (float): Maximum depth of the tree. Note the bound is
            inclusive here: a node at depth == max_depth may still be split.
        tree (Node): Root node of the fitted tree; None before fit().

    Methods:
        fit(X, y): Build the tree from training data (X, y).
        predict(X): Predict the class for each row of X.
    """
    def __init__(self, min_samples_split=2, max_depth=float('inf')):
        self.min_samples_split = min_samples_split
        self.max_depth = max_depth
        self.tree = None

    def fit(self, X, y):
        """Build the decision tree from X (n_samples, n_features) and labels y.

        Labels are stacked as an extra column of X, so they are cast to X's
        dtype (typically float); predict() returns values of that dtype.
        """
        data = np.hstack((X, y.reshape(-1, 1)))
        self.tree = self._build_tree(data)

    def predict(self, X):
        """Return an array with the predicted class for each row of X."""
        return np.array([self._predict(inputs, self.tree) for inputs in X])

    def _build_tree(self, data, depth=0):
        """Recursively grow the tree; data's last column holds the labels."""
        X, y = data[:, :-1], data[:, -1]
        num_samples, num_features = X.shape

        if num_samples >= self.min_samples_split and depth <= self.max_depth:
            best_split = self._get_best_split(data, num_features)
            # _get_best_split returns an empty dict when no valid split exists
            # (e.g. every row has identical feature values). Guard before
            # reading "gain" — the original raised KeyError in that case.
            if best_split and best_split["gain"] > 0:
                left_subtree = self._build_tree(best_split["left_data"], depth + 1)
                right_subtree = self._build_tree(best_split["right_data"], depth + 1)
                return Node(best_split["feature_index"], best_split["threshold"], left_subtree, right_subtree)

        # Stopping criterion reached (or no useful split): emit a leaf.
        leaf_value = self._calculate_leaf_value(y)
        return Node(value=leaf_value)

    def _get_best_split(self, data, num_features):
        """Find the (feature, threshold) pair with maximal information gain.

        Returns a dict with keys "feature_index", "threshold", "left_data",
        "right_data" and "gain", or an empty dict when no split produces two
        non-empty partitions.
        """
        best_split = {}
        max_gain = -float("inf")
        y = data[:, -1]  # labels are invariant across candidate splits; hoist

        for feature_index in range(num_features):
            # Every distinct value of the feature is a candidate threshold.
            possible_thresholds = np.unique(data[:, feature_index])
            for threshold in possible_thresholds:
                left_data, right_data = self._split(data, feature_index, threshold)
                if len(left_data) > 0 and len(right_data) > 0:
                    curr_gain = self._information_gain(y, left_data[:, -1], right_data[:, -1])
                    if curr_gain > max_gain:
                        best_split = {
                            "feature_index": feature_index,
                            "threshold": threshold,
                            "left_data": left_data,
                            "right_data": right_data,
                            "gain": curr_gain,
                        }
                        max_gain = curr_gain

        return best_split

    def _split(self, data, feature_index, threshold):
        """Partition rows: feature <= threshold goes left, the rest go right.

        Boolean-mask indexing replaces the original per-row Python list
        comprehensions: it is vectorized and always yields 2-D arrays, even
        when one side is empty.
        """
        mask = data[:, feature_index] <= threshold
        return data[mask], data[~mask]

    def _information_gain(self, parent, l_child, r_child):
        """Entropy reduction achieved by splitting parent into the children."""
        weight_l = len(l_child) / len(parent)
        weight_r = len(r_child) / len(parent)
        return self._entropy(parent) - (weight_l * self._entropy(l_child) + weight_r * self._entropy(r_child))

    def _entropy(self, y):
        """Shannon entropy (base 2) of the label array y."""
        entropy = 0.0
        for cls in np.unique(y):
            p_cls = len(y[y == cls]) / len(y)
            entropy -= p_cls * np.log2(p_cls)
        return entropy

    def _calculate_leaf_value(self, y):
        """Majority class among labels y (ties broken by first occurrence)."""
        y = list(y)
        return max(y, key=y.count)

    def _predict(self, inputs, tree):
        """Route a single sample down the tree and return its leaf value."""
        if tree.value is not None:
            return tree.value
        if inputs[tree.feature_index] <= tree.threshold:
            return self._predict(inputs, tree.left)
        return self._predict(inputs, tree.right)

class Node:
    """A single tree node.

    Internal nodes carry a test (feature_index, threshold) plus left/right
    children; leaf nodes carry only a predicted value (value is not None).
    """
    def __init__(self, feature_index=None, threshold=None, left=None, right=None, value=None):
        # Decision fields (internal nodes).
        self.feature_index, self.threshold = feature_index, threshold
        self.left, self.right = left, right
        # Prediction field (leaf nodes).
        self.value = value

# Example usage
if __name__ == "__main__":
    # Toy two-feature dataset: the first five samples belong to class 0,
    # the remaining five to class 1.
    frame = pd.DataFrame({
        'X1': [2.771244718, 1.728571309, 3.678319846, 3.961043357, 2.999208922, 7.497545867, 9.00220326, 7.444542326, 10.12493903, 6.642287351],
        'X2': [1.784783929, 1.169761413, 2.81281357, 2.61995032, 2.209014212, 3.162953546, 3.339047188, 0.476683375, 3.234550982, 3.319983761],
        'y': [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    })
    features = frame[['X1', 'X2']].values
    labels = frame['y'].values

    # Fit the decision tree model.
    model = mDecisionTree(min_samples_split=2, max_depth=3)
    model.fit(features, labels)

    # Predict on the training samples and show the result.
    print("预测结果:", model.predict(features))