import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

##################################################
# 1) Tree node structure
##################################################
class Node:
    """
    A single node of the binary decision tree.

    Attributes:
        feature_index: index of the feature used for the split (int),
            or None for a leaf.
        threshold: split threshold (float); samples with
            feature <= threshold go left, the rest go right.
        left, right: child Nodes (None for a leaf).
        label: class label stored at a leaf (int); None for an
            internal node.
    """
    def __init__(self,
                 feature_index=None,
                 threshold=None,
                 left=None,
                 right=None,
                 label=None):
        # A node is either internal (feature_index/threshold set,
        # label None) or a leaf (label set, split fields None).
        self.label = label
        self.feature_index = feature_index
        self.threshold = threshold
        self.left = left
        self.right = right

##################################################
# 2) Helper functions
##################################################
def majority_vote(y):
    """
    Return the most frequent label in *y*.

    ``np.unique`` yields labels in sorted order, and ``argmax`` keeps
    the first maximum, so ties are resolved in favour of the smallest
    label.  Works for arbitrary integer labels (negative values,
    multi-class, ...).
    """
    labels, counts = np.unique(y, return_counts=True)
    return labels[counts.argmax()]

def gini_impurity(y):
    """
    Compute the Gini impurity: Gini(D) = 1 - sum(p_k^2),
    where p_k is the proportion of class k among the labels in *y*.

    Returns 0.0 for an empty array — the original divided by
    ``len(y)`` unconditionally, emitting a divide-by-zero warning and
    returning nan for this degenerate input.
    """
    n = len(y)
    if n == 0:
        return 0.0
    # Only the counts matter; the label values themselves are unused.
    _, counts = np.unique(y, return_counts=True)
    p = counts / n
    return 1.0 - np.sum(p ** 2)

def find_best_split(X, y):
    """
    Scan every feature and every candidate threshold and return the
    split (best_feature, best_threshold) minimising the weighted Gini
    impurity of the two children (left: feature <= threshold,
    right: feature > threshold).

    Returns (None, None) when no split is possible: a single sample,
    a pure node, or every feature constant.
    """
    n_samples, n_features = X.shape
    # Nothing to split: one sample, or all labels identical.
    if n_samples <= 1 or len(np.unique(y)) == 1:
        return None, None

    best_gini = float("inf")
    best_feature = None
    best_threshold = None

    for feature_index in range(n_features):
        feature_values = X[:, feature_index]
        # The largest unique value is excluded up front: splitting on
        # it would leave the right side ("> max") empty, so the
        # original loop always skipped it anyway.  With it gone, both
        # children are guaranteed non-empty for every remaining
        # threshold (the left side always contains the minimum).
        for threshold in np.unique(feature_values)[:-1]:
            left_mask = feature_values <= threshold
            y_left = y[left_mask]
            y_right = y[~left_mask]

            # Children's impurities, weighted by their sizes.
            weighted_gini = (
                len(y_left) * gini_impurity(y_left)
                + len(y_right) * gini_impurity(y_right)
            ) / n_samples

            # Keep the strictly better split (first best wins ties).
            if weighted_gini < best_gini:
                best_gini = weighted_gini
                best_feature = feature_index
                best_threshold = threshold

    # best_feature stays None only when every feature is constant.
    return best_feature, best_threshold


def build_tree(X, y, max_depth, min_samples_split, depth=0):
    """
    Recursively grow a CART subtree and return its root Node.

    Args:
        X: feature matrix, shape [n_samples, n_features].
        y: integer label vector, shape [n_samples].
        max_depth: maximum tree depth.
        min_samples_split: minimum number of samples required to split.
        depth: current depth (internal recursion parameter).
    """
    # Stopping criteria: depth limit reached, too few samples, or the
    # node is already pure.  In every case emit a majority-vote leaf.
    if (depth >= max_depth
            or len(y) < min_samples_split
            or len(np.unique(y)) == 1):
        return Node(label=majority_vote(y))

    # Best (feature, threshold) pair by weighted Gini; None means no
    # useful split exists, so this node also becomes a leaf.
    feature_index, threshold = find_best_split(X, y)
    if feature_index is None:
        return Node(label=majority_vote(y))

    # Partition the samples and recurse one level deeper on each side.
    goes_left = X[:, feature_index] <= threshold
    node = Node(feature_index=feature_index, threshold=threshold)
    node.left = build_tree(X[goes_left], y[goes_left],
                           max_depth, min_samples_split, depth + 1)
    node.right = build_tree(X[~goes_left], y[~goes_left],
                            max_depth, min_samples_split, depth + 1)
    return node

##################################################
# 3) Wrapper class: CARTDecisionTree
##################################################
class CARTDecisionTree:
    """
    Hand-written CART (classification) decision tree.

    Example:
        clf = CARTDecisionTree(max_depth=5, min_samples_split=2)
        clf.fit(X_train, y_train)   # X_train, y_train are np.array
        y_pred = clf.predict(X_test)
    """
    def __init__(self, max_depth=5, min_samples_split=2):
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.root = None  # set by fit()

    def fit(self, X, y, sample_weight=None):
        """
        Train the decision tree.

        Args:
            X: array-like, shape [n_samples, n_features]; numeric (or
               at least orderable) feature values.
            y: array-like, shape [n_samples]; integer labels (negative
               values and multi-class are fine).
            sample_weight: accepted for scikit-learn API compatibility
               but IGNORED — the splitter does not weight samples.
        """
        # Coerce up front so DataFrames and plain arrays both work.
        X = np.asarray(X)
        y = np.asarray(y)
        self.root = build_tree(X, y,
                               max_depth=self.max_depth,
                               min_samples_split=self.min_samples_split)

    def _predict_sample(self, x, node):
        """
        Route a single sample *x* down the tree; return the leaf label.
        """
        # Leaf node => return its stored label.
        if node.label is not None:
            return node.label
        # Internal node: follow the split rule.
        if float(x[node.feature_index]) <= float(node.threshold):
            return self._predict_sample(x, node.left)
        return self._predict_sample(x, node.right)

    def predict(self, X):
        """
        Predict a label for each row of X.

        Returns np.array of labels, shape [n_samples].

        BUG FIX: the original accessed ``X.values``, which raises
        AttributeError for plain numpy arrays — the very input type
        fit() documents.  ``np.asarray`` handles both np.ndarray and
        pandas DataFrame.
        """
        X = np.asarray(X)
        return np.array([self._predict_sample(row, self.root) for row in X])

##################################################
# 4) Demo (binary classification example)
##################################################
if __name__ == "__main__":
    # Toy binary-classification data: four points near the origin
    # (class 0) and three points far away (class 1).
    X_data = np.array([
        [2, 3], [1, 5], [2, 2], [3, 6],
        [10, 12], [11, 11], [12, 10],
    ])
    y_data = np.array([0] * 4 + [1] * 3)

    # Hold out 30% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        X_data, y_data, test_size=0.3, random_state=42
    )

    # Build and train the tree.
    clf = CARTDecisionTree(max_depth=3, min_samples_split=1)
    clf.fit(X_train, y_train)

    # Predict on the held-out samples and report accuracy.
    y_pred = clf.predict(X_test)
    print("Predictions:", y_pred)
    print("Ground truth:", y_test)
    accuracy = np.mean(y_pred == y_test)
    print("Accuracy:", accuracy)
