from collections import Counter
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression


class DecisionTree:
    """Binary decision-tree classifier for integer-labelled data.

    A node is either a leaf (a plain class label) or a tuple
    ``(feature_index, threshold, left_subtree, right_subtree)``; samples
    with ``x[feature_index] <= threshold`` go left, the rest go right.

    criterion:
        'entropy'  - split by information gain (default)
        'gini'     - split by Gini-impurity reduction
        'logistic' - split by the gap between the mean per-feature logistic
                     regression probabilities of the two sides
                     (requires sklearn's LogisticRegression)
    """

    def __init__(self, max_depth=None, *, criterion='entropy'):
        # ``None`` (or 0) falls back to a default depth cap of 10,
        # preserving the original ``max_depth or 10`` behaviour.
        self.max_depth = max_depth or 10
        self.criterion = criterion
        self.tree = None  # set by fit()

    def fit(self, X, y):
        """Grow the tree from X (n_samples, n_features) and labels y.

        Labels must be non-negative integers: the entropy/Gini metrics
        count them with ``np.bincount``.
        """
        self.tree = self._build_tree(X, y, depth=0)

    def predict(self, X):
        """Return the predicted label for each row of X."""
        return np.array([self._predict(inputs) for inputs in X])

    def _most_common_label(self, y):
        # Majority vote: the most frequent label in y.
        counter = Counter(y)
        return counter.most_common(1)[0][0]

    def _build_tree(self, X, y, depth):
        num_samples, num_features = X.shape
        num_labels = len(np.unique(y))

        # Stop when the depth cap is reached or the node is pure.
        if depth == self.max_depth or num_labels == 1:
            return self._most_common_label(y)

        # Find the best place to split.
        best_feature, best_threshold = self._best_split(X, y, num_features)
        # Robustness fix: if no usable split exists, or the chosen split
        # leaves one side empty, emit a majority-vote leaf instead of
        # recursing on the same (or an empty) subset. The prediction is
        # identical to what the depth-capped recursion produced.
        if best_feature is None:
            return self._most_common_label(y)
        left_indices, right_indices = self._split(
            X[:, best_feature], best_threshold)
        if len(left_indices) == 0 or len(right_indices) == 0:
            return self._most_common_label(y)

        # Recursively build the two subtrees.
        left_tree = self._build_tree(
            X[left_indices], y[left_indices], depth + 1)
        right_tree = self._build_tree(
            X[right_indices], y[right_indices], depth + 1)

        return (best_feature, best_threshold, left_tree, right_tree)

    def _best_split(self, X, y, num_features):
        """Return (feature_index, threshold) of the best split.

        Returns (None, None) when no candidate threshold is scored
        (e.g. there are no features).
        """
        best_feature, best_threshold = None, None
        if self.criterion == 'logistic':
            # Bug fix: the original read ``best_score`` before it was ever
            # assigned, raising NameError on the first comparison.
            best_score = -np.inf
            for feature_idx in range(num_features):
                column = X[:, feature_idx].reshape(-1, 1)
                model = LogisticRegression()
                model.fit(column, y)
                probs = model.predict_proba(column)[:, 1]
                for threshold in np.unique(X[:, feature_idx]):
                    score = self._logit_score(
                        probs, X[:, feature_idx], threshold)
                    if score > best_score:
                        best_score = score
                        best_feature = feature_idx
                        best_threshold = threshold
        else:
            best_gain = -1
            for feature_idx in range(num_features):
                # Every distinct value of the column is a candidate threshold.
                for threshold in np.unique(X[:, feature_idx]):
                    gain = self._information_gain(
                        y, X[:, feature_idx], threshold)
                    if gain > best_gain:
                        best_gain = gain
                        best_feature = feature_idx
                        best_threshold = threshold
        return best_feature, best_threshold

    def _information_gain(self, y, feature, threshold):
        """Impurity reduction obtained by splitting ``feature`` at ``threshold``."""
        # Bug fix: the original called the non-existent attribute
        # ``self.criterion_metric`` (AttributeError); select the metric
        # into a local and call that instead. The dead 'logistic' branch
        # (which silently returned None) is gone: _best_split never calls
        # this method for the 'logistic' criterion.
        metric = self._entropy if self.criterion == 'entropy' else self._gini
        parent_metric = metric(y)
        left_indices, right_indices = self._split(feature, threshold)
        num_left, num_right = len(left_indices), len(right_indices)
        if num_left == 0 or num_right == 0:
            return 0  # degenerate split: nothing gained
        n = len(y)
        child_metric = (num_left / n) * metric(y[left_indices]) + \
            (num_right / n) * metric(y[right_indices])
        return parent_metric - child_metric

    def _logit_score(self, probs, feature, threshold):
        """Score a split by how far apart the mean predicted probabilities
        of the two sides are: an ideal split separates the class
        distributions, so a larger gap is better."""
        left_indices, right_indices = self._split(feature, threshold)
        if len(left_indices) == 0 or len(right_indices) == 0:
            return -np.inf  # reject degenerate splits outright
        return abs(np.mean(probs[left_indices]) - np.mean(probs[right_indices]))

    def _entropy(self, y):
        """Shannon entropy (base 2) of the label distribution."""
        counts = np.bincount(y)  # per-label counts
        probabilities = counts / len(y)
        # Skip zero probabilities: log2(0) is undefined.
        return -np.sum([p * np.log2(p) for p in probabilities if p > 0])

    def _gini(self, y):
        """Gini impurity of the label distribution."""
        counts = np.bincount(y)  # per-label counts
        probabilities = counts / len(y)
        return 1 - np.sum([p**2 for p in probabilities])

    def _split(self, feature, threshold):
        """Partition sample indices: left gets <= threshold, right gets >."""
        left_indices = np.argwhere(feature <= threshold).flatten()
        right_indices = np.argwhere(feature > threshold).flatten()
        return left_indices, right_indices

    def _predict(self, inputs):
        """Walk the tree for one sample until a leaf (a label) is reached."""
        tree = self.tree
        while isinstance(tree, tuple):
            feature_idx, threshold, left_tree, right_tree = tree
            if inputs[feature_idx] <= threshold:
                tree = left_tree
            else:
                tree = right_tree
        return tree


class RegressionTreeByVariance:
    """Regression tree that chooses splits by variance reduction.

    A node is either a leaf (the mean target of its samples) or a tuple
    ``(feature_index, threshold, left_subtree, right_subtree)``; samples
    with ``x[feature_index] <= threshold`` go left, the rest go right.
    """

    def __init__(self, max_depth=None):
        self.max_depth = max_depth  # None means no depth cap
        self.tree = None  # set by fit()

    def fit(self, X, y):
        """Grow the tree from X (n_samples, n_features) and targets y."""
        self.tree = self._build_tree(X, y, depth=0)

    def predict(self, X):
        """Return the predicted value for each row of X."""
        return np.array([self._predict(inputs) for inputs in X])

    def _build_tree(self, X, y, depth):
        num_samples, num_features = X.shape
        num_targets = len(np.unique(y))

        # Stop at the depth cap, a constant target, or too few samples.
        if depth == self.max_depth or num_targets == 1 or num_samples < 2:
            return np.mean(y)

        # Find the best place to split.
        best_feature, best_threshold = self._best_split(X, y, num_features)
        # Robustness fix: when no usable split exists (e.g. every row is
        # identical), the original recursed on the same data forever when
        # max_depth was None; emit a mean-value leaf instead. Where the
        # original terminated, the prediction is unchanged.
        if best_feature is None:
            return np.mean(y)
        left_indices, right_indices = self._split(
            X[:, best_feature], best_threshold)
        if len(left_indices) == 0 or len(right_indices) == 0:
            return np.mean(y)

        # Recursively build the two subtrees.
        left_tree = self._build_tree(
            X[left_indices], y[left_indices], depth + 1)
        right_tree = self._build_tree(
            X[right_indices], y[right_indices], depth + 1)

        return (best_feature, best_threshold, left_tree, right_tree)

    def _best_split(self, X, y, num_features):
        """Return (feature_index, threshold) maximizing variance reduction,
        or (None, None) when no candidate threshold exists."""
        best_gain = -1
        best_feature, best_threshold = None, None
        for feature_idx in range(num_features):
            # Bug fix: the original took midpoints of consecutive *rows*
            # ``(X[:-1] + X[1:]) / 2``, which is only meaningful when the
            # column happens to be sorted. Use midpoints of consecutive
            # sorted unique values instead (standard CART candidates).
            values = np.unique(X[:, feature_idx])  # sorted, deduplicated
            thresholds = (values[:-1] + values[1:]) / 2
            for threshold in thresholds:
                gain = self._information_gain(y, X[:, feature_idx], threshold)
                if gain > best_gain:
                    best_gain = gain
                    best_feature = feature_idx
                    best_threshold = threshold
        return best_feature, best_threshold

    def _variance(self, y):
        # Population variance of the targets; the impurity measure here.
        return np.var(y)

    def _information_gain(self, y, feature, threshold):
        """Variance reduction obtained by splitting ``feature`` at ``threshold``."""
        parent_variance = self._variance(y)
        left_indices, right_indices = self._split(feature, threshold)
        num_left, num_right = len(left_indices), len(right_indices)
        if num_left == 0 or num_right == 0:
            return 0  # degenerate split: nothing gained
        n = len(y)
        child_variance = (num_left / n) * self._variance(y[left_indices]) + \
            (num_right / n) * self._variance(y[right_indices])
        return parent_variance - child_variance

    def _split(self, feature, threshold):
        """Partition sample indices: left gets <= threshold, right gets >."""
        left_indices = np.argwhere(feature <= threshold).flatten()
        right_indices = np.argwhere(feature > threshold).flatten()
        return left_indices, right_indices

    def _predict(self, inputs):
        """Walk the tree for one sample until a leaf (a mean value) is reached."""
        tree = self.tree
        while isinstance(tree, tuple):
            feature_idx, threshold, left_tree, right_tree = tree
            if inputs[feature_idx] <= threshold:
                tree = left_tree
            else:
                tree = right_tree
        return tree