import numpy as np
import matplotlib.pyplot as plt

class DecisionTreeNode:
    """One node of a decision tree.

    An internal node stores a (feature, threshold) split plus two child
    subtrees; a leaf stores only the predicted class in ``value``.
    """

    def __init__(self, feature=None, threshold=None, left=None, right=None, *, value=None):
        # Predicted class label — set on leaves only.
        self.value = value
        # Split definition — set on internal nodes only.
        self.feature = feature
        self.threshold = threshold
        # Child subtrees — set on internal nodes only.
        self.left = left
        self.right = right

    def is_leaf_node(self):
        """Return True exactly when this node carries a prediction."""
        return self.value is not None

class DecisionTree:
    """Binary decision-tree classifier trained with entropy / information gain.

    Samples with ``X[:, feature] < threshold`` are routed to the left
    child, the rest to the right child.
    """

    def __init__(self, min_samples_split=2, max_depth=10):
        """
        Args:
            min_samples_split: smallest node size still eligible for splitting.
            max_depth: maximum tree depth (the root is at depth 0).
        """
        self.root = None
        self.min_samples_split = min_samples_split
        self.max_depth = max_depth

    def fit(self, X, y):
        """Build the tree from training data.

        Args:
            X: array of shape (n_samples, n_features).
            y: 1-D array of class labels, same length as X.

        Raises:
            ValueError: if X and y disagree on the number of samples.
        """
        if len(X) != len(y):
            raise ValueError("The number of samples in X and y must match.")
        self.root = self._grow_tree(X, y)

    def _grow_tree(self, X, y, depth=0):
        """Recursively grow a subtree and return its root node."""
        n_samples = X.shape[0]

        # Pure node: every sample carries the same label.
        if len(np.unique(y)) == 1:
            return DecisionTreeNode(value=y[0])

        # Stop splitting when the node is too small or the tree too deep.
        if n_samples < self.min_samples_split or depth >= self.max_depth:
            return DecisionTreeNode(value=self._majority_vote(y))

        feature, threshold = self._best_split(X, y)

        # No candidate split yields positive information gain -> leaf.
        if feature is None:
            return DecisionTreeNode(value=self._majority_vote(y))

        left_X, left_y, right_X, right_y = self._split(X, y, feature, threshold)
        left = self._grow_tree(left_X, left_y, depth + 1)
        right = self._grow_tree(right_X, right_y, depth + 1)
        return DecisionTreeNode(feature, threshold, left, right)

    def _best_split(self, X, y):
        """Return the (feature, threshold) pair with the highest information gain.

        BUG FIX: the original scored each feature by the *best* gain over
        all candidate thresholds, but then returned
        ``np.median(X[:, feature])`` as the threshold.  The threshold
        actually used for splitting therefore did not correspond to the
        gain that selected it, and a median threshold can even produce an
        empty child (which crashes the recursion via a majority vote over
        an empty array).  The threshold achieving the best gain is now
        tracked and returned alongside it.

        Returns:
            (feature_index, threshold), or (None, None) when no split
            with positive gain exists.
        """
        best_gain = 0.0
        best_feature = None
        best_threshold = None

        for feature in range(X.shape[1]):
            gain, threshold = self._information_gain(X, y, feature)
            if threshold is not None and gain > best_gain:
                best_gain = gain
                best_feature = feature
                best_threshold = threshold

        return best_feature, best_threshold

    def _information_gain(self, X, y, feature):
        """Best information gain achievable by thresholding one feature.

        Returns:
            (gain, threshold) for the threshold maximising the gain, or
            (0.0, None) when every candidate leaves one child empty or
            no threshold improves on the parent entropy.
        """
        parent_entropy = self._entropy(y)
        best_gain = 0.0
        best_threshold = None

        for threshold in np.unique(X[:, feature]):
            _, left_y, _, right_y = self._split(X, y, feature, threshold)
            # Skip degenerate splits that leave one child empty.
            if len(left_y) == 0 or len(right_y) == 0:
                continue
            prob = len(left_y) / len(y)
            child_entropy = prob * self._entropy(left_y) + (1 - prob) * self._entropy(right_y)
            gain = parent_entropy - child_entropy
            if gain > best_gain:
                best_gain = gain
                best_threshold = threshold

        return best_gain, best_threshold

    def _entropy(self, y):
        """Shannon entropy (base 2) of the label distribution in ``y``."""
        _, counts = np.unique(y, return_counts=True)
        probabilities = counts / counts.sum()
        # counts > 0 guarantees no log2(0) here.
        return -np.sum(probabilities * np.log2(probabilities))

    def _split(self, X, y, feature, threshold):
        """Partition (X, y) by ``X[:, feature] < threshold``.

        Returns:
            (left_X, left_y, right_X, right_y), where "left" holds the
            samples strictly below the threshold.
        """
        left_mask = X[:, feature] < threshold
        # The complement mask covers the >= threshold samples exactly once.
        return X[left_mask], y[left_mask], X[~left_mask], y[~left_mask]

    def _majority_vote(self, y):
        """Most frequent label in ``y`` (ties broken by label sort order)."""
        unique, counts = np.unique(y, return_counts=True)
        return unique[np.argmax(counts)]

    def predict(self, X):
        """Predict a class label for each row of ``X``; returns a list."""
        return [self._predict(inputs) for inputs in X]

    def _predict(self, inputs):
        """Route one sample from the root to a leaf and return its value."""
        node = self.root
        while not node.is_leaf_node():
            node = node.left if inputs[node.feature] < node.threshold else node.right
        return node.value

    def score(self, X, y):
        """Mean accuracy of ``predict(X)`` against the true labels ``y``."""
        # Convert the list of predictions so the comparison is elementwise.
        y_predicted = np.asarray(self.predict(X))
        return np.sum(y_predicted == y) / len(y)

    def visualize_tree(self):
        """Render the fitted tree with matplotlib.

        Raises:
            ValueError: if called before ``fit``.
        """
        if self.root is None:
            raise ValueError("The tree has not been trained yet. Call 'fit' first.")

        plt.figure(figsize=(12, 8))
        self._plot_tree(self.root, 0, 0, 1000, 100, 100)
        plt.show()

    def _plot_tree(self, node, x, y, width, x_offset, y_offset):
        """Recursively draw ``node`` at (x, y) and its children below it."""
        if node is None:
            return

        # Leaves show the predicted class; internal nodes show their split.
        if node.is_leaf_node():
            plt.text(x, y, f"Class: {node.value}",
                     bbox=dict(facecolor='green', alpha=0.5),
                     fontsize=10, ha='center', va='center')
        else:
            plt.text(x, y, f"Feature {node.feature}\nThreshold: {node.threshold}",
                     bbox=dict(facecolor='lightblue', alpha=0.5),
                     fontsize=10, ha='center', va='center')

        # "Yes" edge: samples with feature < threshold descend to the left.
        if node.left:
            x_left = x - x_offset
            y_left = y - y_offset
            plt.plot([x, x_left], [y, y_left], 'k-')
            plt.text((x + x_left) / 2, (y + y_left) / 2, "Yes", fontsize=8)
            self._plot_tree(node.left, x_left, y_left, width / 2, x_offset / 2, y_offset)

        # "No" edge: the complementary samples descend to the right.
        if node.right:
            x_right = x + x_offset
            y_right = y - y_offset
            plt.plot([x, x_right], [y, y_right], 'k-')
            plt.text((x + x_right) / 2, (y + y_right) / 2, "No", fontsize=8)
            self._plot_tree(node.right, x_right, y_right, width / 2, x_offset / 2, y_offset)