import math
from collections import Counter

from data_loader import load_wine_data, train_test_split, discretize_data

class Node:
    """A single node of the decision tree."""

    def __init__(self, feature_index=None, threshold=None, value=None, children=None):
        self.feature_index = feature_index  # index of the feature this node splits on
        # Bug fix: threshold was accepted by the constructor but never stored.
        self.threshold = threshold          # split threshold (unused by pure ID3, kept for API compat)
        self.value = value                  # predicted class label for a leaf node
        self.children = children or {}      # {feature value: child Node}


class ID3DecisionTree:
    """ID3 decision tree classifier.

    Splits are chosen by maximizing information gain, so features must be
    discrete (continuous features should be binned before calling fit()).
    """

    def __init__(self, max_depth=None):
        # max_depth=None means grow until nodes are pure or features run out.
        self.max_depth = max_depth
        self.root = None
        # Column names for the UCI Wine dataset; used only by print_tree().
        self.feature_names = [
            "Alcohol", "Malic acid", "Ash", "Alcalinity of ash", 
            "Magnesium", "Total phenols", "Flavanoids", 
            "Nonflavanoid phenols", "Proanthocyanins", 
            "Color intensity", "Hue", "OD280/OD315", "Proline"
        ]

    def entropy(self, y):
        """Return the Shannon entropy (base 2) of the label sequence y."""
        total = len(y)
        if total == 0:
            return 0
        # Counts are always >= 1, so each probability is strictly positive.
        return -sum(
            (count / total) * math.log2(count / total)
            for count in Counter(y).values()
        )

    def information_gain(self, X, y, feature_index):
        """Return the information gain of splitting (X, y) on feature_index."""
        # Partition labels by feature value in a single pass (the original
        # re-scanned X once per unique value, which was accidentally O(n*k)).
        partitions = {}
        for sample, label in zip(X, y):
            partitions.setdefault(sample[feature_index], []).append(label)

        weighted_entropy = sum(
            (len(subset) / len(y)) * self.entropy(subset)
            for subset in partitions.values()
        )
        return self.entropy(y) - weighted_entropy

    def best_feature(self, X, y, features):
        """Return (feature_index, gain) of the feature with the largest gain.

        Ties keep the first feature encountered, matching strict-> comparison.
        """
        best_gain = -1
        best_feature = None
        for feature_index in features:
            gain = self.information_gain(X, y, feature_index)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_index
        return best_feature, best_gain

    def majority_class(self, y):
        """Return the most frequent label in y (first-seen wins ties)."""
        return Counter(y).most_common(1)[0][0]

    def build_tree(self, X, y, features, depth=0):
        """Recursively build the tree and return the subtree root Node."""
        # Stop when the node is pure, no features remain, or the depth limit
        # is reached. Bug fix: use `is not None` so max_depth=0 is honored
        # (the original `self.max_depth and ...` treated 0 as "no limit").
        if (len(set(y)) == 1
                or not features
                or (self.max_depth is not None and depth >= self.max_depth)):
            return Node(value=self.majority_class(y))

        best_feat, best_gain = self.best_feature(X, y, features)

        # No split improves purity -> leaf with the majority class.
        if best_gain <= 0:
            return Node(value=self.majority_class(y))

        node = Node(feature_index=best_feat)
        remaining_features = [f for f in features if f != best_feat]

        # Partition samples by the chosen feature's value in one pass.
        # Every partition is non-empty by construction, so the original
        # empty-subset branch was dead code.
        partitions = {}
        for sample, label in zip(X, y):
            partitions.setdefault(sample[best_feat], []).append((sample, label))

        for value, pairs in partitions.items():
            sub_X = [s for s, _ in pairs]
            sub_y = [lbl for _, lbl in pairs]
            node.children[value] = self.build_tree(
                sub_X, sub_y, remaining_features, depth + 1
            )

        return node

    def fit(self, X, y):
        """Train the decision tree on samples X with labels y."""
        features = list(range(len(X[0])))
        self.root = self.build_tree(X, y, features)

    def predict_single(self, x, node):
        """Predict the class of a single sample by walking from node down."""
        if node.value is not None:  # leaf node
            return node.value

        feat_val = x[node.feature_index]
        if feat_val in node.children:
            return self.predict_single(x, node.children[feat_val])
        # Feature value never seen during training: fall back to the
        # majority class among all leaves under this node.
        return self._get_majority_class(node)

    def _get_majority_class(self, node):
        """Return the most common leaf class in the subtree rooted at node."""
        classes = []
        self._collect_classes(node, classes)
        return Counter(classes).most_common(1)[0][0]

    def _collect_classes(self, node, classes):
        """Append every leaf class under node to the classes list."""
        if node.value is not None:
            classes.append(node.value)
        elif node.children:
            for child in node.children.values():
                self._collect_classes(child, classes)

    def predict(self, X):
        """Predict a class label for every sample in X."""
        return [self.predict_single(x, self.root) for x in X]

    def accuracy(self, y_true, y_pred):
        """Return the fraction of correct predictions (0.0 for empty input)."""
        if not y_true:  # robustness: avoid ZeroDivisionError on empty input
            return 0.0
        return sum(t == p for t, p in zip(y_true, y_pred)) / len(y_true)

    def print_tree(self, node=None, depth=0):
        """Print the tree structure, indented two spaces per level."""
        if node is None:
            node = self.root

        indent = "  " * depth

        if node.value is not None:
            print(f"{indent}类别: {node.value}")
        else:
            feature_name = self.feature_names[node.feature_index]
            print(f"{indent}{feature_name}:")
            for val, child in node.children.items():
                print(f"{indent}  值 {val}:")
                self.print_tree(child, depth + 1)

def main():
    """Train and evaluate an ID3 tree on the Wine dataset (script entry point)."""
    print("=== ID3决策树 - Wine分类（纯Python实现） ===\n")

    # Load the raw dataset.
    X, y = load_wine_data()
    print(f"数据记录数: {len(X)}")
    print(f"特征数量: {len(X[0])}")

    # Class distribution (Counter replaces the hand-rolled counting loop).
    class_counts = Counter(y)
    print("类别分布: ", end="")
    for label, count in sorted(class_counts.items()):
        print(f"类别{label}: {count}", end="  ")
    print()

    # ID3 requires discrete features, so bin the continuous columns.
    print("\n离散化连续特征...")
    X_disc = discretize_data(X, n_bins=3)

    # Train/test split.
    X_train, X_test, y_train, y_test = train_test_split(
        X_disc, y, test_size=0.3, random_state=42
    )
    print(f"训练集大小: {len(X_train)}")
    print(f"测试集大小: {len(X_test)}")

    # Train the ID3 decision tree.
    print("\n训练ID3决策树...")
    tree = ID3DecisionTree(max_depth=4)
    tree.fit(X_train, y_train)

    # Evaluate on the held-out test set.
    y_pred = tree.predict(X_test)
    accuracy = tree.accuracy(y_test, y_pred)
    print(f"测试集准确率: {accuracy:.4f}")

    # Print the learned tree structure.
    print("\n决策树结构:")
    print("=" * 50)
    tree.print_tree()

    # Detailed results for the first 10 test samples.
    print(f"\n前10个测试样本的详细预测结果:")
    print("样本\t真实类别\t预测类别\t是否正确")
    print("-" * 45)

    correct_count = 0
    for i in range(min(10, len(y_test))):
        # Compare directly as a boolean instead of round-tripping through
        # the "✓"/"✗" display string as the original did.
        is_correct = y_test[i] == y_pred[i]
        correct_count += is_correct
        mark = "✓" if is_correct else "✗"
        print(f"{i+1}\t{y_test[i]}\t\t{y_pred[i]}\t\t{mark}")

    print(f"\n前10个样本中正确分类: {correct_count}个")

    # Per-class accuracy.
    print("\n各类别准确率:")
    class_total = Counter(y_test)
    class_correct = Counter(t for t, p in zip(y_test, y_pred) if t == p)

    for class_label in sorted(class_total):
        acc = class_correct[class_label] / class_total[class_label]
        print(f"类别 {class_label}: {class_correct[class_label]}/{class_total[class_label]} = {acc:.4f}")

if __name__ == "__main__":
    main()