import math
import random
from collections import defaultdict


class ID3DecisionTree:
    """Binary decision-tree classifier using the information-gain criterion.

    Handles continuous features by testing midpoint thresholds between
    consecutive distinct feature values (CART-style binary splits with the
    ID3 information-gain measure). Samples are ``(features, label)`` pairs
    where ``features`` is a sequence of numbers.
    """

    def __init__(self, max_depth=5):
        """Initialize an untrained tree.

        Args:
            max_depth: maximum tree depth; nodes at this depth become
                majority-class leaves.
        """
        self.tree = None           # nested dict built by build_tree(); None until fit()
        self.max_depth = max_depth
        self.feature_names = None  # optional names used by print_tree()

    def calculate_entropy(self, dataset):
        """Return the base-2 Shannon entropy of the labels in ``dataset``.

        An empty dataset has entropy 0.0.
        """
        total = len(dataset)
        if total == 0:
            return 0.0
        label_counts = defaultdict(int)
        for _, label in dataset:
            label_counts[label] += 1
        # Every count is >= 1, so each probability is strictly positive and
        # log2 is always defined (the original guarded a case that cannot occur).
        return -sum(
            (count / total) * math.log2(count / total)
            for count in label_counts.values()
        )

    def split_dataset(self, dataset, feature_index, threshold):
        """Partition ``dataset`` on one feature.

        Returns:
            (left, right): left holds samples with feature <= threshold,
            right holds the rest.
        """
        left = []   # samples with feature value <= threshold
        right = []  # samples with feature value > threshold
        for features, label in dataset:
            bucket = left if features[feature_index] <= threshold else right
            bucket.append((features, label))
        return left, right

    def find_best_split(self, dataset):
        """Search every feature and candidate threshold for the split with
        the highest information gain.

        Candidate thresholds are midpoints between consecutive distinct
        values of each feature.

        Returns:
            (best_feature, best_threshold, best_gain); ``best_feature`` is -1
            and ``best_threshold`` is None when no useful split exists.
        """
        best_gain = -1
        best_feature = -1
        best_threshold = None
        base_entropy = self.calculate_entropy(dataset)
        num_features = len(dataset[0][0]) if dataset else 0

        for feature_index in range(num_features):
            # Distinct values of this feature, in ascending order.
            feature_values = sorted({features[feature_index] for features, _ in dataset})

            # Try the midpoint between each pair of adjacent values.
            for low, high in zip(feature_values, feature_values[1:]):
                threshold = (low + high) / 2
                left, right = self.split_dataset(dataset, feature_index, threshold)

                if not left or not right:
                    continue  # degenerate split carries no information

                # Information gain = parent entropy minus the size-weighted
                # entropy of the two children.
                p = len(left) / len(dataset)
                gain = (base_entropy
                        - p * self.calculate_entropy(left)
                        - (1 - p) * self.calculate_entropy(right))

                if gain > best_gain:
                    best_gain = gain
                    best_feature = feature_index
                    best_threshold = threshold

        return best_feature, best_threshold, best_gain

    def majority_class(self, dataset):
        """Return the most frequent label in ``dataset`` (None if empty)."""
        class_counts = defaultdict(int)
        for _, label in dataset:
            class_counts[label] += 1
        return max(class_counts, key=class_counts.get) if class_counts else None

    def build_tree(self, dataset, depth=0):
        """Recursively build the tree and return its root node.

        Internal nodes are dicts
        ``{'type': 'node', 'feature', 'threshold', 'left', 'right'}``;
        leaves are ``{'type': 'leaf', 'class'}``.
        """
        # Pure node: every sample carries the same label.
        labels = {label for _, label in dataset}
        if len(labels) == 1:
            return {'type': 'leaf', 'class': labels.pop()}

        # Depth limit reached: fall back to the majority label.
        if depth >= self.max_depth:
            return {'type': 'leaf', 'class': self.majority_class(dataset)}

        best_feature, best_threshold, best_gain = self.find_best_split(dataset)

        # No informative split available: stop with the majority label.
        if best_gain <= 0 or best_feature == -1:
            return {'type': 'leaf', 'class': self.majority_class(dataset)}

        left, right = self.split_dataset(dataset, best_feature, best_threshold)

        return {
            'type': 'node',
            'feature': best_feature,
            'threshold': best_threshold,
            'left': self.build_tree(left, depth + 1),   # feature <= threshold
            'right': self.build_tree(right, depth + 1)  # feature > threshold
        }

    def fit(self, dataset, feature_names=None):
        """Train the tree on ``dataset`` (list of (features, label) pairs).

        Args:
            dataset: training samples.
            feature_names: optional feature names used by print_tree().

        Returns:
            self, to allow call chaining.
        """
        self.feature_names = feature_names
        self.tree = self.build_tree(dataset)
        return self

    def predict_sample(self, sample):
        """Predict the label of a single sample.

        Args:
            sample: a ``(features, label)`` pair; only ``features`` is read,
                so the label slot may be any placeholder.

        Raises:
            RuntimeError: if fit() has not been called yet.
        """
        if not self.tree:
            # RuntimeError is more specific than the original bare Exception
            # and is still caught by any existing `except Exception` handler.
            raise RuntimeError("模型尚未训练，请先调用fit方法")

        # Walk from the root to a leaf, branching on each node's threshold.
        node = self.tree
        while node['type'] != 'leaf':
            feature_value = sample[0][node['feature']]
            node = node['left'] if feature_value <= node['threshold'] else node['right']
        return node['class']

    def predict(self, dataset):
        """Predict a label for every sample in ``dataset``."""
        return [self.predict_sample(sample) for sample in dataset]

    def evaluate(self, dataset):
        """Return the accuracy (percentage, 0-100) on labelled ``dataset``.

        Raises:
            ValueError: if ``dataset`` is empty (accuracy is undefined;
                the original code raised an opaque ZeroDivisionError).
        """
        if not dataset:
            raise ValueError("cannot evaluate on an empty dataset")
        predictions = self.predict(dataset)
        correct = sum(
            1 for predicted, (_, actual) in zip(predictions, dataset)
            if predicted == actual
        )
        return correct / len(dataset) * 100

    def print_tree(self, indent=""):
        """Pretty-print the learned tree structure to stdout.

        Args:
            indent: prefix prepended to every printed line.
        """
        if not self.tree:
            print("决策树尚未构建")
            return

        def print_node(node, indent):
            # Leaf: print the predicted class and stop.
            if node['type'] == 'leaf':
                print(f"{indent}预测类别: {node['class']}")
                return

            feature_idx = node['feature']
            feature_name = self.feature_names[feature_idx] if self.feature_names else f"特征{feature_idx}"
            print(f"{indent}{feature_name} <= {node['threshold']:.4f}?")

            print(f"{indent}├─ 是:")
            print_node(node['left'], indent + "│  ")

            print(f"{indent}└─ 否:")
            print_node(node['right'], indent + "   ")

        print_node(self.tree, indent)


# Dataset loading and processing functions
def _parse_wine_line(line):
    """Parse one CSV line of the Wine dataset.

    Returns:
        (features, label) where ``label`` is the leading class column as an
        int and ``features`` are the remaining columns as floats, or None
        for a blank line.
    """
    line = line.strip()
    if not line:
        return None
    parts = line.split(',')
    # First column is the class label; the rest are numeric features.
    return [float(x) for x in parts[1:]], int(parts[0])


def load_wine_data(filename='wine.data'):
    """Load the UCI Wine dataset as a list of (features, label) samples.

    Reads ``filename`` if present; otherwise downloads the data from the
    UCI repository. Parsing of both sources goes through the shared
    ``_parse_wine_line`` helper (the original duplicated the logic).
    """
    data = []
    try:
        # Prefer a local copy of the file.
        with open(filename, 'r') as file:
            for line in file:
                sample = _parse_wine_line(line)
                if sample is not None:
                    data.append(sample)
    except FileNotFoundError:
        # Local file missing: fetch the canonical copy from UCI.
        print("本地文件未找到，尝试从UCI网站下载...")
        import urllib.request
        url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
        with urllib.request.urlopen(url) as response:
            content = response.read().decode('utf-8')
            for line in content.split('\n'):
                sample = _parse_wine_line(line)
                if sample is not None:
                    data.append(sample)

    print(f"成功加载Wine数据集，共{len(data)}个样本")
    return data


def split_data(data, test_ratio=0.3):
    """Randomly split ``data`` into a training set and a test set.

    Args:
        data: list of samples.
        test_ratio: fraction (0..1) of samples placed in the test set.

    Returns:
        (train_set, test_set) tuple of new lists.

    The input list is left unmodified: a shuffled copy is split instead
    (the original implementation shuffled the caller's list in place,
    a surprising side effect).
    """
    shuffled = list(data)
    random.shuffle(shuffled)
    split_index = int(len(shuffled) * (1 - test_ratio))
    train_set = shuffled[:split_index]
    test_set = shuffled[split_index:]
    print(f"数据集拆分完成：训练集{len(train_set)}个样本，测试集{len(test_set)}个样本")
    return train_set, test_set


# Script entry point
def main():
    """Load the Wine data, train an ID3 tree, and report its accuracy."""
    # Human-readable names for the 13 Wine features (used when printing).
    feature_names = [
        "Alcohol", "Malic_acid", "Ash", "Alcalinity_of_ash", "Magnesium",
        "Total_phenols", "Flavanoids", "Nonflavanoid_phenols", "Proanthocyanins",
        "Color_intensity", "Hue", "OD280/OD315_of_diluted_wines", "Proline"
    ]

    # Load and split the dataset.
    dataset = load_wine_data()
    train_set, test_set = split_data(dataset)

    # Build and train the decision tree.
    print("\n训练ID3决策树中...")
    model = ID3DecisionTree(max_depth=5)
    model.fit(train_set, feature_names)

    # Show the learned structure.
    print("\n决策树结构:")
    model.print_tree()

    # Report train/test accuracy.
    print(f"\n训练集准确率: {model.evaluate(train_set):.2f}%")
    print(f"测试集准确率: {model.evaluate(test_set):.2f}%")

    # Show up to five example predictions from the test set.
    print("\n部分预测结果示例：")
    predictions = model.predict(test_set)
    shown = zip(test_set[:5], predictions[:5])
    for index, ((_, actual), predicted) in enumerate(shown, start=1):
        verdict = '正确' if actual == predicted else '错误'
        print(f"样本{index}: 实际类别={actual}, 预测类别={predicted}, "
              f"{verdict}")


if __name__ == "__main__":
    main()
