import math
from collections import Counter
import random


# 加载 Wine 数据集
def load_wine_data(filename='wine.data'):
    """
    Load the Wine dataset from a CSV-style file.

    Each non-empty line is one sample: the first column is the class label,
    the remaining columns are numeric feature values.

    Args:
        filename (str): Path to the data file (default 'wine.data').

    Returns:
        list[tuple[list[float], str]]: One (features, label) tuple per sample.

    Side effects:
        Prints an error and exits with status 1 if the file is missing.
    """
    data = []
    try:
        with open(filename, 'r') as file:
            for line in file:
                row = line.strip().split(',')
                if row:  # skip blank lines
                    label = row[0]  # first column is the class label
                    features = list(map(float, row[1:]))
                    data.append((features, label))
    except FileNotFoundError:
        # Bug fix: the message previously contained the literal text
        # "(unknown)" instead of interpolating the actual filename.
        print(f"文件 {filename} 未找到，请检查文件路径。")
        exit(1)
    return data


# 计算信息熵
def entropy(labels):
    """
    Return the Shannon entropy (base 2) of a list of class labels.

    Args:
        labels (list of str): Class labels.

    Returns:
        float: Entropy in bits; 0 for an empty list.
    """
    n = len(labels)
    if not n:
        return 0
    result = 0.0
    for count in Counter(labels).values():
        p = count / n
        result -= p * math.log2(p)
    return result


# 按特征值划分数据集
def split_dataset(data, feature_index, threshold):
    """
    Partition a dataset into two subsets on one numeric feature.

    Samples whose feature value is <= threshold go left; the rest go right.

    Args:
        data (list of tuple): (features, label) pairs.
        feature_index (int): Index of the feature to split on.
        threshold (float): Split threshold.

    Returns:
        tuple: (left_subset, right_subset), each a list of (features, label).
    """
    left = []
    right = []
    for sample in data:
        bucket = left if sample[0][feature_index] <= threshold else right
        bucket.append(sample)
    return left, right


# 计算信息增益
def information_gain(data, feature_index, threshold):
    """
    Compute the information gain of splitting `data` at `threshold`.

    Gain = entropy(parent) - weighted average entropy of the two children.

    Args:
        data (list of tuple): (features, label) pairs.
        feature_index (int): Index of the feature to split on.
        threshold (float): Split threshold.

    Returns:
        float: Information gain in bits (0 for an empty dataset).
    """
    parent_entropy = entropy([lbl for _, lbl in data])
    left, right = split_dataset(data, feature_index, threshold)
    n = len(data)
    if n:
        w_left = len(left) / n
        w_right = len(right) / n
    else:
        w_left = w_right = 0
    children_entropy = (w_left * entropy([lbl for _, lbl in left])
                        + w_right * entropy([lbl for _, lbl in right]))
    return parent_entropy - children_entropy


# 构造决策树
def build_tree(data, available_features):
    """
    Recursively build a decision tree (ID3 with binary numeric splits).

    Candidate thresholds are the midpoints between consecutive distinct
    values of each available feature; the split with the highest
    information gain wins (first seen wins on ties).

    Args:
        data (list of tuple): (features, label) pairs at this node.
        available_features (set of int): Feature indices still usable.

    Returns:
        dict or str: An internal node {'feature', 'threshold', 'left',
        'right'} or a leaf holding a class label.
    """
    labels = [lbl for _, lbl in data]

    # Pure node: every sample already shares one class.
    if len(set(labels)) == 1:
        return labels[0]

    # Out of features: fall back to the majority class.
    if not available_features:
        return Counter(labels).most_common(1)[0][0]

    # Track the best split found so far as (gain, feature, threshold).
    best_gain, best_feature, best_threshold = -1, None, None

    for idx in available_features:
        # Distinct sorted values of this feature; midpoints are candidates.
        values = sorted({features[idx] for features, _ in data})
        for lo, hi in zip(values, values[1:]):
            midpoint = (lo + hi) / 2
            gain = information_gain(data, idx, midpoint)
            if gain > best_gain:
                best_gain, best_feature, best_threshold = gain, idx, midpoint

    # No split improves on the parent: majority-class leaf.
    if best_gain <= 0:
        return Counter(labels).most_common(1)[0][0]

    left, right = split_dataset(data, best_feature, best_threshold)

    # The chosen feature is consumed for the subtrees below this node.
    remaining = available_features - {best_feature}

    return {
        'feature': best_feature,
        'threshold': best_threshold,
        'left': build_tree(left, remaining),
        'right': build_tree(right, remaining)
    }


# 分类函数
def classify(tree, sample):
    """
    Predict the class label of one sample with a built decision tree.

    Args:
        tree (dict or str): Root node (dict) or a leaf label (str).
        sample (list of float): Feature values of the sample.

    Returns:
        str: Predicted class label.
    """
    node = tree
    # Descend until we land on a leaf (a plain label string).
    while not isinstance(node, str):
        side = 'left' if sample[node['feature']] <= node['threshold'] else 'right'
        node = node[side]
    return node


# 主函数
# Script entry point: train on 80% of the Wine data, evaluate on the rest.
if __name__ == "__main__":
    # Load the full dataset from disk.
    print("正在加载Wine数据集...")
    data = load_wine_data()

    # Fixed seed so the shuffled train/test split is reproducible.
    random.seed(42)
    random.shuffle(data)
    split_index = int(len(data) * 0.8)
    train_data, test_data = data[:split_index], data[split_index:]

    # Every feature column is initially available for splitting.
    available_features = set(range(len(data[0][0])))

    # Build the tree from the training portion only.
    print("正在构建决策树...")
    tree = build_tree(train_data, available_features)

    # Evaluate on the held-out test set.
    correct_predictions = 0
    total_tests = len(test_data)
    print("使用决策树对测试集进行分类...")
    for features, true_label in test_data:
        predicted_label = classify(tree, features)
        if predicted_label == true_label:
            correct_predictions += 1

    # Guard against division by zero when the dataset is so small that the
    # 20% test split is empty.
    accuracy = correct_predictions / total_tests if total_tests else 0.0
    print(f"决策树分类准确率: {accuracy * 100:.2f}%")

