import math
from collections import Counter
import random


# Load the Wine dataset
def load_wine_data(filename='wine.data'):
    """
    Load the Wine dataset from the given file.

    Each non-empty line is expected to be comma-separated, with the class
    label in the first column and float-valued features in the rest.

    :param filename: path to the Wine dataset file, default 'wine.data'
    :return: list of (features, label) tuples; features is a list of floats,
        label is the raw string from column 0
    :raises SystemExit: with status 1 if the file does not exist
    """
    data = []
    try:
        with open(filename, 'r') as f:
            for line in f:
                if line.strip():  # skip blank lines
                    row = line.strip().split(',')
                    label = row[0]  # first column is the class label
                    features = list(map(float, row[1:]))
                    data.append((features, label))
    except FileNotFoundError:
        # Bug fix: the message previously printed a literal placeholder
        # instead of the actual filename.
        print(f"Error: 文件 {filename} 未找到。")
        # exit() comes from the site module and is not guaranteed in all
        # environments; raising SystemExit directly is equivalent and safe.
        raise SystemExit(1)
    return data


# Compute information entropy
def entropy(labels):
    """
    Compute the Shannon entropy (base 2) of a list of labels.

    :param labels: list of class labels
    :return: entropy in bits; 0.0 for an empty list
    """
    total = len(labels)
    if total == 0:
        # Guard: the original raised ZeroDivisionError on an empty list.
        return 0.0
    counts = Counter(labels)
    return -sum((count / total) * math.log2(count / total) for count in counts.values())


# Partition a dataset on a feature value
def split_dataset(data, feature_index, threshold):
    """
    Partition the dataset into two subsets on a single feature.

    Samples whose feature value is <= threshold go to the left subset;
    all others go to the right subset.

    :param data: list of (features, label) tuples
    :param feature_index: index of the feature to split on
    :param threshold: split threshold
    :return: (left_subset, right_subset)
    """
    buckets = ([], [])
    for sample in data:
        # bool indexes the tuple: False (0) = left, True (1) = right
        goes_right = sample[0][feature_index] > threshold
        buckets[goes_right].append(sample)
    return buckets


# Information gain of a candidate split
def information_gain(data, feature_index, threshold):
    """
    Compute the information gain of splitting the dataset at the given
    threshold on the feature at feature_index.

    :param data: list of (features, label) tuples
    :param feature_index: feature index of the candidate split
    :param threshold: candidate split threshold
    :return: parent entropy minus the weighted entropy of the two children
    """
    parent_entropy = entropy([label for _, label in data])
    n = len(data)
    weighted_child_entropy = 0.0
    for child in split_dataset(data, feature_index, threshold):
        weight = len(child) / n if n else 0
        weighted_child_entropy += weight * entropy([label for _, label in child])
    return parent_entropy - weighted_child_entropy


# Build the decision tree
def build_tree(data, available_features=None):
    """
    Recursively build an ID3-style decision tree with binary splits on
    continuous features.

    Internal nodes are dicts with keys 'feature', 'threshold', 'left',
    'right'; leaves are label strings.

    :param data: non-empty list of (features, label) tuples for this node
    :param available_features: feature indices still available for splitting;
        None (the default) means all features
    :return: decision tree (dict node or leaf label string)
    """
    labels = [label for _, label in data]
    if available_features is None:
        # Bug fix: the original tested `not available_features`, which also
        # reset an *empty* list (all features consumed) back to the full
        # feature set instead of falling through to the majority-label leaf.
        available_features = list(range(len(data[0][0])))

    # All samples share one label: pure leaf.
    if len(set(labels)) == 1:
        return labels[0]

    # No features left to split on: majority-label leaf.
    if not available_features:
        return Counter(labels).most_common(1)[0][0]

    # Pick the (feature, threshold) pair with the highest information gain,
    # trying midpoints between consecutive unique feature values.
    best_feature, best_threshold, best_gain = None, None, -1
    for feature_index in available_features:
        unique_values = sorted(set(sample[feature_index] for sample, _ in data))
        for i in range(len(unique_values) - 1):
            threshold = (unique_values[i] + unique_values[i + 1]) / 2
            gain = information_gain(data, feature_index, threshold)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_index
                best_threshold = threshold

    # No candidate split improves purity: majority-label leaf.
    if best_gain <= 0:
        return Counter(labels).most_common(1)[0][0]

    # Split and recurse; the chosen feature is consumed (classic ID3).
    left, right = split_dataset(data, best_feature, best_threshold)
    remaining_features = available_features.copy()
    remaining_features.remove(best_feature)

    return {
        'feature': best_feature,
        'threshold': best_threshold,
        'left': build_tree(left, remaining_features),
        'right': build_tree(right, remaining_features)
    }


# Classification
def classify(tree, sample):
    """
    Predict the label for a sample by walking the decision tree.

    :param tree: tree built by build_tree (dict nodes, string leaves)
    :param sample: feature vector of the sample
    :return: predicted label string
    """
    node = tree
    # Descend until a leaf (string label) is reached.
    while not isinstance(node, str):
        branch = 'left' if sample[node['feature']] <= node['threshold'] else 'right'
        node = node[branch]
    return node


# Main entry point: train on 80% of the Wine data, report test accuracy.
if __name__ == "__main__":
    # Load the data, shuffle, and split into training and test sets
    data = load_wine_data()
    random.shuffle(data)
    split_ratio = 0.8  # fraction of samples used for training
    split_index = int(len(data) * split_ratio)
    train_data, test_data = data[:split_index], data[split_index:]

    # Build the decision tree on the training set
    tree = build_tree(train_data)

    # Evaluate the tree on the held-out test set
    correct_predictions = 0
    for features, true_label in test_data:
        predicted_label = classify(tree, features)
        if predicted_label == true_label:
            correct_predictions += 1

    # NOTE(review): raises ZeroDivisionError if test_data is empty (fewer
    # than 5 samples loaded) — confirm dataset size assumptions.
    accuracy = correct_predictions / len(test_data)
    print(f"决策树分类准确率: {accuracy * 100:.2f}%")