import math
import random
from collections import Counter

# Seed the RNG so the train/test split (and thus results) are reproducible
random.seed(42)


# Load the Wine dataset from a comma-separated data file.
def load_wine_data(path='wine.data'):
    """Load the Wine dataset.

    Each non-empty line is expected to be comma-separated, with the class
    label in the first column and numeric features in the remaining columns.

    Args:
        path: Location of the data file (defaults to 'wine.data', matching
            the original hard-coded behavior).

    Returns:
        A list of (features, label) tuples, where features is a list of
        floats and label is the class string. Returns an empty list if the
        file does not exist.
    """
    data = []
    try:
        with open(path, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip blank lines (e.g. a trailing newline) which would
                # otherwise produce a bogus ([], '') sample.
                if not line:
                    continue
                row = line.split(',')
                label = row[0]  # first column is the class label
                features = list(map(float, row[1:]))
                data.append((features, label))
    except FileNotFoundError:
        print("数据集文件不存在，请检查文件路径是否正确。")
    return data


# Shannon entropy of a collection of class labels.
def entropy(labels):
    """Return the base-2 Shannon entropy of *labels*.

    A homogeneous collection has entropy 0; a 50/50 two-class mix has
    entropy 1.
    """
    n = len(labels)
    result = 0.0
    for freq in Counter(labels).values():
        p = freq / n
        result -= p * math.log2(p)
    return result


# Binary partition of the dataset on one feature against a threshold.
def split_dataset(data, feature_index, threshold):
    """Split *data* into (left, right) on feature *feature_index*.

    Samples whose feature value is <= *threshold* go left; the rest go
    right. Input order is preserved within each side.
    """
    left = [sample for sample in data if sample[0][feature_index] <= threshold]
    right = [sample for sample in data if sample[0][feature_index] > threshold]
    return left, right


# Information gain achieved by a candidate (feature, threshold) split.
def information_gain(data, feature_index, threshold):
    """Return the entropy reduction from splitting *data* at *threshold*.

    Gain = H(parent) - weighted average of H(left) and H(right).
    """
    parent_entropy = entropy([lbl for _, lbl in data])
    left, right = split_dataset(data, feature_index, threshold)
    n = len(data)
    weighted_children = (
        (len(left) / n) * entropy([lbl for _, lbl in left])
        + (len(right) / n) * entropy([lbl for _, lbl in right])
    )
    return parent_entropy - weighted_children


# Recursively build a decision tree (ID3-style, binary threshold splits).
def build_tree(data, features):
    """Construct a decision tree over *data*.

    Args:
        data: List of (feature_vector, label) tuples.
        features: List of feature indices still available for splitting.
            Each feature is used at most once along any root-to-leaf path.

    Returns:
        Either a class-label string (a leaf) or a dict node with keys
        'feature', 'threshold', 'left', 'right'.
    """
    labels = [label for _, label in data]
    # All samples share one class: return a leaf.
    if len(set(labels)) == 1:
        return labels[0]
    # No features left to split on: return the majority class.
    if not features:
        return Counter(labels).most_common(1)[0][0]

    best_feature = None
    best_threshold = None
    best_gain = -1
    for feature_index in features:
        # Candidate thresholds are the observed values of this feature.
        # Use a distinct name (vec) for the per-sample feature vector so the
        # comprehension does not shadow the `features` parameter.
        thresholds = {vec[feature_index] for vec, _ in data}
        for threshold in thresholds:
            gain = information_gain(data, feature_index, threshold)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_index
                best_threshold = threshold

    left, right = split_dataset(data, best_feature, best_threshold)
    # A degenerate split (one empty side) cannot separate the classes;
    # fall back to the majority class.
    if not left or not right:
        return Counter(labels).most_common(1)[0][0]

    # Copy before removing so the caller's feature list is not mutated.
    remaining_features = features.copy()
    remaining_features.remove(best_feature)

    return {
        'feature': best_feature,
        'threshold': best_threshold,
        'left': build_tree(left, remaining_features),
        'right': build_tree(right, remaining_features)
    }


# Predict the class of a single sample by walking the tree.
def classify(tree, sample):
    """Return the predicted label for *sample* under *tree*.

    *tree* is either a label string (leaf) or a dict node with keys
    'feature', 'threshold', 'left', 'right'.
    """
    node = tree
    while not isinstance(node, str):
        branch = 'left' if sample[node['feature']] <= node['threshold'] else 'right'
        node = node[branch]
    return node


# Script entry point: train on 80% of the data, evaluate on the rest.
if __name__ == "__main__":
    data = load_wine_data()
    # Exit early if the dataset could not be loaded; otherwise data[0][0]
    # below would raise IndexError on an empty list (load_wine_data has
    # already printed the error message).
    if not data:
        raise SystemExit(1)

    # Indices of all available features (one per column after the label).
    feature_indices = list(range(len(data[0][0])))

    # 80/20 train/test split; the module-level seed keeps it reproducible.
    random.shuffle(data)
    split_index = int(len(data) * 0.8)
    train_data, test_data = data[:split_index], data[split_index:]

    # Build the decision tree on the training portion.
    tree = build_tree(train_data, feature_indices)

    # Evaluate accuracy on the held-out test set. (Named `sample`, not
    # `features`, to avoid shadowing the feature-index list above.)
    correct = sum(
        1 for sample, label in test_data if classify(tree, sample) == label
    )
    accuracy = correct / len(test_data)
    print(f"决策树分类准确率: {accuracy * 100:.2f}%")
