import math
from collections import Counter
import random


# 加载Wine数据集
def load_wine_data():
    """Load the UCI Wine dataset from 'wine.data' in the working directory.

    Each non-empty line is expected as: class_label,feat1,feat2,...
    Returns a list of (feature_values, label) tuples where feature_values
    is a list of floats and label is the class string (first column).
    Returns an empty list if the file cannot be found.
    """
    dataset = []
    try:
        with open('wine.data', 'r') as file:
            for line in file:
                line = line.strip()
                if not line:
                    # Skip blank lines (the UCI file ends with a trailing
                    # newline) which would otherwise yield a bogus
                    # ('' label, no features) row.
                    continue
                parts = line.split(',')
                label = parts[0]  # first column is the class label
                feature_values = [float(val) for val in parts[1:]]
                dataset.append((feature_values, label))
    except FileNotFoundError:
        print("找不到 'wine.data' 文件，请检查文件路径是否正确。")
    return dataset


# 计算信息熵
def calculate_entropy(labels):
    """Return the Shannon entropy (base 2) of a sequence of labels.

    An empty sequence or a single repeated label yields 0.
    """
    total = len(labels)
    return -sum(
        (count / total) * math.log2(count / total)
        for count in Counter(labels).values()
    )


# 按特征值划分数据集
def split_data(data, feature_index, threshold):
    """Partition *data* on one feature against *threshold*.

    Samples whose feature value is <= threshold go into the left list,
    the rest into the right list. Returns (left_data, right_data).
    """
    left = [item for item in data if item[0][feature_index] <= threshold]
    right = [item for item in data if item[0][feature_index] > threshold]
    return left, right


# 计算信息增益
def calculate_information_gain(data, feature_index, threshold):
    """Information gain from splitting *data* on feature <= threshold.

    Gain = entropy(parent) - weighted average entropy of the two children.
    Assumes *data* is non-empty (callers guarantee this).
    """
    parent_entropy = calculate_entropy([lbl for _, lbl in data])
    n = len(data)
    children = split_data(data, feature_index, threshold)
    weighted_child_entropy = sum(
        (len(subset) / n) * calculate_entropy([lbl for _, lbl in subset])
        for subset in children
    )
    return parent_entropy - weighted_child_entropy


# 构造决策树
def build_decision_tree(data, feature_indices):
    """Recursively build a binary decision tree over *data*.

    data: list of (feature_vector, label) tuples.
    feature_indices: list of feature positions still available for splits.

    Returns either a label string (leaf) or a dict node with keys
    'feature', 'threshold', 'left', 'right'.
    """
    labels = [label for _, label in data]
    # All samples share one class: pure leaf.
    if len(set(labels)) == 1:
        return labels[0]
    # No features left to split on: majority-vote leaf.
    if not feature_indices:
        return Counter(labels).most_common(1)[0][0]

    # Exhaustive search for the (feature, threshold) pair with the
    # highest information gain; candidate thresholds are every distinct
    # value of each remaining feature.
    best_feature = None
    best_threshold = None
    best_gain = -1
    for feature_index in feature_indices:
        unique_values = {features[feature_index] for features, _ in data}
        for value in unique_values:
            gain = calculate_information_gain(data, feature_index, value)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_index
                best_threshold = value

    left_data, right_data = split_data(data, best_feature, best_threshold)
    # Degenerate split (every sample on one side): stop with majority vote.
    if not left_data or not right_data:
        return Counter(labels).most_common(1)[0][0]

    # Each feature is used at most once along any root-to-leaf path.
    remaining_features = feature_indices.copy()
    remaining_features.remove(best_feature)

    # NOTE: the node previously also stored 'left_data'/'right_data' for
    # debugging, which duplicated the training subset at every tree level.
    # classify_sample reads only the four keys below, so they are dropped.
    return {
        'feature': best_feature,
        'threshold': best_threshold,
        'left': build_decision_tree(left_data, remaining_features),
        'right': build_decision_tree(right_data, remaining_features),
    }


# 分类函数
def classify_sample(tree, sample):
    """Predict the label for one feature vector by walking the tree.

    Internal nodes are dicts with 'feature'/'threshold'/'left'/'right';
    leaves are plain label strings. Iterative descent: follow the left
    branch when the sample's feature is <= the node threshold.
    """
    node = tree
    while not isinstance(node, str):
        goes_left = sample[node['feature']] <= node['threshold']
        node = node['left'] if goes_left else node['right']
    return node


if __name__ == "__main__":
    # Load the dataset; bail out cleanly instead of crashing with an
    # IndexError on wine_data[0][0] when the file was missing/empty.
    wine_data = load_wine_data()
    if not wine_data:
        raise SystemExit(1)
    all_feature_indices = list(range(len(wine_data[0][0])))

    # Shuffle, then split 80/20 into train/test.
    random.shuffle(wine_data)
    split_index = int(len(wine_data) * 0.8)
    train_data = wine_data[:split_index]
    test_data = wine_data[split_index:]
    # Guard against a degenerate split (would divide by zero below).
    if not train_data or not test_data:
        raise SystemExit("Dataset too small to split into train and test sets.")

    # Build the decision tree on the training portion.
    decision_tree = build_decision_tree(train_data, all_feature_indices)

    # Evaluate accuracy on the held-out test portion.
    correct_count = 0
    for features, label in test_data:
        prediction = classify_sample(decision_tree, features)
        if prediction == label:
            correct_count += 1

    accuracy = correct_count / len(test_data)
    print(f"决策树分类准确率: {accuracy * 100:.2f}%")
