import math
from collections import Counter
import random
import graphviz


# Load the Wine dataset
def load_wine_data(path='wine.data'):
    """Load the UCI Wine dataset from a comma-separated file.

    Each non-empty line is: class label, followed by numeric feature values.

    Args:
        path: file to read (default ``'wine.data'`` keeps the old behavior).

    Returns:
        list of ``(features, label)`` tuples, where ``features`` is a
        ``list[float]`` and ``label`` is the class as a string.
    """
    data = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # Skip blank lines (e.g. a trailing newline) — previously such a
            # line produced a bogus ('' label, empty features) row.
            if not line:
                continue
            row = line.split(',')
            label = row[0]  # first column is the class label
            features = [float(value) for value in row[1:]]
            data.append((features, label))
    return data


# Compute the Shannon entropy (base 2) of a label sequence
def entropy(labels):
    """Return the information entropy of the class distribution in *labels*.

    An empty or single-class sequence yields 0 (the sum has no terms /
    the single probability is 1).
    """
    total = len(labels)
    probabilities = [count / total for count in Counter(labels).values()]
    return -sum(p * math.log2(p) for p in probabilities)


# Partition a dataset on one feature at a numeric threshold
def split_dataset(data, feature_index, threshold):
    """Split *data* into two lists on ``features[feature_index]``.

    Samples whose value is ``<= threshold`` go left; the rest go right.
    Returns ``(left, right)``, preserving the original sample order.
    """
    left = [sample for sample in data if sample[0][feature_index] <= threshold]
    right = [sample for sample in data if sample[0][feature_index] > threshold]
    return left, right


# Compute the information gain of a candidate (feature, threshold) split
def information_gain(data, feature_index, threshold):
    """Return parent entropy minus the size-weighted entropy of the split.

    Higher is better; 0 means the split is uninformative.
    """
    parent_entropy = entropy([lbl for _, lbl in data])
    partitions = split_dataset(data, feature_index, threshold)
    n = len(data)
    weighted_child_entropy = sum(
        (len(part) / n) * entropy([lbl for _, lbl in part])
        for part in partitions
    )
    return parent_entropy - weighted_child_entropy


# Build the decision tree recursively (ID3-style, information-gain splits)
def build_tree(data, features):
    """Recursively build a decision tree over *data*.

    Args:
        data: list of ``(features, label)`` samples.
        features: list of still-usable feature (column) indices; each index
            is consumed once along any root-to-leaf path.

    Returns:
        Either a label string (leaf) or a dict node with keys
        ``'feature'``, ``'threshold'``, ``'left'`` (<= threshold) and
        ``'right'`` (> threshold).
    """
    labels = [label for _, label in data]
    # Pure node: every sample has the same class -> leaf.
    if len(set(labels)) == 1:
        return labels[0]
    # No features left to split on -> majority-class leaf.
    if not features:
        return Counter(labels).most_common(1)[0][0]

    # Exhaustive search over (feature, observed value) for the best gain.
    best_feature = None
    best_threshold = None
    best_gain = -1
    for feature_index in features:
        # FIX: this generator previously named its loop variable `features`,
        # shadowing the feature-index list parameter — harmless only by
        # accident of generator-expression scoping.
        thresholds = set(sample[feature_index] for sample, _ in data)
        for threshold in thresholds:
            gain = information_gain(data, feature_index, threshold)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_index
                best_threshold = threshold

    # Split on the winner; a degenerate split (e.g. threshold == column max,
    # leaving one side empty) falls back to a majority-class leaf.
    left, right = split_dataset(data, best_feature, best_threshold)
    if not left or not right:
        return Counter(labels).most_common(1)[0][0]

    # Each feature index is used at most once per path (copy, don't mutate
    # the caller's list).
    remaining_features = features.copy()
    remaining_features.remove(best_feature)

    return {
        'feature': best_feature,
        'threshold': best_threshold,
        'left': build_tree(left, remaining_features),
        'right': build_tree(right, remaining_features)
    }


# Predict the class label for a single feature vector
def classify(tree, sample):
    """Walk *tree* from the root and return the leaf label for *sample*.

    A string node is a leaf (the predicted class); a dict node routes left
    when ``sample[feature] <= threshold``, otherwise right.
    """
    node = tree
    while not isinstance(node, str):
        goes_left = sample[node['feature']] <= node['threshold']
        node = node['left'] if goes_left else node['right']
    return node


# Render the decision tree as a graphviz Digraph
def visualize_tree(tree, features, dot=None):
    """Add *tree* to a graphviz Digraph and return it.

    Args:
        tree: tree as produced by ``build_tree`` (dict nodes / string leaves).
        features: sequence mapping a feature index to its display name.
        dot: existing ``graphviz.Digraph`` to append to, or None to create one.

    Returns:
        The ``graphviz.Digraph`` containing the rendered tree.

    FIX: the previous version keyed graph nodes on ``id(node)``. Two distinct
    leaves holding the same (e.g. interned) label string are the *same*
    object, so they collapsed into a single graph node. Nodes are now given
    per-call unique ids instead.
    """
    if dot is None:
        dot = graphviz.Digraph()
    # Per-call unique node ids; the root's id() keeps ids distinct even when
    # several trees are drawn into one shared Digraph.
    counter = [0]
    prefix = f"t{id(tree)}_"

    def _add(node):
        # Emit one graph node for *node* and return its id.
        node_id = f"{prefix}{counter[0]}"
        counter[0] += 1
        if isinstance(node, str):
            dot.node(node_id, label=node)
            return node_id
        dot.node(node_id, label=f"{features[node['feature']]} <= {node['threshold']}")
        dot.edge(node_id, _add(node['left']), label="True")
        dot.edge(node_id, _add(node['right']), label="False")
        return node_id

    _add(tree)
    return dot


# Script entry point: train, visualize, and evaluate the decision tree
if __name__ == "__main__":
    # Load the dataset; feature "names" are just column indices 0..n-1.
    data = load_wine_data()
    feature_indices = list(range(len(data[0][0])))

    # Shuffle, then hold out 20% of the rows as the test split.
    random.shuffle(data)
    split_index = int(len(data) * 0.8)
    train_data, test_data = data[:split_index], data[split_index:]

    # Train the decision tree on the training split.
    tree = build_tree(train_data, feature_indices)

    # Render the tree to decision_tree.png (needs the graphviz binaries).
    dot = visualize_tree(tree, feature_indices)
    dot.render('decision_tree', format='png', view=True)

    # Per-class tallies: correct predictions and total occurrences.
    correct_per_class = Counter()
    total_per_class = Counter()

    # Evaluate on the held-out split.
    # FIX: the loop variable was previously named `features`, shadowing (and
    # clobbering) the feature-index list defined above.
    for sample_features, label in test_data:
        prediction = classify(tree, sample_features)
        if prediction == label:
            correct_per_class[label] += 1
        total_per_class[label] += 1

    # Overall accuracy across all classes.
    overall_correct = sum(correct_per_class.values())
    overall_accuracy = overall_correct / len(test_data) * 100
    print(f"决策树总体分类准确率: {overall_accuracy:.2f}%")

    # Per-class accuracy (guard kept for the theoretical zero-count case).
    for class_label in sorted(total_per_class):
        class_total = total_per_class[class_label]
        class_accuracy = correct_per_class[class_label] / class_total * 100 if class_total > 0 else 0
        print(f"类别 {class_label} 的分类准确率: {class_accuracy:.2f}%")
