import math
from collections import Counter
import random
import graphviz


# Load the Wine dataset; each line is "label,attr1,...,attr13" (CSV).
def read_wine_dataset():
    """Read 'wine.data' and return a list of (attributes, class_label) pairs.

    attributes is a list of floats; class_label is the leading int (1-3).
    """
    samples = []
    with open('wine.data', 'r') as fh:
        for row in fh:
            fields = row.strip().split(',')
            samples.append((
                [float(v) for v in fields[1:]],  # 13 numeric attributes
                int(fields[0]),                  # class label comes first
            ))
    return samples


# Map the numeric Wine class labels (1-3) to their Chinese display names
class_mapping = {1: '第一类', 2: '第二类', 3: '第三类'}


# Shannon entropy of a list of class labels
def compute_shannon_entropy(labels):
    """Return H = -sum(p * log2(p)) over the label frequencies in *labels*."""
    total = len(labels)
    accumulated = 0.0
    for frequency in Counter(labels).values():
        p = frequency / total
        accumulated += p * math.log2(p)
    return -accumulated


# Partition a dataset on one feature at a given threshold
def divide_dataset(dataset, feature_idx, threshold):
    """Split *dataset* into (samples <= threshold, samples > threshold).

    Order within each sublist follows the original dataset order.
    """
    below, above = [], []
    for sample in dataset:
        bucket = below if sample[0][feature_idx] <= threshold else above
        bucket.append(sample)
    return below, above


# Information gain of splitting at feature_idx <= threshold
def calc_information_gain(dataset, feature_idx, threshold):
    """Return parent entropy minus the size-weighted entropy of the two halves.

    A degenerate split (one empty side) yields a gain of 0.
    """
    parent_entropy = compute_shannon_entropy([lbl for _, lbl in dataset])
    left, right = divide_dataset(dataset, feature_idx, threshold)

    if not left or not right:
        return 0

    n = len(dataset)
    children_entropy = sum(
        (len(part) / n) * compute_shannon_entropy([lbl for _, lbl in part])
        for part in (left, right)
    )
    return parent_entropy - children_entropy


# Recursively build a decision tree using binary splits (feature <= threshold)
def construct_decision_tree(dataset, available_features):
    """Build a decision tree from (attributes, label) pairs.

    Returns either a leaf node {'class': <display name>} or an internal node
    {'feature': idx, 'threshold': t, 'left': subtree, 'right': subtree}.
    """
    labels = [label for _, label in dataset]

    # Pure node: every sample shares one label -> leaf.
    if len(set(labels)) == 1:
        return {'class': class_mapping[labels[0]]}

    # No usable features (none remaining, or all remaining are constant):
    # fall back to the majority class.
    if not available_features or all(len(set([row[i] for row, _ in dataset])) == 1 for i in available_features):
        most_common_class = Counter(labels).most_common(1)[0][0]
        return {'class': class_mapping[most_common_class]}

    # Exhaustive search over every (feature, candidate threshold) split.
    # max_gain starts at 0.0 (not -1) so only strictly-positive-gain splits
    # are accepted; a zero-gain split (e.g. threshold == max feature value)
    # leaves one side empty, and recursing on an empty dataset would crash
    # on Counter([]).most_common(1)[0].
    best_feature, best_threshold, max_gain = None, None, 0.0

    for feature_index in available_features:
        unique_values = set([row[feature_index] for row, _ in dataset])
        for value in unique_values:
            gain = calc_information_gain(dataset, feature_index, value)
            if gain > max_gain:
                max_gain, best_feature, best_threshold = gain, feature_index, value

    # No split improves purity at all: stop with a majority-class leaf.
    if best_feature is None:
        most_common_class = Counter(labels).most_common(1)[0][0]
        return {'class': class_mapping[most_common_class]}

    # Split on the best feature and recurse; the chosen feature is consumed.
    left_subset, right_subset = divide_dataset(dataset, best_feature, best_threshold)
    remaining_features = [f for f in available_features if f != best_feature]

    decision_node = {
        'feature': best_feature,
        'threshold': best_threshold,
        'left': construct_decision_tree(left_subset, remaining_features),
        'right': construct_decision_tree(right_subset, remaining_features)
    }

    return decision_node


# Walk the tree to classify one sample
def predict(tree, sample):
    """Return the predicted class name for a single attribute vector.

    Follows 'left' while the sample's feature value is <= the node threshold,
    otherwise 'right', until a leaf ({'class': ...}) is reached.
    """
    node = tree
    while 'class' not in node:
        if sample[node['feature']] <= node['threshold']:
            node = node['left']
        else:
            node = node['right']
    return node['class']


# Render the tree into a graphviz digraph (edges labelled 是/否)
def render_decision_tree(tree, attribute_names, dot=None):
    """Add nodes and edges for *tree* to *dot* and return the graph.

    A fresh graphviz.Digraph is created on the outermost call (dot is None);
    node ids are derived from the dict's identity so subtrees stay unique.
    """
    if dot is None:
        dot = graphviz.Digraph()

    this_id = str(id(tree))

    # Leaf: a single class node, no outgoing edges.
    if 'class' in tree:
        dot.node(this_id, label=f"类别: {tree['class']}")
        return dot

    # Internal node: "<attribute> <= <threshold>" with yes/no branches.
    dot.node(this_id, label=f"{attribute_names[tree['feature']]} <= {tree['threshold']:.2f}")
    dot.edge(this_id, str(id(tree['left'])), label='是')
    dot.edge(this_id, str(id(tree['right'])), label='否')

    render_decision_tree(tree['left'], attribute_names, dot)
    render_decision_tree(tree['right'], attribute_names, dot)

    return dot


# Build a confusion matrix as {true_label: Counter({predicted_label: count})}
def confusion_matrix(true_labels, predictions):
    """Tally predictions per true label, paired positionally via zip."""
    matrix = {}
    for actual, predicted in zip(true_labels, predictions):
        matrix.setdefault(actual, Counter())[predicted] += 1
    return matrix


if __name__ == "__main__":
    # Load the dataset and enumerate the attribute indices (0..12)
    wine_data = read_wine_dataset()
    attribute_indices = list(range(len(wine_data[0][0])))

    # Randomly split into 80% training / 20% test
    random.shuffle(wine_data)
    split_point = int(len(wine_data) * 0.8)
    training_set, testing_set = wine_data[:split_point], wine_data[split_point:]

    # Fit the decision tree on the training set
    decision_tree = construct_decision_tree(training_set, attribute_indices)

    # Visualise the tree (attribute display names in Chinese) and save as PNG
    attribute_names = ['酒精含量', '苹果酸', '灰分', '碱性灰分', '镁',
                       '总酚', '黄烷类', '非黄烷类酚', '原花青素',
                       '颜色强度', '色调', 'OD280/OD315稀释葡萄酒', '脯氨酸']
    visualization = render_decision_tree(decision_tree, attribute_names)
    visualization.render('decision_tree_visualization', format='png', cleanup=True)

    # Evaluate on the held-out test set
    true_labels = []
    predictions = []
    for sample, true_label in testing_set:
        predicted_label = predict(decision_tree, sample)
        true_labels.append(class_mapping[true_label])
        predictions.append(predicted_label)

    overall_accuracy = sum(1 for tl, pl in zip(true_labels, predictions) if tl == pl) / len(testing_set) * 100
    print(f"决策树总体分类准确率: {overall_accuracy:.2f}%")

    # Per-class rate of correct predictions among samples of that true class.
    # NOTE(review): this is per-class recall, not overall accuracy per class.
    cm = confusion_matrix(true_labels, predictions)
    for true_class, preds in cm.items():
        total = sum(preds.values())
        correct = preds.get(true_class, 0)
        class_acc = correct / total * 100 if total > 0 else 0
        print(f"类别 {true_class} 的分类准确率: {class_acc:.2f}%")

    # Print the confusion matrix (one row per true class, sorted)
    print("\n混淆矩阵:")
    for true_class, preds in sorted(cm.items()):
        print(f"真实类别: {true_class}")
        for pred_class, count in sorted(preds.items()):
            print(f"\t预测为 {pred_class}: {count} 次")