import random
import math


# Load the Wine dataset (first column is the class label).
def load_wine_data(filename):
    """Parse a comma-separated Wine data file.

    Each non-empty line is ``label,f1,...,f13``. Labels 1/2/3 are remapped
    to 0/1/2 for easier downstream handling.

    Returns:
        (data, labels): list of 13-float feature rows, and list of int labels.
    """
    data, labels = [], []
    with open(filename, 'r') as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue  # skip blank lines
            fields = raw.split(',')
            labels.append(int(fields[0]) - 1)  # original classes 1,2,3 -> 0,1,2
            data.append([float(value) for value in fields[1:]])
    return data, labels


# Compute the Shannon entropy of a label sequence.
def entropy(labels):
    """Return the base-2 Shannon entropy of *labels* (0.0 for empty input)."""
    total = len(labels)
    ent = 0.0
    # Iterate distinct classes; p is always > 0 here, so log2 is safe.
    for cls in set(labels):
        p = labels.count(cls) / total
        ent -= p * math.log2(p)
    return ent


# Best split threshold and information gain for a continuous feature.
def split_continuous_feature(data, labels, feature_idx):
    """Find the midpoint threshold on *feature_idx* that maximizes gain.

    Candidate thresholds are midpoints between consecutive distinct values.

    Returns:
        (best_threshold, best_gain); (None, -1) when the feature has a
        single distinct value and cannot be split.
    """
    n = len(data)
    distinct = sorted({row[feature_idx] for row in data})
    candidates = [(lo + hi) / 2 for lo, hi in zip(distinct, distinct[1:])]
    if not candidates:
        return None, -1  # cannot split on a constant feature

    base_entropy = entropy(labels)
    best_threshold, best_gain = None, -1

    for threshold in candidates:
        left = [labels[i] for i in range(n) if data[i][feature_idx] <= threshold]
        right = [labels[i] for i in range(n) if data[i][feature_idx] > threshold]
        # Weighted conditional entropy of the two partitions.
        weighted = (len(left) / n) * entropy(left) + (len(right) / n) * entropy(right)
        gain = base_entropy - weighted
        if gain > best_gain:
            best_gain, best_threshold = gain, threshold
    return best_threshold, best_gain


# Pick the feature (and threshold) with the highest information gain.
def choose_best_feature(data, labels):
    """Scan all features and return (best_index, best_threshold, best_gain).

    Returns (-1, None, -1) when *data* is empty or no feature yields a
    positive gain over the initial -1 sentinel.
    """
    feature_count = len(data[0]) if data else 0
    best = (-1, None, -1)  # (feature index, threshold, gain)

    for idx in range(feature_count):
        threshold, gain = split_continuous_feature(data, labels, idx)
        if gain > best[2]:
            best = (idx, threshold, gain)
    return best


# Majority vote (leaf-node class); ties go to the earliest-seen label.
def majority_vote(labels):
    """Return the most frequent label in *labels*."""
    tallies = {}
    for lab in labels:
        tallies[lab] = tallies.get(lab, 0) + 1

    winner, winner_count = None, -1
    for lab, count in tallies.items():  # insertion order => first occurrence wins ties
        if count > winner_count:
            winner, winner_count = lab, count
    return winner


# Recursively build the ID3 decision tree.
def build_tree(data, labels):
    """Grow a binary tree from (data, labels).

    Leaves are plain label ints; internal nodes are dicts with keys
    'feature', 'threshold', 'left', 'right'.
    """
    # Pure node: all samples share one class.
    if len(set(labels)) == 1:
        return labels[0]
    # No features left to split on.
    if not data[0]:
        return majority_vote(labels)

    feat, thresh, gain = choose_best_feature(data, labels)
    # Stop when the best split offers no information gain.
    if gain <= 0:
        return majority_vote(labels)

    # Partition samples around the chosen threshold.
    left_data, left_labels = [], []
    right_data, right_labels = [], []
    for row, lab in zip(data, labels):
        if row[feat] <= thresh:
            left_data.append(row)
            left_labels.append(lab)
        else:
            right_data.append(row)
            right_labels.append(lab)

    return {
        'feature': feat,
        'threshold': thresh,
        'left': build_tree(left_data, left_labels),
        'right': build_tree(right_data, right_labels),
    }


# Predict the class of a single sample by walking the tree.
def predict_sample(tree, sample):
    """Descend from *tree* to a leaf following *sample*'s feature values."""
    node = tree
    while isinstance(node, dict):  # internal node; leaves are plain labels
        branch = 'left' if sample[node['feature']] <= node['threshold'] else 'right'
        node = node[branch]
    return node


# Compute classification accuracy.
def calculate_accuracy(predictions, true_labels):
    """Return the fraction of predictions matching true_labels.

    Returns 0.0 for empty input (the original raised ZeroDivisionError
    when `predictions` was empty).
    """
    if not predictions:
        return 0.0
    correct = sum(p == t for p, t in zip(predictions, true_labels))
    return correct / len(predictions)


# Entry point: load data, train on an 80/20 split, print per-sample results.
def main():
    """Train an ID3 tree on wine.data and report test-set predictions/accuracy."""
    data, labels = load_wine_data('wine.data')
    # Shuffle, then split into train (80%) and test (20%).
    # NOTE(review): random.shuffle is unseeded, so results vary per run.
    combined = list(zip(data, labels))
    random.shuffle(combined)
    data_shuffled, labels_shuffled = zip(*combined)
    split_idx = int(len(data) * 0.8)

    train_data = list(data_shuffled[:split_idx])
    train_labels = list(labels_shuffled[:split_idx])
    test_data = list(data_shuffled[split_idx:])
    test_labels = list(labels_shuffled[split_idx:])

    # Build the tree and predict every test sample.
    tree = build_tree(train_data, train_labels)
    predictions = [predict_sample(tree, sample) for sample in test_data]

    # Print per-sample results (predicted vs. true labels).
    print("===== 具体分类结果 =====")
    print(f"测试样本数量：{len(test_data)}")
    print(f"预测标签（0/1/2对应原类别1/2/3）：{predictions}")
    print(f"真实标签（0/1/2对应原类别1/2/3）：{test_labels}")

    # Print overall accuracy.
    accuracy = calculate_accuracy(predictions, test_labels)
    print("\n===== 分类效果评估 =====")
    print(f"ID3决策树测试集准确率：{accuracy:.4f}")


if __name__ == "__main__":
    main()