import math
import csv
import os
from collections import Counter
import random

# 1. Load the Wine dataset (features discretized by quartiles)
def load_wine(file_path):
    """Load the UCI Wine dataset and discretize every continuous feature
    into four quartile-based string bins.

    Each input row is: class label (1-3) followed by 13 numeric features.
    Rows with a wrong field count or non-numeric values are skipped.

    Returns:
        tuple: (samples, features) where samples is a list of
        (feature_dict, label) pairs — feature_dict maps a feature name to
        a bin token like "Alcohol_2" — and features is the list of the 13
        feature names. Returns ([], features) when no valid row is found.
    """
    # The 13 feature names, in dataset column order.
    features = [
        'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash',
        'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols',
        'Proanthocyanins', 'Color intensity', 'Hue',
        'OD280/OD315', 'Proline'
    ]
    data = []  # raw samples as (attribute list, label)

    # Read the raw CSV; newline='' is the csv-module-recommended open mode.
    with open(file_path, 'r', newline='') as f:
        reader = csv.reader(f)
        for row in reader:
            if len(row) != 14:
                continue  # skip malformed rows
            try:
                label = int(row[0])
                attrs = list(map(float, row[1:14]))  # the 13 features
                data.append((attrs, label))
            except ValueError:
                continue  # skip rows with non-numeric fields

    # Guard: quantile indexing below would raise IndexError on empty data.
    if not data:
        return [], features

    # Discretize each continuous feature into 4 quartile intervals.
    for i in range(13):
        values = sorted(d[0][i] for d in data)
        q1 = values[int(len(values) * 0.25)]  # lower quartile
        q2 = values[int(len(values) * 0.5)]   # median
        q3 = values[int(len(values) * 0.75)]  # upper quartile

        # Replace the numeric value with its bin token, in place.
        for attrs, _ in data:
            val = attrs[i]
            if val <= q1:
                attrs[i] = f"{features[i]}_1"  # bin 1
            elif val <= q2:
                attrs[i] = f"{features[i]}_2"  # bin 2
            elif val <= q3:
                attrs[i] = f"{features[i]}_3"  # bin 3
            else:
                attrs[i] = f"{features[i]}_4"  # bin 4

    # Convert to (feature dict, label) pairs.
    return [(dict(zip(features, attrs)), label) for attrs, label in data], features

# 2. Randomly split into training and test sets
def split_data(data, test_ratio=0.3):
    """Randomly split samples into (train, test) lists.

    Shuffles a copy, so the caller's list is NOT mutated (the original
    version shuffled the input in place as a hidden side effect).

    Args:
        data: list of samples.
        test_ratio: fraction of samples placed in the test set.

    Returns:
        tuple: (train, test) lists; len(train) == int(len(data)*(1-test_ratio)).
    """
    shuffled = data[:]          # copy first: random.shuffle works in place
    random.shuffle(shuffled)
    split = int(len(shuffled) * (1 - test_ratio))
    return shuffled[:split], shuffled[split:]

# 3. Shannon entropy
def entropy(labels):
    """Return the base-2 Shannon entropy of a sequence of labels."""
    n = len(labels)
    probs = (count / n for count in Counter(labels).values())
    return -sum(p * math.log2(p) for p in probs)

# 4. Information gain
def info_gain(data, feature):
    """ID3 information gain of splitting `data` on `feature`.

    `data` is a list of (feature_dict, label) pairs; the gain is the
    parent entropy minus the size-weighted entropy of the partitions.
    """
    def _h(labels):
        # Base-2 entropy of a label list (same formula as the module-level helper).
        total = len(labels)
        return -sum((c / total) * math.log2(c / total)
                    for c in Counter(labels).values())

    parent = _h([label for _, label in data])

    # Partition labels by the sample's value for `feature`.
    partitions = {}
    for attrs, label in data:
        partitions.setdefault(attrs[feature], []).append(label)

    size = len(data)
    remainder = sum((len(part) / size) * _h(part)
                    for part in partitions.values())
    return parent - remainder

# 5. Pick the best split feature
def best_feature(data, features):
    """Return the feature with the largest information gain.

    Ties go to the earliest feature in `features`, matching the
    insertion-order tie-break of the original dict-based version.
    """
    return max(features, key=lambda feat: info_gain(data, feat))

# 6. Majority vote
def vote(labels):
    """Return the most frequent label in `labels`."""
    (winner, _count), = Counter(labels).most_common(1)
    return winner

# 7. Build the ID3 decision tree
def build_tree(data, features, max_depth=4):
    """Recursively build an ID3 tree.

    The tree is a nested dict {feature: {value: subtree}}; a leaf is a
    bare class label. Recursion stops on a pure node, when no feature is
    left, or when the depth budget is exhausted (majority vote then).
    """
    labels = [label for _, label in data]

    # Pure node: every sample shares one class.
    if len(set(labels)) == 1:
        return labels[0]
    # No usable feature or depth exhausted: fall back to majority vote.
    if not features or max_depth == 0:
        return vote(labels)

    split_feat = best_feature(data, features)
    leftover = [feat for feat in features if feat != split_feat]
    node = {split_feat: {}}

    # One branch per distinct value of the chosen feature.
    for value in {sample[0][split_feat] for sample in data}:
        branch = [sample for sample in data if sample[0][split_feat] == value]
        node[split_feat][value] = build_tree(branch, leftover, max_depth - 1)

    return node

# 8. Prediction
def predict(tree, sample):
    """Predict the class label for `sample` ((feature_dict, label) pair).

    Walks the nested-dict tree. On a feature value never seen in
    training, falls back to a majority vote over ALL leaf labels in the
    current subtree. (The original version voted only over direct
    non-dict children, which raised IndexError when every branch was
    itself a subtree.)
    """
    if not isinstance(tree, dict):
        return tree  # leaf: the label itself
    feat = next(iter(tree.keys()))
    val = sample[0][feat]
    # Unseen feature value: majority vote over every leaf below this node.
    if val not in tree[feat]:
        return Counter(_collect_leaves(tree)).most_common(1)[0][0]
    return predict(tree[feat][val], sample)

def _collect_leaves(tree):
    """Return the list of all leaf labels in a (sub)tree."""
    if not isinstance(tree, dict):
        return [tree]
    leaves = []
    for branches in tree.values():
        for subtree in branches.values():
            leaves.extend(_collect_leaves(subtree))
    return leaves

# 9. Accuracy
def accuracy(tree, test_data):
    """Return the fraction of samples in `test_data` the tree classifies
    correctly (each sample is a (feature_dict, label) pair)."""
    hits = sum(1 for sample in test_data
               if predict(tree, sample) == sample[1])
    return hits / len(test_data)

# 10. Entry point
def main():
    """Load the Wine data, train an ID3 tree, and print train/test accuracy."""
    file_path = r'D:\Python学习\python01\id3_wine\wine.data'

    # Abort early with a download hint if the dataset file is missing.
    if not os.path.exists(file_path):
        print(f"错误：文件不存在 - {file_path}")
        print("请下载：https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data")
        return

    # Load and discretize the data.
    data, features = load_wine(file_path)
    if not data:
        print("错误：未加载到有效数据")
        return
    print(f"加载完成：{len(data)}个样本，{len(features)}个特征")

    # Split into train/test sets.
    train, test = split_data(data)
    print(f"训练集：{len(train)}个，测试集：{len(test)}个")

    # Train the tree.
    tree = build_tree(train, features, max_depth=4)
    print("决策树构建完成")

    # Evaluate on both splits.
    train_acc = accuracy(tree, train)
    test_acc = accuracy(tree, test)
    print(f"训练集准确率：{train_acc:.4f}")
    print(f"测试集准确率：{test_acc:.4f}")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()