import numpy as np
import pandas as pd
from collections import Counter


def entropy(y):
    """Return the Shannon entropy (base 2) of the label sequence *y*.

    An empty or single-class sequence yields 0.0.
    """
    n = len(y)
    # Convert class frequencies to probabilities, then apply -sum(p*log2(p)).
    probabilities = (freq / n for freq in Counter(y).values())
    return -sum(p * np.log2(p) for p in probabilities)


def information_gain(X, y, feature_index):
    """Return the information gain of splitting *y* on column *feature_index* of *X*.

    Gain = H(y) - sum_v P(v) * H(y | column == v), where v ranges over the
    distinct values found in the chosen column.
    """
    column = X[:, feature_index]
    n = len(y)
    # Expected entropy of the labels after partitioning by each distinct value.
    remainder = 0.0
    for value in np.unique(column):
        partition = y[column == value]
        remainder += (len(partition) / n) * entropy(partition)
    return entropy(y) - remainder


# ID3 decision-tree learner (categorical features).
def id3(X, y, features):
    """Recursively build an ID3 decision tree.

    Parameters
    ----------
    X : np.ndarray of shape (n_samples, n_features)
        Feature matrix; each column is treated as categorical.
    y : np.ndarray of shape (n_samples,)
        Class labels.
    features : list of str
        Names for the columns of X; len(features) must equal X.shape[1].

    Returns
    -------
    A label (leaf) when y is pure or no features remain, otherwise a nested
    dict of the form {feature_name: {value: subtree, ...}}.
    """
    if len(np.unique(y)) == 1:  # all labels identical -> leaf
        return y[0]
    if len(features) == 0:  # no features left -> majority-class leaf
        return Counter(y).most_common(1)[0][0]

    # Pick the feature with the largest information gain.
    gains = [information_gain(X, y, i) for i in range(X.shape[1])]
    best = int(np.argmax(gains))
    best_name = features[best]

    tree = {best_name: {}}
    for value in np.unique(X[:, best]):
        mask = X[:, best] == value
        # BUGFIX: delete the chosen column from the subset so that column
        # indices stay aligned with the shrinking feature-name list in the
        # recursive call (the original kept all columns, so names and
        # indices drifted apart, mislabeling nodes or raising IndexError).
        subset_X = np.delete(X[mask], best, axis=1)
        subset_y = y[mask]
        remaining = features[:best] + features[best + 1:]
        tree[best_name][value] = id3(subset_X, subset_y, remaining)

    return tree


# Download the Wine dataset from the UCI ML repository (the file has no
# header row; it is fetched over the network at import/run time).
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
data = pd.read_csv(url, header=None)

# Column 0 holds the class label; the remaining columns are the measurements.
y = data.iloc[:, 0].values
X = data.iloc[:, 1:].values
features = [f"Feature{i}" for i in range(1, X.shape[1] + 1)]

# Fit the ID3 decision tree and display the resulting nested-dict structure.
tree = id3(X, y, features)
print("ID3生成的决策树：")
print(tree)
