import pandas as pd
import numpy as np
from collections import Counter

# 计算信息熵
def entropy(y):
    """Return the Shannon entropy (base 2) of the label sequence *y*."""
    n = len(y)
    # Empirical probability of each distinct label.
    probabilities = [count / n for count in Counter(y).values()]
    return -sum(p * np.log2(p) for p in probabilities)

# 计算信息增益
def information_gain(X, y, feature_index):
    """Information gain from splitting (X, y) on column *feature_index*.

    Computed as H(y) minus the weighted entropy of the label subsets
    induced by each distinct value of the chosen feature.
    """
    column = X[:, feature_index]
    n = len(X)

    # Conditional entropy H(y | feature): weighted average over branches.
    conditional_entropy = 0.0
    for value in np.unique(column):
        branch_labels = y[column == value]
        conditional_entropy += (len(branch_labels) / n) * entropy(branch_labels)

    return entropy(y) - conditional_entropy

# 构建决策树
def id3(X, y, feature_names):
    """Recursively build an ID3 decision tree.

    Parameters
    ----------
    X : np.ndarray, shape (n_samples, n_features)
        Feature matrix; values are treated as categorical.
    y : np.ndarray, shape (n_samples,)
        Class labels.
    feature_names : np.ndarray of str
        Names for the columns of X, consumed in step with the columns.

    Returns
    -------
    A nested dict ``{feature_name: {feature_value: subtree_or_label}}``,
    or a bare class label for a leaf node.
    """
    # Pure node: every sample shares one label -> return it as a leaf.
    classes = np.unique(y)
    if len(classes) == 1:
        return classes[0]

    # No features left to split on -> majority class.
    # (Use .shape[1] rather than len(X[0]): the latter raises IndexError
    # when X has zero rows.)
    if X.shape[1] == 0:
        return Counter(y).most_common(1)[0][0]

    # Pick the feature with the largest information gain.
    gains = [information_gain(X, y, i) for i in range(X.shape[1])]
    best_feature_index = np.argmax(gains)
    best_feature_name = feature_names[best_feature_index]

    tree = {best_feature_name: {}}

    # One branch per observed value of the chosen feature; the feature is
    # removed before recursing, as classic ID3 prescribes.
    column = X[:, best_feature_index]
    for value in np.unique(column):
        mask = column == value
        subset_X = np.delete(X[mask], best_feature_index, axis=1)
        subset_y = y[mask]
        tree[best_feature_name][value] = id3(
            subset_X, subset_y, np.delete(feature_names, best_feature_index)
        )

    return tree

# 预测函数
def predict(tree, sample):
    """Classify *sample* by walking *tree* down to a leaf.

    *sample* must support lookup by feature name (e.g. a dict or a
    pandas Series); each internal node is a single-key dict mapping a
    feature name to its value branches, and a leaf is any non-dict value.
    """
    node = tree
    while isinstance(node, dict):
        feature = next(iter(node))
        node = node[feature][sample[feature]]
    return node

# 载入Wine数据集
# Load the UCI Wine dataset (fetched over the network).
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
columns = ["Class", "Alcohol", "Malic acid", "Ash", "Alcalinity of ash", "Magnesium",
           "Total phenols", "Flavanoids", "Nonflavanoid phenols", "Proanthocyanins",
           "Color intensity", "Hue", "OD280/OD315 of diluted wines", "Proline"]
data = pd.read_csv(url, header=None, names=columns)

# Keep only classes 1 and 2 (binary problem).
filtered_data = data[data['Class'].isin([1, 2])]
X = filtered_data.iloc[:, 1:].values   # feature matrix
y = filtered_data['Class'].values      # class labels
feature_names = np.array(columns[1:])  # column names for the features

# Build the ID3 decision tree.
tree = id3(X, y, feature_names)
print("ID3 Decision Tree:\n", tree)

# Predict the class of the first training sample.
# BUG FIX: `predict` looks the sample up by feature *name* (sample[feature]),
# but a raw numpy row only supports integer indexing and would raise
# IndexError — wrap the row in a name -> value mapping first.
sample = dict(zip(feature_names, X[0]))
prediction = predict(tree, sample)
print(f"Prediction for the first sample: {prediction}")