import numpy as np
import pandas as pd
import math

class Node:
    """A single decision-tree node.

    A leaf carries a class in ``label``; an internal node carries the
    splitting column name in ``attribute`` and maps each attribute value
    to a child node in ``children``.
    """

    def __init__(self, label=None, attribute=None, children=None):
        self.label = label          # class label for leaves, None for internal nodes
        self.attribute = attribute  # splitting column for internal nodes
        # Avoid the mutable-default pitfall: build a fresh dict per node.
        if children is None:
            children = {}
        self.children = children

def entropy(y):
    """Return the Shannon entropy (in bits) of a vector of class labels.

    Parameters
    ----------
    y : 1-D array-like of non-negative ints
        Class labels. ``np.bincount`` requires non-negative integers.

    Returns
    -------
    float
        ``-sum(p * log2(p))`` over the classes actually present;
        0.0 for an empty input.
    """
    labels = np.asarray(y)
    if labels.size == 0:
        # Guard: the original divided by len(y) and would warn/NaN on empty input.
        return 0.0
    class_probs = np.bincount(labels) / labels.size
    # Drop zero-probability classes instead of the previous `+ 1e-10` fudge,
    # which biased the result; 0 * log2(0) is defined as 0 for entropy.
    class_probs = class_probs[class_probs > 0]
    return float(-np.sum(class_probs * np.log2(class_probs)))

def information_gain(X, y, attribute):
    """Return the information gain of splitting (X, y) on ``attribute``.

    Gain = H(y) - sum over attribute values of (|branch| / |y|) * H(branch).
    Each distinct value of ``X[attribute]`` becomes one branch.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature matrix.
    y : array-like aligned with X (e.g. pandas.Series)
        Class labels; indexed by the same boolean mask as X's rows.
    attribute : hashable
        Column name of X to evaluate.

    Returns
    -------
    float
        Entropy reduction achieved by the split (>= 0).
    """
    total_entropy = entropy(y)

    weighted_entropy = 0.0
    for value in X[attribute].unique():
        # Fix: the original also sliced X into an unused `subset_X` for every
        # branch — dead work removed; only the labels are needed here.
        subset_y = y[X[attribute] == value]
        weighted_entropy += len(subset_y) / len(y) * entropy(subset_y)

    return total_entropy - weighted_entropy

def choose_best_attribute(X, y):
    """Return the column of X with the largest positive information gain.

    Returns ``None`` when no column achieves a strictly positive gain
    (including the case where X has no columns left).
    """
    best_col, best_score = None, 0
    for col in X.columns:
        score = information_gain(X, y, col)
        if score > best_score:
            best_col, best_score = col, score
    return best_col

def build_decision_tree(X, y, max_depth=None):
    """Recursively build an ID3-style decision tree over categorical splits.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature matrix; the chosen column is dropped before each recursion.
    y : pandas.Series (or aligned array-like) of non-negative int labels
        Class labels; ``np.bincount`` requires non-negative integers.
    max_depth : int or None, optional
        Maximum remaining depth; ``None`` means unlimited.

    Returns
    -------
    Node
        Root of the (sub)tree; leaves carry the majority class in ``label``.
    """
    node = Node()

    # Stop when the node is pure or the depth budget is exhausted.
    if len(np.unique(y)) == 1 or (max_depth is not None and max_depth == 0):
        node.label = np.argmax(np.bincount(y))
        return node

    best_attribute = choose_best_attribute(X, y)
    if best_attribute is None:
        # Fix: no attribute yields positive gain (or no columns remain).
        # The original fell through to `X[None]` and raised a KeyError;
        # return a majority-class leaf instead.
        node.label = np.argmax(np.bincount(y))
        return node

    node.attribute = best_attribute
    # Fix: the original computed `max_depth - 1` unconditionally, which raised
    # TypeError whenever max_depth was left at its default of None.
    child_depth = None if max_depth is None else max_depth - 1

    for value in X[best_attribute].unique():
        mask = X[best_attribute] == value
        subset_X = X[mask]
        subset_y = y[mask]

        if len(subset_X) == 0:
            # Unreachable for values taken from unique(), kept as a safeguard.
            # Fix: wrap the majority label in a Node so predict_sample can
            # treat every child uniformly (the original stored a bare int).
            node.children[value] = Node(label=np.argmax(np.bincount(y)))
        else:
            node.children[value] = build_decision_tree(
                subset_X.drop(best_attribute, axis=1), subset_y, max_depth=child_depth
            )

    return node

def predict_sample(x, tree, default=None):
    """Predict the class of one sample by walking the tree.

    Parameters
    ----------
    x : mapping / pandas.Series
        One sample; indexed by attribute (column) name.
    tree : Node
        Root of the (sub)tree to descend.
    default : optional
        Label to return when the sample has an attribute value the tree never
        saw during training. Backward-compatible addition: when omitted, falls
        back to the original behavior of using the module-level ``y_train``.

    Returns
    -------
    The predicted class label.
    """
    if tree.label is not None:
        return tree.label

    child = tree.children.get(x[tree.attribute])
    if child is None:
        if default is not None:
            return default
        # NOTE(review): original behavior relied on the module-level global
        # y_train — fragile hidden coupling; prefer passing `default`.
        return np.argmax(np.bincount(y_train))

    # Safeguard: tolerate a child stored as a bare label rather than a Node.
    if not isinstance(child, Node):
        return child
    return predict_sample(x, child, default)

def predict(X_test, tree):
    """Predict a class label for every row of ``X_test`` using ``tree``.

    Returns a plain list of labels, one per row, in row order.
    """
    return [predict_sample(row, tree) for _, row in X_test.iterrows()]

# Load the UCI Wine dataset over the network (first column is the class label,
# the remaining 13 columns are chemical measurements).
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
names = ["Class", "Alcohol", "Malic acid", "Ash", "Alcalinity of ash", "Magnesium", "Total phenols",
         "Flavanoids", "Nonflavanoid phenols", "Proanthocyanins", "Color intensity", "Hue",
         "OD280/OD315 of diluted wines", "Proline"]
data = pd.read_csv(url, names=names)

# Split a dataset into train and test partitions.
def train_test_split(X, y, test_size=0.2, random_state=None):
    """Shuffle row indices and split (X, y) into train and test parts.

    ``test_size`` is the fraction of rows assigned to the test set
    (truncated to an integer count). When ``random_state`` is given, numpy's
    global RNG is seeded so the split is reproducible.

    Returns ``(X_train, X_test, y_train, y_test)``.
    """
    if random_state is not None:
        np.random.seed(random_state)

    shuffled = np.random.permutation(len(X))
    n_test = int(len(X) * test_size)
    test_idx = shuffled[:n_test]
    train_idx = shuffled[n_test:]

    return X.iloc[train_idx], X.iloc[test_idx], y.iloc[train_idx], y.iloc[test_idx]

# Split the data: column 0 is the class label, the rest are features.
X_train, X_test, y_train, y_test = train_test_split(data.iloc[:, 1:], data.iloc[:, 0], test_size=0.2, random_state=42)

# Build the decision tree, capping depth at 5.
# NOTE(review): the features are continuous measurements, but the tree splits
# on each distinct value as a category — confirm this is the intended design.
tree = build_decision_tree(X_train, y_train, max_depth=5)

# Predict class labels for the test set.
y_pred = predict(X_test, tree)

# Classification accuracy: elementwise comparison of the prediction list
# against the y_test Series, then the mean of the boolean result.
acc = (y_pred == y_test).mean()
print(f"Accuracy: {acc}")

# Print true vs. predicted class for every test sample.
print("True Class   Predicted Class")
for true_class, predicted_class in zip(y_test, y_pred):
    print(f"{true_class:<12} {predicted_class}")