import pandas as pd
import numpy as np


# Load the wine dataset (headerless CSV) into a DataFrame.
def load_data(file_path):
    """Read the UCI wine data file and return it as a DataFrame.

    The raw file carries no header row, so explicit column names are
    supplied: the first column is the class label, the remaining 13
    are the chemical-analysis features.
    """
    columns = [
        'Class', 'Alcohol', 'Malic_Acid', 'Ash', 'Alcalinity', 'Magnesium',
        'Total_Phenols', 'Flavanoids', 'Nonflavanoid_Phenols',
        'Proanthocyanins', 'Color_Intensity', 'Hue', 'OD280_OD315', 'Proline',
    ]
    return pd.read_csv(file_path, names=columns)


# Compute the Shannon entropy (in bits) of a label array.
def entropy(y):
    """Return the Shannon entropy of the label collection ``y``.

    Fixes over the original:
    - ``np.unique`` replaces ``np.bincount``, so labels need not be
      small non-negative integers, and absent labels contribute no term.
    - Drops the ``1e-9`` epsilon fudge: every observed count is > 0, so
      ``log2`` is always finite and the result is exact (e.g. a 50/50
      split yields exactly 1.0 bit).
    - An empty ``y`` returns 0.0 instead of dividing by zero.
    """
    y = np.asarray(y)
    if y.size == 0:
        # An empty subset carries no information.
        return 0.0
    _, counts = np.unique(y, return_counts=True)
    probabilities = counts / y.size
    # counts > 0 by construction, so no log-of-zero guard is needed.
    return float(-np.sum(probabilities * np.log2(probabilities)))


# Compute the information gain of splitting on one feature column.
def information_gain(X, y, feature_index):
    """Information gain of partitioning ``y`` by column ``feature_index`` of ``X``.

    The feature is treated as categorical: the gain is the parent
    entropy minus the count-weighted entropy of each value's label subset.
    """
    column = X[:, feature_index]
    n = len(y)
    # Weighted child entropy, one term per distinct feature value.
    weighted_child_entropy = sum(
        (np.count_nonzero(column == value) / n) * entropy(y[column == value])
        for value in np.unique(column)
    )
    return entropy(y) - weighted_child_entropy


# ID3 decision tree built on categorical feature splits.
class DecisionTree:
    """ID3 decision tree classifier.

    The fitted tree is a nested dict of the form
    ``{feature_index: {feature_value: subtree}}`` where a leaf is a
    bare class label. Prediction for a feature value never seen during
    training returns ``None``.
    """

    def __init__(self):
        # Nested-dict tree; None until fit() has been called.
        self.tree = None

    def fit(self, X, y):
        """Build the tree from feature matrix ``X`` and label vector ``y``."""
        self.tree = self._build_tree(X, y)

    def _majority_label(self, y):
        """Return the most frequent label in ``y`` (smallest label wins ties)."""
        labels, counts = np.unique(y, return_counts=True)
        return labels[np.argmax(counts)]

    def _build_tree(self, X, y):
        # Pure node: every sample shares one label -> leaf.
        if len(set(y)) == 1:
            return y[0]

        num_features = X.shape[1]
        best_gain = -1
        best_feature = -1

        # Greedily pick the feature with the highest information gain.
        for feature_index in range(num_features):
            gain = information_gain(X, y, feature_index)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_index

        # No useful split remains (gain can be exactly 0, or marginally
        # negative from float rounding): fall back to the majority label.
        # BUG FIX: the original called Counter() here without ever
        # importing it, raising NameError whenever this branch ran.
        if best_gain <= 0:
            return self._majority_label(y)

        # Internal node: one branch per observed value of the best feature.
        tree = {best_feature: {}}
        for value in np.unique(X[:, best_feature]):
            mask = X[:, best_feature] == value
            tree[best_feature][value] = self._build_tree(X[mask], y[mask])

        return tree

    def predict(self, X):
        """Predict a label for each row of ``X``; unseen values yield None."""
        return [self._predict_single(sample, self.tree) for sample in X]

    def _predict_single(self, sample, tree):
        # Leaf reached: the node itself is the class label.
        if not isinstance(tree, dict):
            return tree
        feature_index = next(iter(tree))
        branches = tree[feature_index]
        feature_value = sample[feature_index]
        if feature_value in branches:
            return self._predict_single(sample, branches[feature_value])
        # Feature value never observed during training.
        return None


# Entry point: train on the full wine dataset and report training accuracy.
if __name__ == "__main__":
    data = load_data('wine/wine.data')

    # First column is the class label; the remaining 13 are features.
    features = data.iloc[:, 1:].values
    labels = data.iloc[:, 0].values

    # Fit the ID3 tree on the entire dataset.
    model = DecisionTree()
    model.fit(features, labels)

    # Evaluate on the training data itself, so accuracy is optimistic.
    predicted = model.predict(features)

    accuracy = np.mean(predicted == labels)
    print(f'Accuracy: {accuracy * 100:.2f}%')