"""
任务3：ID3决策树算法 - Wine数据集
从零实现ID3算法，基于信息熵选择最佳特征
"""

import numpy as np
from collections import Counter
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split


def entropy(y):
    """Shannon entropy (base 2) of an integer label vector.

    Returns 0 for an empty vector; labels must be non-negative ints
    (they are used as indices into ``np.bincount``).
    """
    total = len(y)
    if total == 0:
        return 0
    counts = np.bincount(y)
    # Drop zero-count classes before taking logs to avoid log2(0).
    probs = counts[counts > 0] / total
    return -np.sum(probs * np.log2(probs))


def information_gain(X, y, feature_idx):
    """Information gain of splitting labels ``y`` on feature column ``feature_idx``.

    Gain = H(y) - sum over distinct feature values of (branch weight * H(branch)).
    Treats the column categorically: one branch per distinct value.
    """
    column = X[:, feature_idx]
    n = len(y)
    split_entropy = 0.0
    # np.unique only yields values actually present, so every branch is non-empty.
    for value in np.unique(column):
        branch = y[column == value]
        split_entropy += (len(branch) / n) * entropy(branch)
    return entropy(y) - split_entropy


class ID3DecisionTree:
    """ID3 decision tree classifier built on information gain.

    The tree is a nested dict ``{feature_idx: {feature_value: subtree_or_label}}``;
    leaves are plain class labels. Splits are categorical (one branch per distinct
    value), so continuous features produce very wide trees, and unseen values at
    prediction time fall back to the majority class of the local subtree.
    """

    def __init__(self, max_depth=5):
        # Depth limit for the recursion; a falsy value disables the limit.
        self.max_depth = max_depth
        self.tree = {}

    def fit(self, X, y, features=None, depth=0):
        """Build the tree from training data, store it on the instance, return it.

        ``features`` (candidate column indices) and ``depth`` (starting depth)
        are kept for backward compatibility with the original recursive API.

        Fixes vs. the original: ``self.tree`` is assigned exactly once (the old
        recursive ``fit`` rebound it at every recursion level), and a single-class
        training set now stores the bare leaf label instead of leaving
        ``self.tree`` as ``{}`` (which broke ``predict``).
        """
        if features is None:
            features = list(range(X.shape[1]))
        self.tree = self._build(X, y, features, depth)
        return self.tree

    def _build(self, X, y, features, depth):
        """Recursively build and return a subtree (dict node) or a leaf label."""
        if len(np.unique(y)) == 1:
            return y[0]  # pure node -> leaf
        if not features or (self.max_depth and depth >= self.max_depth):
            return Counter(y).most_common(1)[0][0]  # majority-class leaf

        # Pick the candidate feature with the highest information gain.
        gains = [information_gain(X, y, f) for f in features]
        best_feature = features[int(np.argmax(gains))]
        remaining = [f for f in features if f != best_feature]

        node = {best_feature: {}}
        # np.unique only yields values present in X, so every branch subset is
        # non-empty (the original's empty-subset branch was dead code).
        for value in np.unique(X[:, best_feature]):
            mask = X[:, best_feature] == value
            node[best_feature][value] = self._build(X[mask], y[mask], remaining, depth + 1)
        return node

    def predict(self, X):
        """Predict a label for every row of ``X``; returns a 1-D array."""
        return np.array([self._predict(x, self.tree) for x in X])

    def _predict(self, x, tree):
        """Walk the tree for a single sample ``x``; non-dict nodes are leaves."""
        if not isinstance(tree, dict):
            return tree
        feature = next(iter(tree))
        subtree = tree[feature]
        value = x[feature]
        if value in subtree:
            return self._predict(x, subtree[value])
        # Unseen feature value: fall back to the majority leaf below this node.
        return self._get_most_common_class(subtree)

    def _get_most_common_class(self, subtree):
        """Majority label among all leaves under ``subtree``.

        Delegates to ``_extract_classes`` (the original duplicated its body).
        """
        return Counter(self._extract_classes(subtree)).most_common(1)[0][0]

    def _extract_classes(self, tree):
        """Collect every leaf label in the (sub)tree, depth-first."""
        classes = []
        for value in tree.values():
            if isinstance(value, dict):
                classes.extend(self._extract_classes(value))
            else:
                classes.append(value)
        return classes

    def accuracy(self, X_test, y_test):
        """Fraction of test samples whose prediction matches ``y_test``."""
        predictions = self.predict(X_test)
        return np.sum(predictions == y_test) / len(y_test)

def main():
    """Run the ID3 experiment end to end on sklearn's Wine dataset."""
    print("=== 任务3：ID3决策树 - Wine数据集 ===\n")

    dataset = load_wine()
    data, labels = dataset.data, dataset.target

    # Dataset summary (record count, attribute count, attribute names).
    print("数据集信息:")
    print(f"- 记录数: {data.shape[0]}")
    print(f"- 属性数: {data.shape[1]}")
    print(f"- 属性: {dataset.feature_names}")

    # 70/30 split with a fixed seed so runs are reproducible.
    X_train, X_test, y_train, y_test = train_test_split(
        data, labels, test_size=0.3, random_state=42
    )

    model = ID3DecisionTree(max_depth=5)
    model.fit(X_train, y_train)
    accuracy = model.accuracy(X_test, y_test)

    print(f"\n实验结果:")
    print(f"- ID3决策树准确率: {accuracy:.4f} ({accuracy * 100:.2f}%)")


# Run the experiment only when executed as a script, not on import.
if __name__ == "__main__":
    main()