import numpy as np
import pandas as pd

def calc_entropy(y):
    """Shannon entropy (base 2) of the integer class labels in ``y``."""
    freqs = np.bincount(y) / len(y)
    freqs = freqs[freqs > 0]  # drop absent classes: 0*log2(0) is treated as 0
    return -np.sum(freqs * np.log2(freqs))

# Partition the data set on one feature at a threshold
def split_data(X, y, feat_idx, threshold):
    """Partition (X, y) into (left, right) by whether column ``feat_idx``
    is <= ``threshold``.

    Returns (X_left, X_right, y_left, y_right).
    """
    goes_left = X[:, feat_idx] <= threshold
    goes_right = ~goes_left
    return X[goes_left], X[goes_right], y[goes_left], y[goes_right]


# Choose the best feature/threshold to split on
def choose_best_feat(X, y):
    """Search every feature for the threshold split with the highest
    information gain.

    Returns (feature_index, threshold), or (-1, 0) when no split yields a
    strictly positive gain.
    """
    parent_ent = calc_entropy(y)
    n_samples = len(y)
    best_gain = -np.inf
    best_idx, best_thresh = -1, 0

    for feat in range(X.shape[1]):
        values = np.unique(X[:, feat])
        # Cap the number of candidate thresholds for high-cardinality features.
        if len(values) > 10:
            candidates = np.percentile(values, [25, 50, 75])
        else:
            candidates = values

        for cand in candidates:
            _, _, y_lo, y_hi = split_data(X, y, feat, cand)
            # A split that leaves one side empty carries no information.
            if len(y_lo) == 0 or len(y_hi) == 0:
                continue
            # Weighted (conditional) entropy of the two children.
            child_ent = ((len(y_lo) / n_samples) * calc_entropy(y_lo)
                         + (len(y_hi) / n_samples) * calc_entropy(y_hi))
            gain = parent_ent - child_ent
            if gain > best_gain:
                best_gain, best_idx, best_thresh = gain, feat, cand

    # No positive gain means no worthwhile split exists.
    if best_gain <= 0:
        return -1, 0
    return best_idx, best_thresh

def majority_vote(y):
    """Most frequent class label in ``y`` (ties resolve to the smallest label)."""
    counts = np.bincount(y)
    return int(counts.argmax())

# Recursively build the decision tree
def build_tree(X, y, feat_names, depth=0, max_depth=5, min_samples_split=2):
    """Recursively grow a binary decision tree.

    Leaves are plain int class labels; internal nodes are nested dicts of
    the form {feature_name: {'<=T': left, '>T': right}} with T formatted
    to two decimals.
    """
    # Pure node: every sample already has the same label.
    if len(np.unique(y)) == 1:
        return int(y[0])
    # Stopping criteria: depth limit, too few samples, or no features left.
    if depth >= max_depth or len(y) < min_samples_split or X.shape[1] == 0:
        return majority_vote(y)

    feat_idx, thresh = choose_best_feat(X, y)
    if feat_idx == -1:  # no split improves information gain
        return majority_vote(y)

    X_lo, X_hi, y_lo, y_hi = split_data(X, y, feat_idx, thresh)

    def _grow(X_sub, y_sub):
        # An empty partition falls back to the parent's majority class.
        if len(X_sub) == 0:
            return majority_vote(y)
        return build_tree(X_sub, y_sub, feat_names, depth + 1,
                          max_depth, min_samples_split)

    node_name = feat_names[feat_idx]
    return {node_name: {'<=%.2f' % thresh: _grow(X_lo, y_lo),
                        '>%.2f' % thresh: _grow(X_hi, y_hi)}}

# Predict the class of a single sample
def predict_sample(sample, tree, feat_names):
    """Classify a single sample by walking the tree from the root.

    Parameters
    ----------
    sample : 1-D sequence of feature values, ordered like ``feat_names``.
    tree : nested dict produced by ``build_tree``, or an int leaf label.
    feat_names : list of feature names used to locate ``sample`` columns.

    Returns
    -------
    int : predicted class label.
    """
    # Leaf node: the stored label is the prediction.
    if not isinstance(tree, dict):
        return tree

    feat_name = next(iter(tree))
    branches = tree[feat_name]
    sample_val = sample[feat_names.index(feat_name)]

    # Each internal node stores exactly one complementary pair of keys,
    # '<=T' and '>T', built from the same '%.2f'-formatted threshold, so
    # exactly one branch applies. Parse T once and pick deterministically.
    # (The previous code fell through to majority_vote(np.array([0, 1, 2]))
    # — which always returned class 0 — when no branch matched; that dead,
    # misleading fallback is removed: a malformed tree now raises instead
    # of silently predicting 0.)
    le_key = next(k for k in branches if k.startswith('<='))
    threshold = float(le_key[2:])
    branch_key = le_key if sample_val <= threshold else '>' + le_key[2:]
    return predict_sample(sample, branches[branch_key], feat_names)

def predict(X, tree, feat_names):
    """Predict a class label for every row of ``X``; returns a Python list."""
    predictions = []
    for row in X:
        predictions.append(predict_sample(row, tree, feat_names))
    return predictions

if __name__ == "__main__":
    wine_cols = ["class", "alcohol", "malic_acid", "ash", "alcalinity_of_ash",
                 "magnesium", "total_phenols", "flavanoids", "nonflavanoid_phenols",
                 "proanthocyanins", "color_intensity", "hue", "od280/od315", "proline"]
    try:
        wine_data = pd.read_csv("wine.data", header=None, names=wine_cols)
    except:
        wine_data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data",
                                header=None, names=wine_cols)

    wine_data["class"] = wine_data["class"] - 1
    X = wine_data.iloc[:, 1:].values  # 特征矩阵
    y = wine_data.iloc[:, 0].values  # 标签向量
    feat_names = wine_cols[1:]  # 特征名列表
    np.random.seed(42)
    indices = np.random.permutation(len(X))
    train_size = int(len(X) * 0.7)
    X_train, X_test = X[indices[:train_size]], X[indices[train_size:]]
    y_train, y_test = y[indices[:train_size]], y[indices[train_size:]]
    tree = build_tree(X_train, y_train, feat_names, max_depth=5, min_samples_split=3)
    print("ID3决策树结构（部分展示）：")
    print(tree)

    y_pred = predict(X_test, tree, feat_names)
    accuracy = np.sum(y_pred == y_test) / len(y_test)
    print(f"\n测试集准确率：{accuracy:.2%}")
    print("\n前5条测试样本预测结果：")
    for i in range(min(5, len(y_test))):
        print(f"真实类别：{y_test[i]} | 预测类别：{y_pred[i]}")