import random
import math
from urllib.request import urlopen
from collections import Counter

# 1. Load the UCI Wine dataset over HTTP.
def load_wine_data():
    """Download the Wine dataset and parse it into features and labels.

    Returns:
        (X, y): X is a list of 13-element float feature vectors, y is the
        parallel list of class labels (the first CSV column, as floats).
        Both lists are empty when the download fails.
    """
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
    try:
        with urlopen(url) as response:
            raw = response.read().decode('utf-8').strip()
    except Exception as e:
        print(f"数据加载失败：{e}，请检查网络后重试")
        return [], []

    # Each non-blank line is "label,feat1,...,feat13".
    rows = [list(map(float, line.split(','))) for line in raw.split('\n') if line.strip()]
    y = [row[0] for row in rows]
    X = [row[1:] for row in rows]
    return X, y

# 2. Split the data into training and test subsets.
def train_test_split(X, y, test_size=0.2, random_state=42):
    """Randomly partition (X, y) into train/test parts.

    Args:
        X: list of feature vectors.
        y: list of labels, parallel to X.
        test_size: fraction of samples placed in the test set.
        random_state: seed, so the split is reproducible.

    Returns:
        (X_train, X_test, y_train, y_test)
    """
    random.seed(random_state)
    n_samples = len(X)
    # A set gives O(1) membership tests; the original list made the
    # loop below O(n_samples * n_test).
    test_indices = set(random.sample(range(n_samples), int(n_samples * test_size)))

    X_train, X_test = [], []
    y_train, y_test = [], []
    for i in range(n_samples):
        if i in test_indices:
            X_test.append(X[i])
            y_test.append(y[i])
        else:
            X_train.append(X[i])
            y_train.append(y[i])
    return X_train, X_test, y_train, y_test

# 3. Discretize continuous features into quantile bins.
def discretize_features(X, n_bins=3):
    """Map each continuous feature onto integer bins 0..n_bins-1.

    Cut points are taken at the interior (k/n_bins) quantiles of each
    feature column, so the bins hold roughly equal numbers of samples.

    Args:
        X: list of equal-length numeric feature vectors.
        n_bins: number of quantile bins per feature.

    Returns:
        A new matrix of the same shape holding integer bin indices,
        or [] when X is empty.
    """
    if not X:
        return []
    n_samples = len(X)
    discretized = [[] for _ in range(n_samples)]

    for col in range(len(X[0])):
        column = [row[col] for row in X]
        ordered = sorted(column)
        # One cut point per interior quantile boundary (n_bins - 1 cuts).
        cuts = [ordered[int((k / n_bins) * n_samples)] for k in range(1, n_bins)]
        for out_row, value in zip(discretized, column):
            # Bin index = number of cut points strictly below the value.
            out_row.append(sum(value > c for c in cuts))
    return discretized

# 4. Core ID3 decision-tree implementation.
class ID3DecisionTree:
    """ID3 decision-tree classifier for discrete (pre-binned) features.

    Each internal node splits on the feature with maximum information gain.
    Growth stops when a node is pure, no features remain, or ``max_depth``
    is reached; such nodes become majority-vote leaves.
    """

    def __init__(self, max_depth=5):
        # Fitted tree: nested dicts {feature_index: {feature_value: subtree}}
        # with class labels at the leaves. None until fit() is called.
        self.tree = None
        self.max_depth = max_depth

    def _entropy(self, y):
        """Shannon entropy (base 2) of the label list y; 0.0 for empty y."""
        total = len(y)
        entropy = 0.0
        for count in Counter(y).values():
            p = count / total  # count >= 1, so p > 0 and log2 is defined
            entropy -= p * math.log2(p)
        return entropy

    def _info_gain(self, X, y, feature_idx):
        """Information gain of splitting (X, y) on column feature_idx.

        Groups labels by feature value in one pass instead of rescanning
        the whole dataset once per distinct value.
        """
        groups = {}
        for row, label in zip(X, y):
            groups.setdefault(row[feature_idx], []).append(label)
        n = len(y)
        child_entropy = sum(
            (len(subset) / n) * self._entropy(subset) for subset in groups.values()
        )
        return self._entropy(y) - child_entropy

    def _best_feature(self, X, y):
        """Index (into the current columns) of the highest-gain feature."""
        n_features = len(X[0]) if X else 0
        gains = [self._info_gain(X, y, i) for i in range(n_features)]
        return gains.index(max(gains))

    def _majority_vote(self, y):
        """Most common label in y (ties broken by first-seen order)."""
        return Counter(y).most_common(1)[0][0]

    def _extract_leaves(self, subtree):
        """Recursively collect every leaf label under subtree."""
        if not isinstance(subtree, dict):  # leaf node: a bare class label
            return [subtree]
        leaves = []
        for child in subtree.values():
            leaves.extend(self._extract_leaves(child))
        return leaves

    def _build_tree(self, X, y, features, depth):
        """Recursively grow a subtree.

        ``features`` maps the current (shrinking) column positions back to
        the original feature indices, so tree keys always refer to the
        caller's full feature vectors.
        """
        # Pure node: every sample shares one label.
        if len(set(y)) == 1:
            return y[0]
        # Out of features or depth budget: fall back to the majority class.
        if not features or depth >= self.max_depth:
            return self._majority_vote(y)

        best_idx = self._best_feature(X, y)
        best_feature = features[best_idx]
        tree = {best_feature: {}}
        remaining = [f for i, f in enumerate(features) if i != best_idx]

        for val in set(row[best_idx] for row in X):
            subset_X, subset_y = [], []
            for row, label in zip(X, y):
                if row[best_idx] == val:
                    # Drop the used column so child columns align with `remaining`.
                    subset_X.append([v for j, v in enumerate(row) if j != best_idx])
                    subset_y.append(label)
            tree[best_feature][val] = self._build_tree(
                subset_X, subset_y, remaining, depth + 1
            )
        return tree

    def fit(self, X, y):
        """Build the tree from discrete feature matrix X and labels y.

        Raises:
            ValueError: if X or y is empty (the original code failed with an
                obscure IndexError deep inside Counter.most_common).
        """
        if not X or not y:
            raise ValueError("cannot fit ID3DecisionTree on empty training data")
        self.tree = self._build_tree(X, y, list(range(len(X[0]))), depth=0)

    def _predict_single(self, x, tree):
        """Walk the tree for one sample and return its predicted label."""
        while isinstance(tree, dict):
            feature = next(iter(tree))
            branches = tree[feature]
            val = x[feature]
            if val in branches:  # seen during training: descend
                tree = branches[val]
            else:
                # Unseen feature value: majority vote over every leaf label
                # reachable from this node (leaves only, never dict nodes).
                return self._majority_vote(self._extract_leaves(branches))
        return tree  # leaf reached

    def predict(self, X):
        """Predict a label for each sample in X.

        Raises:
            RuntimeError: if called before fit() (previously this silently
                returned None for every sample).
        """
        if self.tree is None:
            raise RuntimeError("predict() called before fit()")
        return [self._predict_single(x, self.tree) for x in X]

    def accuracy(self, X, y):
        """Fraction of samples whose prediction matches y; 0.0 for empty y."""
        if not y:
            return 0.0
        y_pred = self.predict(X)
        return sum(p == t for p, t in zip(y_pred, y)) / len(y)

# 5. Run the full pipeline: load, split, discretize, train, evaluate.
if __name__ == "__main__":
    X, y = load_wine_data()
    if not X:
        # Failure message was already printed by load_wine_data().
        raise SystemExit
    # Report the actual feature count instead of the hard-coded "13".
    print(f"成功加载Wine数据集：{len(X)}个样本，{len(X[0])}个特征")

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    print(f"训练集：{len(X_train)}个样本，测试集：{len(X_test)}个样本")

    # NOTE(review): each split is discretized with its own quantiles; ideally
    # the test set would reuse bin edges computed from the training set.
    X_train_disc = discretize_features(X_train, n_bins=3)
    X_test_disc = discretize_features(X_test, n_bins=3)

    id3 = ID3DecisionTree(max_depth=5)
    id3.fit(X_train_disc, y_train)
    print("ID3决策树训练完成")

    train_acc = id3.accuracy(X_train_disc, y_train)
    test_acc = id3.accuracy(X_test_disc, y_test)

    print("\n===== ID3决策树分类结果 =====")
    print(f"训练集准确率：{train_acc:.4f}")
    print(f"测试集准确率：{test_acc:.4f}")