import numpy as np
import urllib.request


# Download the Wine dataset from the UCI Machine Learning repository.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
# Context manager guarantees the HTTP connection is closed even on error
# (the original leaked the response object), and the timeout keeps a
# stalled server from hanging the script indefinitely.
with urllib.request.urlopen(url, timeout=30) as response:
    data = response.read().decode('utf-8')

# Each CSV line is "label,feat1,...,feat13"; parse into a float matrix.
wine_data = np.array(
    [[float(x) for x in line.split(',')] for line in data.strip().split('\n')]
)
X = wine_data[:, 1:]  # features: 13 chemical measurements per sample
y = wine_data[:, 0]   # class labels: 1, 2, or 3

print("Wine数据集:")
print(f"样本数: {X.shape[0]}, 特征数: {X.shape[1]}")
unique, counts = np.unique(y, return_counts=True)
for cls, count in zip(unique, counts):
    print(f"类别{int(cls)}: {count}个样本")

class SimpleID3:
    """A minimal ID3-style decision tree for numeric features.

    Tree representation: a leaf is a bare class label (int); an internal
    node is a tuple ``(feature_index, threshold, left_subtree, right_subtree)``
    where samples with ``x[feature_index] <= threshold`` go left.
    """

    def __init__(self, max_depth=3):
        # Depth cap limits overfitting; the tree itself is grown by fit().
        self.max_depth = max_depth
        self.tree = None

    def fit(self, X, y):
        """Grow the tree from training matrix X (n, d) and labels y (n,)."""
        self.tree = self._build_tree(X, y, depth=0)

    def _build_tree(self, X, y, depth):
        # Stop when the node is pure or the depth budget is spent; the
        # resulting leaf predicts the majority class at this node.
        if depth >= self.max_depth or np.unique(y).size == 1:
            return np.bincount(y.astype(int)).argmax()

        feature, threshold = self._find_best_split(X, y)
        if feature is None:
            # No split produced positive information gain -> majority leaf.
            return np.bincount(y.astype(int)).argmax()

        goes_left = X[:, feature] <= threshold
        return (
            feature,
            threshold,
            self._build_tree(X[goes_left], y[goes_left], depth + 1),
            self._build_tree(X[~goes_left], y[~goes_left], depth + 1),
        )

    def _find_best_split(self, X, y):
        # Exhaustive search over every (feature, observed value) pair,
        # keeping the first candidate with the strictly largest gain.
        best = (None, None)
        best_gain = 0
        for j in range(X.shape[1]):
            column = X[:, j]
            for candidate in np.unique(column):
                gain = self._information_gain(y, column, candidate)
                if gain > best_gain:
                    best_gain, best = gain, (j, candidate)
        return best

    def _information_gain(self, y, feature, threshold):
        """Entropy reduction from splitting labels y on feature <= threshold."""
        goes_left = feature <= threshold
        n = len(y)
        n_left = np.sum(goes_left)
        n_right = np.sum(~goes_left)
        if n_left == 0 or n_right == 0:
            return 0  # degenerate split: one side is empty, no information

        # Child entropy is the size-weighted average of each side's entropy.
        children = (n_left / n) * self._entropy(y[goes_left]) + (
            n_right / n
        ) * self._entropy(y[~goes_left])
        return self._entropy(y) - children

    def _entropy(self, y):
        """Shannon entropy (in bits) of the label distribution in y."""
        counts = np.bincount(y.astype(int))
        probabilities = counts / len(y)
        # Zero-probability classes contribute nothing (and log2(0) is invalid).
        return -np.sum([p * np.log2(p) for p in probabilities if p > 0])

    def predict(self, X):
        """Return the predicted class label for each row of X."""
        return np.array([self._predict_one(row, self.tree) for row in X])

    def _predict_one(self, x, node):
        # Walk down the tree iteratively until a leaf (non-tuple) is reached.
        while isinstance(node, tuple):
            feature, threshold, left, right = node
            node = left if x[feature] <= threshold else right
        return node


# ----- Train / test split -----
# The raw Wine file is sorted by class, so a sequential 70/30 split would
# put class 3 entirely in the test set while the model never sees it during
# training, making the reported accuracy meaningless. Shuffle first, with a
# fixed seed for reproducibility.
rng = np.random.default_rng(42)
perm = rng.permutation(len(X))
split = int(0.7 * len(X))
train_idx, test_idx = perm[:split], perm[split:]
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]

# Train the tree and predict on the held-out set.
tree = SimpleID3(max_depth=4)
tree.fit(X_train, y_train)
y_pred = tree.predict(X_test)

# Accuracy = fraction of test labels predicted correctly.
accuracy = np.sum(y_pred == y_test) / len(y_test)
print(f"\nID3决策树准确率: {accuracy:.2%}")

# Show the first few individual predictions for a quick sanity check.
print("\n预测结果（前10个）:")
for i in range(min(10, len(y_test))):
    correct = "正确" if y_test[i] == y_pred[i] else "错误"
    print(f"真实: {int(y_test[i])}, 预测: {int(y_pred[i])}, {correct}")