import csv
from collections import Counter
from math import log2
from statistics import mean


# 加载Wine数据集并进行预处理
# Load the Wine dataset and preprocess it
def load_data(url):
    """Download the Wine dataset (CSV, class label in column 0) and return (X, y).

    Args:
        url: HTTP(S) URL of a comma-separated file whose first column is an
            integer class label and remaining columns are numeric features.

    Returns:
        X: list of feature rows (lists of float).
        y: list of integer class labels, aligned with X.
    """
    # Standard library instead of the third-party `requests` dependency.
    from urllib.request import urlopen

    data = []
    with urlopen(url) as response:
        for line in response:
            # Lines from the raw response keep their trailing newline; strip it.
            text = line.decode('utf-8').strip()
            if text:
                # Convert every field to a number in one pass.
                data.append([float(x) for x in text.split(',')])
    # Column 0 is the class label; the rest are features.
    X = [row[1:] for row in data]
    y = [int(row[0]) for row in data]
    return X, y


# 计算信息熵
# Compute the Shannon entropy of a label sequence
def calc_entropy(data):
    """Return the base-2 Shannon entropy of the labels in `data`."""
    total = len(data)
    frequencies = Counter(data)
    return 0.0 - sum(
        (count / total) * log2(count / total)
        for count in frequencies.values()
    )


# Find the best split point over all features
def find_best_split(X, y):
    """Find the (feature, threshold) split that maximizes information gain.

    Candidate thresholds are midpoints between consecutive distinct values
    of each feature.

    Args:
        X: list of feature rows.
        y: list of labels, aligned with X.

    Returns:
        (best_feature_index, best_threshold, best_info_gain). The first two
        are None (and the gain stays -1) when no split exists — e.g. empty
        input or every feature constant.
    """
    best_info_gain = -1
    best_feature_index = None
    best_threshold = None

    # Guard empty input: the original indexed X[0] and divided by len(y),
    # both of which blow up on empty data.
    if not X or not y:
        return best_feature_index, best_threshold, best_info_gain

    base_entropy = calc_entropy(y)
    n_samples = len(y)

    for feature_index in range(len(X[0])):
        # Extract the feature column once per feature, not per threshold.
        column = [row[feature_index] for row in X]
        unique_values = sorted(set(column))

        # Midpoint between each pair of consecutive distinct values.
        for lo, hi in zip(unique_values, unique_values[1:]):
            threshold = (lo + hi) / 2
            left_y = [label for value, label in zip(column, y) if value <= threshold]
            right_y = [label for value, label in zip(column, y) if value > threshold]

            # Weighted average entropy of the two partitions.
            new_entropy = (len(left_y) / n_samples * calc_entropy(left_y)
                           + len(right_y) / n_samples * calc_entropy(right_y))
            info_gain = base_entropy - new_entropy

            if info_gain > best_info_gain:
                best_info_gain = info_gain
                best_feature_index = feature_index
                best_threshold = threshold

    return best_feature_index, best_threshold, best_info_gain


# Decision tree node class
class TreeNode:
    """A node of a binary decision tree grown with the entropy criterion.

    Internal nodes hold (feature_index, threshold); leaf nodes hold `label`.
    """

    def __init__(self, X, y, depth=0):
        self.X = X                  # feature rows reaching this node
        self.y = y                  # labels reaching this node
        self.depth = depth          # depth in the tree (root = 0); informational only
        self.left = None            # subtree for feature value <= threshold
        self.right = None           # subtree for feature value > threshold
        self.feature_index = None   # split feature (internal nodes only)
        self.threshold = None       # split threshold (internal nodes only)
        self.label = None           # predicted class (leaf nodes only)

    def is_pure(self):
        """Return True when every sample at this node has the same label."""
        return len(set(self.y)) == 1

    def build_tree(self):
        """Recursively grow the subtree rooted at this node."""
        # Guard the degenerate empty node FIRST: the original reached
        # Counter([]).most_common(1)[0][0] here, which raises IndexError.
        # An empty node stays an unlabeled leaf.
        if not self.X or not self.y:
            return

        # A pure node becomes a leaf with its single label.
        if self.is_pure():
            self.label = self.y[0]
            return

        self.feature_index, self.threshold, _ = find_best_split(self.X, self.y)

        # No informative split (e.g. all feature vectors identical):
        # become a leaf predicting the majority class.
        if self.feature_index is None:
            self.label = Counter(self.y).most_common(1)[0][0]
            return

        # Partition samples once; both sides are non-empty because the
        # threshold sits strictly between two observed feature values.
        goes_left = [row[self.feature_index] <= self.threshold for row in self.X]

        left_X = [row for row, left in zip(self.X, goes_left) if left]
        left_y = [lab for lab, left in zip(self.y, goes_left) if left]
        self.left = TreeNode(left_X, left_y, self.depth + 1)
        self.left.build_tree()

        right_X = [row for row, left in zip(self.X, goes_left) if not left]
        right_y = [lab for lab, left in zip(self.y, goes_left) if not left]
        self.right = TreeNode(right_X, right_y, self.depth + 1)
        self.right.build_tree()

    def predict(self, x):
        """Return the predicted class label for feature vector `x`."""
        if self.label is not None:
            return self.label
        if x[self.feature_index] <= self.threshold:
            return self.left.predict(x)
        return self.right.predict(x)


# Build the decision tree
def build_decision_tree(X, y):
    """Fit a decision tree on (X, y) and return its root node."""
    tree = TreeNode(X, y)
    tree.build_tree()
    return tree


# 测试模型
def test_model(tree, X_test, y_test):
    predictions = [tree.predict(x) for x in X_test]
    accuracy = sum([1 for p, t in zip(predictions, y_test) if p == t]) / len(y_test)
    print(f"Accuracy: {accuracy * 100:.2f}%")


# Script entry point: download the data, train on the first 80%, evaluate on the rest.
if __name__ == "__main__":
    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
    X, y = load_data(url)

    # Simple ordered 80/20 train/test split (a quick hack; a real application
    # should use a proper shuffled split). NOTE(review): wine.data lists
    # samples grouped by class, so an ordered split is not representative —
    # the test set will be dominated by the last class.
    split_point = int(len(X) * 0.8)
    X_train, X_test = X[:split_point], X[split_point:]
    y_train, y_test = y[:split_point], y[split_point:]

    tree = build_decision_tree(X_train, y_train)
    test_model(tree, X_test, y_test)