import math
from collections import Counter
import random


def load_wine_data(file_path='wine.data'):
    """
    Load the Wine dataset from a comma-separated file.

    Each non-empty line must contain the class label in the first column
    and float feature values in the remaining columns.

    Args:
        file_path (str): Path to the data file (default 'wine.data').

    Returns:
        list[tuple[list[float], str]]: (features, label) pairs; an empty
        list if the file is missing or contains non-numeric feature values.
    """
    samples = []
    try:
        with open(file_path, 'r') as handle:
            for raw_line in handle:
                text = raw_line.strip()
                if not text:  # skip blank lines
                    continue
                fields = text.split(',')
                # First column is the class label; the rest are features.
                samples.append((list(map(float, fields[1:])), fields[0]))
    except FileNotFoundError:
        print(f"Error: 文件 {file_path} 未找到，请检查文件路径。")
        return []
    except ValueError:
        print("Error: 文件中存在无法转换为浮点数的值，请检查数据格式。")
        return []
    return samples


def entropy(labels):
    """
    Compute the Shannon entropy (base 2) of a list of class labels.

    Args:
        labels (list): Class labels (any hashable values).

    Returns:
        float: Entropy in bits; 0.0 for an empty list (an empty set of
        labels is treated as pure rather than raising ZeroDivisionError).
    """
    total = len(labels)
    if total == 0:
        # Guard: the original divided by len(labels) unconditionally and
        # crashed on empty input.
        return 0.0
    counts = Counter(labels)
    return -sum((count / total) * math.log2(count / total) for count in counts.values())


def split_dataset(data, feature_index, threshold):
    """
    Partition a dataset on one feature against a threshold.

    Args:
        data (list of tuples): (feature_list, label) samples.
        feature_index (int): Index of the feature to split on.
        threshold (float): Split threshold.

    Returns:
        tuple: (left, right) where left holds samples with feature value
        <= threshold and right holds the rest; original order is kept.
    """
    left, right = [], []
    for sample in data:
        target = left if sample[0][feature_index] <= threshold else right
        target.append(sample)
    return left, right


def information_gain(data, feature_index, threshold):
    """
    Compute the information gain of splitting `data` on a feature/threshold.

    Args:
        data (list of tuples): (feature_list, label) samples.
        feature_index (int): Feature index to evaluate.
        threshold (float): Split threshold.

    Returns:
        float: Entropy reduction of the split; 0.0 for empty data or for a
        degenerate split that leaves one side empty (the original called
        entropy([]) in that case, which raised ZeroDivisionError).
    """
    if not data:
        return 0.0

    left, right = split_dataset(data, feature_index, threshold)
    # A split that puts every sample on one side carries no information;
    # short-circuit instead of evaluating entropy of an empty list.
    if not left or not right:
        return 0.0

    total_entropy = entropy([label for _, label in data])
    # The redundant `if data else 0` guards from the original are gone:
    # data is known non-empty after the early return above.
    p_left = len(left) / len(data)
    p_right = len(right) / len(data)
    return total_entropy - (p_left * entropy([label for _, label in left])
                            + p_right * entropy([label for _, label in right]))


def build_tree(data, available_features):
    """
    Recursively build a decision tree by greedy information-gain splitting.

    Args:
        data (list of tuples): (feature_list, label) samples at this node.
        available_features (set): Feature indices still eligible for splits.

    Returns:
        dict or str: An internal node {'feature', 'threshold', 'left',
        'right'} or a leaf label string.
    """
    if not data:
        return "No Data"

    labels = [label for _, label in data]

    # Pure node: every sample shares the same label.
    if len(set(labels)) == 1:
        return labels[0]

    majority = Counter(labels).most_common(1)[0][0]

    # Nothing left to split on: predict the majority class.
    if not available_features:
        return majority

    best_gain, best_feature, best_threshold = -1, None, None

    # Scan every available feature; candidate thresholds are midpoints
    # between consecutive unique values (reduces candidates to check).
    for idx in available_features:
        values = sorted({sample[idx] for sample, _ in data})
        for lo, hi in zip(values, values[1:]):
            cut = (lo + hi) / 2
            gain = information_gain(data, idx, cut)
            if gain > best_gain:
                best_gain, best_feature, best_threshold = gain, idx, cut

    # No candidate threshold existed at all: predict the majority class.
    if best_gain < 0:
        return majority

    left, right = split_dataset(data, best_feature, best_threshold)
    if not left or not right:  # degenerate split; fall back to majority
        return majority

    # Each feature is used at most once along any root-to-leaf path.
    remaining = available_features - {best_feature}
    return {
        'feature': best_feature,
        'threshold': best_threshold,
        'left': build_tree(left, remaining),
        'right': build_tree(right, remaining),
    }


def classify(tree, sample):
    """
    Predict a sample's class label by walking the decision tree.

    Args:
        tree (dict or str): Decision tree node or leaf label.
        sample (list): Feature values for one sample.

    Returns:
        str: Predicted class label.
    """
    node = tree
    # Descend until we reach a leaf (leaves are label strings).
    while not isinstance(node, str):
        goes_left = sample[node['feature']] <= node['threshold']
        node = node['left'] if goes_left else node['right']
    return node


if __name__ == "__main__":
    # Load (features, label) pairs from the default 'wine.data' file.
    data = load_wine_data()
    if not data:
        print("数据加载失败，程序终止。")
    else:
        # Shuffle, then hold out the last 20% of samples for testing.
        random.shuffle(data)
        split_index = int(len(data) * 0.8)
        train_data, test_data = data[:split_index], data[split_index:]

        # All feature indices are initially available for splitting.
        # (Renamed from `features`: the evaluation loop below used to
        # shadow this name with its per-sample loop variable.)
        feature_indices = set(range(len(data[0][0])))

        # Build the decision tree on the training split.
        tree = build_tree(train_data, feature_indices)

        # Evaluate on the held-out split; guard against an empty test
        # split (tiny datasets) to avoid ZeroDivisionError.
        if test_data:
            correct_predictions = sum(
                classify(tree, sample) == label for sample, label in test_data
            )
            accuracy = correct_predictions / len(test_data)
            print(f"决策树分类准确率: {accuracy * 100:.2f}%")
        else:
            print("测试集为空，无法评估准确率。")