import math
import csv
import random
import os
def load_dataset(filename):
    """Load the Wine dataset from a CSV file.

    Each input row holds the class label in column 0 followed by the
    numeric features; rows are reordered to ``[feature..., label]`` so
    the class label is always the last element of every instance.

    Args:
        filename: path to the CSV data file.

    Returns:
        List of instances, each ``[float, float, ..., str label]``.

    Exits the process with status 1 when the file is missing or
    cannot be parsed.
    """
    dataset = []
    try:
        with open(filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                # Skip blank lines (common as trailing rows in .data files);
                # previously these raised and aborted the whole load.
                if not row:
                    continue
                instance = [float(value) for value in row[1:]] + [row[0]]
                dataset.append(instance)
        return dataset
    except FileNotFoundError:
        # BUG FIX: the message used to print the literal text '(unknown)'
        # instead of the filename that was actually requested.
        print(f"错误：找不到文件 '{filename}'")
        print(f"当前工作目录：{os.getcwd()}")
        raise SystemExit(1)
    except Exception as e:
        print(f"加载文件时发生错误：{str(e)}")
        raise SystemExit(1)

def split_dataset(dataset, feature_index, value):
    """Partition *dataset* on a continuous feature threshold.

    Instances whose ``feature_index`` value is <= *value* go to the
    left partition, the rest to the right partition.

    Returns:
        (left, right) lists of instances.
    """
    left = [row for row in dataset if row[feature_index] <= value]
    right = [row for row in dataset if row[feature_index] > value]
    return left, right


def calculate_entropy(dataset):
    """Return the Shannon entropy (base 2) of the class-label
    distribution of *dataset* (label is the last element of each row)."""
    tally = {}
    for row in dataset:
        tally[row[-1]] = tally.get(row[-1], 0) + 1

    n = len(dataset)
    return -sum((c / n) * math.log2(c / n) for c in tally.values())


def calculate_information_gain(dataset, feature_index, value):
    """Information gain from splitting *dataset* at *value* on the
    feature at *feature_index*.

    Returns:
        parent entropy minus the size-weighted entropy of the two
        child partitions; 0 when the split leaves one side empty.
    """
    left, right = split_dataset(dataset, feature_index, value)

    # A split that puts everything on one side carries no information.
    if not left or not right:
        return 0

    n = len(dataset)
    weighted_child_entropy = (
        (len(left) / n) * calculate_entropy(left)
        + (len(right) / n) * calculate_entropy(right)
    )
    return calculate_entropy(dataset) - weighted_child_entropy


def find_best_split(dataset):
    """Search every (feature, threshold) pair for the split with the
    highest information gain.

    Candidate thresholds are the observed values of each feature.

    Args:
        dataset: non-empty list of instances, class label last.

    Returns:
        (best_feature, best_value, best_gain); ``best_feature`` is -1
        and ``best_value`` is None when no split yields positive gain.
    """
    num_features = len(dataset[0]) - 1  # last column is the class label
    best_gain = 0.0
    best_feature = -1
    best_value = None

    for feature_index in range(num_features):
        feature_values = [instance[feature_index] for instance in dataset]
        # PERF: skip duplicate values — a repeated threshold produces the
        # exact same gain, and the strict `>` comparison means only the
        # first occurrence could ever win, so the result is unchanged.
        seen = set()
        for value in feature_values:
            if value in seen:
                continue
            seen.add(value)
            gain = calculate_information_gain(dataset, feature_index, value)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_index
                best_value = value

    return best_feature, best_value, best_gain


def majority_class(class_list):
    """Return the label occurring most often in *class_list*.

    Ties are broken in favor of the label seen first, matching the
    stability of a reverse-sorted frequency ranking.
    """
    tally = {}
    for label in class_list:
        tally[label] = tally.get(label, 0) + 1
    # max() walks keys in insertion order, so the first label to reach
    # the top count wins on ties.
    return max(tally, key=tally.get)


def build_tree(dataset, depth=0, max_depth=5):
    """Recursively grow a decision tree over *dataset*.

    A node is a dict with keys ``feature``, ``value``, ``left`` and
    ``right``; a leaf is simply a class-label string.

    Args:
        dataset: list of instances, class label last.
        depth: current recursion depth (internal).
        max_depth: depth cap to limit overfitting.

    Returns:
        Tree node dict, or a class label for a leaf.
    """
    labels = [row[-1] for row in dataset]

    # Leaf: every instance already carries the same label.
    if all(label == labels[0] for label in labels):
        return labels[0]

    # Leaf: depth budget exhausted — fall back to the majority vote.
    if depth >= max_depth:
        return majority_class(labels)

    best_feature, best_value, best_gain = find_best_split(dataset)

    # Leaf: no split improves purity.
    if best_gain <= 0:
        return majority_class(labels)

    left_rows, right_rows = split_dataset(dataset, best_feature, best_value)
    return {
        "feature": best_feature,
        "value": best_value,
        "left": build_tree(left_rows, depth + 1, max_depth),
        "right": build_tree(right_rows, depth + 1, max_depth),
    }


def predict(instance, tree):
    """Classify *instance* by walking *tree* down to a leaf.

    Internal nodes are dicts ({feature, value, left, right}); anything
    that is not a dict is a leaf and is returned as the class label.
    """
    node = tree
    while isinstance(node, dict):
        goes_left = instance[node["feature"]] <= node["value"]
        node = node["left"] if goes_left else node["right"]
    return node


def get_accuracy(test_set, tree):
    """Return the percentage (0-100) of instances in *test_set* whose
    predicted label equals the true label (last element of the row)."""
    correct = sum(
        1 for instance in test_set
        if predict(instance, tree) == instance[-1]
    )
    return correct / len(test_set) * 100


def main():
    """Train a decision tree on a shuffled 70/30 split of the Wine
    dataset and report classification accuracy on the held-out 30%."""
    data = load_dataset('wine.data')
    random.shuffle(data)  # shuffle before splitting so both sets mix classes

    # 70% training / 30% test split.
    cut = int(0.7 * len(data))
    training_set, test_set = data[:cut], data[cut:]

    print(f"训练集大小: {len(training_set)}")
    print(f"测试集大小: {len(test_set)}")

    model = build_tree(training_set, max_depth=5)

    accuracy = get_accuracy(test_set, model)
    print(f"分类准确率: {accuracy:.2f}%")


# Run the full train/evaluate pipeline only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
