import csv
import sys
from collections import Counter, defaultdict
from math import log2


def load_dataset(filename):
    """Load a CSV dataset.

    Returns a ``(header, rows)`` tuple where ``header`` is the first CSV row
    and ``rows`` is a list of the remaining rows (all values as strings).
    Returns ``(None, None)`` and prints a message if the file is missing or
    cannot be parsed.
    """
    try:
        # newline='' is the csv-module-recommended way to open CSV files
        # (prevents mangling of quoted fields containing newlines).
        with open(filename, 'r', newline='') as file:
            reader = csv.reader(file)
            header = next(reader)  # assume the first row is a header row
            dataset = [row for row in reader]
        return header, dataset
    except FileNotFoundError:
        # BUG FIX: the message printed a literal "(unknown)" instead of the
        # actual filename the caller asked for.
        print(f"Error: File {filename} not found.")
        return None, None
    except Exception as e:
        print(f"An error occurred while loading the dataset: {e}")
        return None, None


def calculate_entropy(labels):
    """Return the Shannon entropy (base 2) of the label distribution.

    An empty label list has entropy 0 by convention.
    """
    if not labels:
        return 0
    total = len(labels)
    entropy = 0.0
    for count in Counter(labels).values():
        probability = count / total
        entropy -= probability * log2(probability)
    return entropy


def split_dataset(dataset, feature_index, value):
    """Return the rows whose feature at ``feature_index`` equals ``value``.

    Rows are returned unmodified (the feature column is NOT removed).
    """
    matching_rows = []
    for row in dataset:
        if row[feature_index] == value:
            matching_rows.append(row)
    return matching_rows


def choose_best_feature_to_split(dataset):
    """Return the index of the feature with the highest information gain.

    The class label is assumed to be the last column of each row.
    Returns -1 when no feature yields a strictly positive gain.
    """
    base_entropy = calculate_entropy([row[-1] for row in dataset])
    num_features = len(dataset[0]) - 1
    total = float(len(dataset))
    best_info_gain = 0.0
    best_feature = -1

    for i in range(num_features):
        # Group the class labels by this feature's value in ONE pass.
        # (The original called split_dataset twice per distinct value,
        # scanning the whole dataset 2*k times per feature.)
        labels_by_value = defaultdict(list)
        for row in dataset:
            labels_by_value[row[i]].append(row[-1])

        # Weighted entropy of the partition induced by feature i.
        new_entropy = sum(
            (len(labels) / total) * calculate_entropy(labels)
            for labels in labels_by_value.values()
        )
        info_gain = base_entropy - new_entropy

        if info_gain > best_info_gain:
            best_info_gain = info_gain
            best_feature = i

    return best_feature


def majority_vote(class_list):
    """Return the most frequent element of ``class_list``.

    Ties are broken by first occurrence; an empty list yields ``None``.
    """
    if class_list:
        winner, _count = Counter(class_list).most_common(1)[0]
        return winner
    return None


def create_decision_tree(dataset, labels, max_depth=None, current_depth=0):
    """Recursively build an ID3 decision tree as nested dicts.

    ``dataset`` rows end with the class label; ``labels`` names the feature
    columns (one fewer entry than the row length). A leaf is a class value;
    an internal node is ``{feature_label: {feature_value: subtree, ...}}``.
    ``max_depth`` (if given) caps recursion; deeper nodes fall back to a
    majority vote.
    """
    class_list = [row[-1] for row in dataset]

    # Pure node (or empty dataset): return the class directly.
    if not class_list or all(c == class_list[0] for c in class_list):
        return class_list[0] if class_list else None

    # No features left, or the depth limit was reached: majority vote.
    if len(dataset[0]) == 1 or (max_depth is not None and current_depth >= max_depth):
        return majority_vote(class_list)

    best_feature = choose_best_feature_to_split(dataset)
    if best_feature == -1:
        # No feature yields positive information gain; indexing with -1 would
        # otherwise split on the class column itself.
        return majority_vote(class_list)

    best_feature_label = labels[best_feature]
    tree = {best_feature_label: {}}
    feature_values = set(row[best_feature] for row in dataset)

    # Remaining feature labels with the chosen one removed.
    sub_labels = labels[:best_feature] + labels[best_feature + 1:]

    for value in feature_values:
        # BUG FIX: also remove the chosen feature's COLUMN from every subset
        # row, so column indices stay aligned with sub_labels in the recursive
        # calls. The original deleted only the label, which misaligned features
        # and raised IndexError on deeper splits (and kept the
        # ``len(dataset[0]) == 1`` stop condition from ever firing).
        subset = [
            row[:best_feature] + row[best_feature + 1:]
            for row in split_dataset(dataset, best_feature, value)
        ]
        tree[best_feature_label][value] = create_decision_tree(
            subset, sub_labels, max_depth, current_depth + 1
        )

    return tree


def print_decision_tree(tree, indent=''):
    """Pretty-print a nested-dict decision tree to stdout.

    Feature names are printed flush with ``indent``, branch values one level
    in with a trailing colon, and leaves as ``-> value``.
    """
    if not isinstance(tree, dict):
        print(f"{indent}-> {tree}")
        return
    for feature, branches in tree.items():
        print(f"{indent}{feature}")
        if isinstance(branches, dict):
            for value, child in branches.items():
                print(f"{indent}  {value}:")
                print_decision_tree(child, indent + '    ')
        else:
            print(f"{indent}  -> {branches}")


# Example driver: build a decision tree from a CSV file and display it.
if __name__ == '__main__':
    # Dataset path comes from the command line when given; falls back to the
    # historical default so existing invocations keep working.
    filename = sys.argv[1] if len(sys.argv) > 1 else 'wine.data'
    header, dataset = load_dataset(filename)
    if dataset is None:
        sys.exit(1)  # load_dataset already printed the error

    labels = header[:-1]  # assume the last column is the class label
    # Cap the depth at 3 to keep the printed tree readable.
    decision_tree = create_decision_tree(dataset, labels, max_depth=3)
    print("Decision Tree:")
    print_decision_tree(decision_tree)