import math
import csv
# Shannon entropy of the class labels (last element of each row).
def calculate_entropy(data):
    """Return the information entropy of the labels in *data*.

    Each row's final element is treated as its class label; the result
    is -sum(p * log2(p)) over the label distribution (0 for empty data).
    """
    sample_count = len(data)
    label_counts = {}
    for row in data:
        label_counts[row[-1]] = label_counts.get(row[-1], 0) + 1
    return -sum(
        (count / sample_count) * math.log2(count / sample_count)
        for count in label_counts.values()
    )

# Partition the dataset by the values of one attribute column.
def split_data(data, attribute_index):
    """Group the rows of *data* by their value at *attribute_index*.

    Returns a dict mapping each distinct attribute value to the list of
    rows carrying that value, in first-seen order.
    """
    partitions = {}
    for row in data:
        partitions.setdefault(row[attribute_index], []).append(row)
    return partitions

# Information gain obtained by splitting on one attribute column.
def calculate_information_gain(data, attribute_index):
    """Return entropy(data) minus the size-weighted entropy of the
    partitions induced by the column at *attribute_index*."""
    dataset_entropy = calculate_entropy(data)
    total = len(data)
    weighted = 0
    for branch in split_data(data, attribute_index).values():
        weighted += (len(branch) / total) * calculate_entropy(branch)
    return dataset_entropy - weighted

# Pick the attribute column yielding the largest information gain.
def choose_best_attribute(data):
    """Return the index of the attribute (any column but the last, the
    label) whose split maximizes information gain.

    Ties go to the lowest index; returns -1 when rows contain no
    attribute columns at all.
    """
    attribute_count = len(data[0]) - 1
    if attribute_count == 0:
        return -1
    return max(
        range(attribute_count),
        key=lambda column: calculate_information_gain(data, column),
    )

# Recursively build an ID3 decision tree.
def create_decision_tree(data, attributes):
    """Build a decision tree as nested dicts: {attribute: {value: subtree}}.

    *data* is a list of rows whose last element is the class label;
    *attributes* names the remaining columns, in column order. Leaves are
    class labels (a pure node's label, or the majority label when no
    attributes remain). Neither input list is modified.

    Fixes two defects in the original implementation:
    - the caller's *attributes* list was mutated in place via ``del``;
    - the chosen attribute was removed from *attributes* while its column
      stayed in the data rows, so on recursive calls column indices no
      longer matched attribute names (wrong labels / IndexError).
    """
    class_list = [row[-1] for row in data]
    # Pure node: every sample carries the same label.
    if len(set(class_list)) == 1:
        return class_list[0]
    # No attributes left to split on: majority vote.
    if len(attributes) == 0:
        return max(set(class_list), key=class_list.count)
    best_index = choose_best_attribute(data)
    best_attribute = attributes[best_index]
    # Copy-based removal: the caller's list stays intact.
    remaining = attributes[:best_index] + attributes[best_index + 1:]
    tree = {best_attribute: {}}
    for value, subset in split_data(data, best_index).items():
        # Drop the consumed column so indices stay aligned with names.
        reduced_rows = [row[:best_index] + row[best_index + 1:] for row in subset]
        tree[best_attribute][value] = create_decision_tree(reduced_rows, remaining)
    return tree

# Load a CSV file into a list of rows (lists of string fields).
def load_data(file_path):
    """Read *file_path* as CSV and return its rows as lists of strings.

    The file is opened with ``newline=''`` as the csv module requires,
    so quoted fields containing embedded newlines parse correctly.
    """
    with open(file_path, 'r', newline='') as file:
        # csv.reader already yields a fresh list per row; copy defensively.
        return [list(row) for row in csv.reader(file)]

if __name__ == "__main__":
    # Train an ID3 tree on the Wine dataset and print the nested-dict tree.
    file_path = 'wine/wine.data'  # the Wine dataset again
    rows = load_data(file_path)
    header, *samples = rows  # first row holds the attribute names
    decision_tree = create_decision_tree(samples, header[:-1])
    print(decision_tree)