import numpy as np
import pandas as pd
from collections import Counter

def load_data():
    """Load the UCI wine dataset and move the class label to the last column.

    The raw ``wine.data`` file stores the class label in the FIRST column,
    while the rest of this script assumes the label is the LAST column
    (``data[:, -1]``), so the columns are rotated here.

    Returns:
        pd.DataFrame: feature columns first, class label as the final column.
    """
    data = pd.read_csv('./wine/wine.data', header=None)
    # NOTE(review): assumes the standard UCI layout (class in column 0) —
    # confirm against the actual file if it has been preprocessed.
    cols = list(data.columns[1:]) + [data.columns[0]]
    return data[cols]

def calc_entropy(data):
    """Shannon entropy (base 2) of the class labels.

    Args:
        data: 2-D numpy array whose final column holds the class labels.

    Returns:
        Entropy of the empirical label distribution (0 for a pure set).
    """
    labels = data[:, -1]
    _, counts = np.unique(labels, return_counts=True)
    p = counts / len(labels)
    return -np.sum(p * np.log2(p))

def calc_info_gain(data, feature):
    """Information gain from partitioning ``data`` on one feature column.

    Args:
        data: 2-D numpy array, labels in the last column.
        feature: index of the feature column to split on.

    Returns:
        Parent entropy minus the size-weighted entropy of each partition.
    """
    n = len(data)
    column = data[:, feature]
    gain = calc_entropy(data)
    for value in np.unique(column):
        subset = data[column == value]
        gain -= (len(subset) / n) * calc_entropy(subset)
    return gain

def choose_best_feature(data):
    """Return the feature-column index with the largest information gain.

    Ties are broken in favor of the lowest index (first maximum wins, as
    with a strict-greater scan). Returns None when there are no feature
    columns at all.

    Args:
        data: 2-D numpy array, labels in the last column.
    """
    feature_indices = range(data.shape[1] - 1)
    return max(feature_indices,
               key=lambda f: calc_info_gain(data, f),
               default=None)

def create_tree(data, features):
    """Recursively build an ID3 decision tree.

    Args:
        data: 2-D numpy array, labels in the last column.
        features: feature-column indices still available for splitting.

    Returns:
        A nested dict ``{feature_index_str: {value_str: subtree}}``, or a
        bare label value at a leaf.
    """
    labels = data[:, -1]
    distinct = np.unique(labels)
    # Pure node: every sample carries the same label.
    if distinct.size == 1:
        return distinct[0]
    # Nothing left to split on: predict the majority label.
    if not features:
        return Counter(labels).most_common(1)[0][0]

    best = choose_best_feature(data)
    remaining = [f for f in features if f != best]
    node = {str(best): {}}
    for value in np.unique(data[:, best]):
        subset = data[data[:, best] == value]
        node[str(best)][str(value)] = create_tree(subset, remaining)
    return node

def _majority_leaf(tree):
    """Most common leaf label reachable from ``tree`` (a bare label is its own majority)."""
    if not isinstance(tree, dict):
        return tree
    leaves = []
    stack = [tree]
    while stack:
        node = stack.pop()
        if isinstance(node, dict):
            stack.extend(node.values())
        else:
            leaves.append(node)
    return Counter(leaves).most_common(1)[0][0]

def predict(tree, sample):
    """Walk the decision tree from the root and return the predicted label.

    Args:
        tree: nested dict produced by ``create_tree``, or a bare leaf label.
        sample: 1-D sequence of feature values indexed by feature number
            (a trailing true-label entry, if present, is ignored).

    Returns:
        The predicted class label.
    """
    while isinstance(tree, dict):
        feature = next(iter(tree))
        branches = tree[feature]
        value = str(sample[int(feature)])
        if value not in branches:
            # Unseen feature value: fall back to the majority label among the
            # leaves under this node. (The original `Counter(sample[-1])`
            # raised TypeError on a scalar label and would also have leaked
            # the sample's true label as the "prediction".)
            return _majority_leaf(branches)
        tree = branches[value]
    return tree

def test_tree(tree, data):
    """Classification accuracy of ``tree`` over every row of ``data``.

    Args:
        tree: decision tree produced by ``create_tree``.
        data: 2-D numpy array, true labels in the last column.

    Returns:
        Fraction of rows whose prediction matches the true label.
    """
    predicted = np.array([predict(tree, row) for row in data])
    return np.mean(predicted == data[:, -1])

def print_tree(tree, indent=""):
    """Pretty-print the nested-dict tree, two spaces of indent per depth.

    Internal nodes print as ``key:``; leaf labels print as ``-> label``.

    Args:
        tree: nested dict produced by ``create_tree``, or a bare leaf.
        indent: leading whitespace for the current depth (used recursively).
    """
    if not isinstance(tree, dict):
        print(indent + "-> " + str(tree))
        return
    for key, subtree in tree.items():
        print(indent + str(key) + ":")
        print_tree(subtree, indent + "  ")

def main():
    """Train an ID3 tree on the wine data, print it, and report its accuracy."""
    dataset = load_data().values
    feature_indices = list(range(dataset.shape[1] - 1))
    tree = create_tree(dataset, feature_indices)
    print("Decision Tree:")
    print_tree(tree)
    # NOTE: accuracy is measured on the training data itself.
    print("\nAccuracy:", test_tree(tree, dataset))

# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()