import numpy as np
import pandas as pd
from math import log

# Load a dataset from disk.
def load_dataset(filename):
    """Read a headerless CSV file into a pandas DataFrame."""
    return pd.read_csv(filename, header=None)

# Shannon entropy of a label vector.
def calc_entropy(y):
    """Return the base-2 Shannon entropy of the class labels in `y`.

    `y` is a 1-D array-like of discrete labels; an empty input yields 0.0.
    """
    n = len(y)
    acc = 0.0
    for label in np.unique(y):
        p = np.count_nonzero(y == label) / n
        acc -= p * log(p, 2)
    return acc

# Information gain of a single feature split.
def calc_information_gain(X, y, feature_index):
    """Return the information gain of partitioning `y` by column
    `feature_index` of `X` (parent entropy minus the split's weighted
    child entropies). Discrete feature values are assumed.
    """
    parent_entropy = calc_entropy(y)
    column = X[:, feature_index]
    n = len(y)
    children_entropy = 0.0
    for value in np.unique(column):
        subset = y[column == value]
        children_entropy += (len(subset) / n) * calc_entropy(subset)
    return parent_entropy - children_entropy

# Pick the split attribute with the greatest information gain.
def choose_best_feature_to_split(X, y):
    """Return the column index of `X` with the largest information gain
    over `y`; ties keep the lowest index. Returns -1 if `X` has no columns.
    """
    gains = (calc_information_gain(X, y, i) for i in range(X.shape[1]))
    best_index, best_gain = -1, -1
    for i, gain in enumerate(gains):
        # Strict '>' keeps the first feature on ties, like the original scan.
        if gain > best_gain:
            best_index, best_gain = i, gain
    return best_index

# Recursively build an ID3 decision tree.
def create_tree(X, y, features, depth=0, max_depth=None):
    """Grow an ID3 tree over the rows of `X` and labels `y`.

    Parameters
    ----------
    X : 2-D np.ndarray of discrete feature values.
    y : 1-D np.ndarray of non-negative integer labels
        (np.bincount requires non-negative ints — TODO confirm callers).
    features : list of column indices still available for splitting.
    depth : current recursion depth.
    max_depth : optional depth cap; None means unlimited.

    Returns a class label (leaf) or a nested dict of the form
    {'feature_<i>': {value: subtree, ...}}.
    """
    classes = np.unique(y)
    # Pure node: every sample carries the same label.
    if len(classes) == 1:
        return classes[0]
    # No attributes left, or depth cap reached: majority-vote leaf.
    # (The old `len(X.shape) == 1` test was dead — 2-D slicing keeps rank 2.)
    if not features or depth == max_depth:
        return np.bincount(y).argmax()

    # BUGFIX: restrict the search to the *remaining* features. The original
    # chose over all columns and ignored `features`, so an exhausted feature
    # could be reselected; with duplicate rows bearing different labels and
    # max_depth=None that recursed forever.
    gains = {i: calc_information_gain(X, y, i) for i in features}
    best_feat = max(gains, key=gains.get)
    if gains[best_feat] <= 0:
        # No split improves purity — stop with a majority leaf
        # (also guards against zero-progress infinite recursion).
        return np.bincount(y).argmax()

    remaining = [f for f in features if f != best_feat]
    column = X[:, best_feat]
    tree = {f'feature_{best_feat}': {}}
    for value in np.unique(column):
        mask = column == value
        tree[f'feature_{best_feat}'][value] = create_tree(
            X[mask], y[mask], remaining, depth + 1, max_depth
        )
    return tree

# Fit a decision tree model.
def train(X, y, max_depth=None):
    """Build a decision tree from feature matrix `X` and labels `y`,
    optionally capped at `max_depth` levels.
    """
    candidate_features = list(range(X.shape[1]))
    return create_tree(X, y, candidate_features, max_depth=max_depth)

# Classify one sample with a trained tree.
def predict(tree, sample):
    """Walk `tree` down to a leaf for `sample` and return the class label.

    Parameters
    ----------
    tree : nested dict {'feature_<i>': {value: subtree}} or a bare label.
    sample : indexable feature vector.

    Raises
    ------
    KeyError : if `sample` holds a feature value never seen at the current
        node during training (same exception type as before, but with a
        descriptive message instead of a bare lookup failure).
    """
    # BUGFIX: removed the dead `feature_index = 0` initialization — the
    # variable was always reassigned before use inside the loop.
    while isinstance(tree, dict):
        key = next(iter(tree))  # each internal node has a single 'feature_<i>' key
        feature_index = int(key.split('_')[1])
        feature_value = sample[feature_index]
        branches = tree[key]
        if feature_value not in branches:
            raise KeyError(
                f"Unseen value {feature_value!r} for feature {feature_index}"
            )
        tree = branches[feature_value]
    return tree

# Script entry point.
def main():
    """Train a depth-3 tree on the wine dataset and classify one sample."""
    data = load_dataset('wine.data')
    # Column 0 holds the class label; the rest are features.
    feature_matrix = data.iloc[:, 1:].values
    labels = data.iloc[:, 0].values

    model = train(feature_matrix, labels, 3)

    print("Decision Tree:")
    print(model)

    # Sanity check: classify the first training sample.
    first_sample = feature_matrix[0]
    predicted_class = predict(model, first_sample)
    print(f"The predicted class for the first sample is: {predicted_class}")

if __name__ == "__main__":
    main()