import pandas as pd
import numpy as np
from math import log2

# Load the wine dataset: class label in column 0, 13 numeric features after it.
data = pd.read_csv('wine.data', header=None)

# Hold out a middle slice of each of the three classes for testing;
# everything else becomes the training split.
train_parts = [data.iloc[0:40], data.iloc[59:99], data.iloc[130:160]]
test_parts = [data.iloc[40:59], data.iloc[99:130], data.iloc[160:]]
train_data = pd.concat(train_parts)
test_data = pd.concat(test_parts)

# Shannon entropy of the class labels.
def entropy(data):
    """Return the Shannon entropy (in bits) of the labels in column 0 of `data`."""
    probs = data.iloc[:, 0].value_counts(normalize=True)
    return -np.sum(probs * np.log2(probs))

# ID3 information-gain criterion.
def information_gain(data, feature):
    """Return the information gain obtained by partitioning `data` on `feature`.

    Gain = entropy(data) - weighted average entropy of the partitions, one
    partition per distinct value of the feature column.
    """
    n = len(data)
    weighted_entropy = sum(
        len(part) / n * entropy(part)
        for _, part in data.groupby(feature)
    )
    return entropy(data) - weighted_entropy

# Pick the split attribute with the highest information gain.
def choose_best_feature(data):
    """Return the feature column (any column after the label column 0) with the
    largest information gain; ties go to the earliest column.  Returns None
    when no feature columns remain.
    """
    return max(
        data.columns[1:],
        key=lambda col: information_gain(data, col),
        default=None,
    )

# Decision-tree node.
class Node:
    """A single decision-tree node.

    Internal nodes carry `feature` (the column tested) and a `children` dict
    mapping observed feature value -> child Node; leaves carry `label`.
    `threshold` is accepted for interface compatibility but unused by the
    ID3 builder in this file.
    """

    def __init__(self, feature=None, threshold=None, label=None):
        self.feature = feature
        self.threshold = threshold
        self.label = label
        self.children = {}

# Build the decision tree.
def create_decision_tree(data):
    """Recursively build an ID3 decision tree from `data`.

    `data` holds the class label in column 0 and feature columns after it.
    Returns the root Node of the (sub)tree.
    """
    label_counts = data.iloc[:, 0].value_counts()
    # Pure node: every sample shares one label -> leaf.
    if len(label_counts) == 1:
        return Node(label=label_counts.index[0])
    # No features left to split on -> majority-vote leaf.
    if len(data.columns) == 1:
        return Node(label=label_counts.idxmax())
    # Split on the feature with the highest information gain.
    best_feature = choose_best_feature(data)
    node = Node(feature=best_feature)
    # One child per observed value of the chosen feature.  Each subset is
    # non-empty by construction (the values come from the data itself), so
    # the original empty-subset fallback was unreachable and has been removed.
    for val in data[best_feature].unique():
        sub_data = data[data[best_feature] == val]
        node.children[val] = create_decision_tree(sub_data.drop(columns=[best_feature]))
    return node

# Prediction: walk the tree to a leaf.
def predict(node, sample):
    """Classify `sample` (indexable by column label) by descending from `node`.

    The tree built above branches on *exact* observed feature values, so we
    look the value up directly.  The original implementation scanned the
    children with a `<=` "threshold" comparison (order-dependent, since the
    keys are plain values, not thresholds) and then dereferenced the loop
    variable after the loop — a NameError/arbitrary-branch bug for values
    greater than every key.
    """
    if node.label is not None:
        return node.label
    feature_val = sample[node.feature]
    child = node.children.get(feature_val)
    if child is None:
        # Unseen (continuous) value: follow the branch whose key is closest.
        nearest = min(node.children, key=lambda key: abs(key - feature_val))
        child = node.children[nearest]
    return predict(child, sample)

# Evaluation: fraction of correctly classified rows.
def test(decision_tree, test_data):
    """Return the accuracy of `decision_tree` on `test_data` (label in column 0)."""
    hits = sum(
        1
        for _, row in test_data.iterrows()
        if predict(decision_tree, row) == row[0]
    )
    return hits / len(test_data)

# Build the tree on the training split and report accuracy on the test split.
# The original loop was broken: it referenced undefined names `tree` and
# `test_label`, swapped the arguments to predict(), and `test_data[i]` indexes
# columns rather than rows.  The test() helper above already does this
# correctly, so use it.
decision_tree = create_decision_tree(train_data)
accuracy = test(decision_tree, test_data)
print("分类精度为: ", accuracy)
