import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from urllib.request import urlretrieve
import random
from collections import Counter

# Configure Matplotlib globally so CJK labels and minus signs render correctly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],   # SimHei font for Chinese text
    'axes.unicode_minus': False,     # render '-' properly alongside CJK fonts
})

class ID3DecisionTree:
    """Binary decision-tree classifier that greedily maximizes information gain.

    Each internal node tests ``feature <= threshold``; leaves store a class label.
    The tree is represented as nested dicts built by ``build_tree``.
    """

    def __init__(self, max_depth=None):
        """Initialize the tree.

        Args:
            max_depth: Maximum depth of the tree; ``None`` grows until nodes
                are pure or no useful split remains.
        """
        self.tree = None            # root node dict, populated by fit()
        self.max_depth = max_depth

    def calculate_entropy(self, y):
        """Return the Shannon entropy (base 2) of the label array ``y``.

        An empty array yields 0.0 (guards against division by zero).
        """
        n = len(y)
        if n == 0:
            return 0.0
        entropy = 0.0
        for count in Counter(y).values():
            probability = count / n
            entropy -= probability * np.log2(probability)
        return entropy

    def calculate_information_gain(self, X, y, feature_index, threshold):
        """Information gain of splitting (X, y) on ``X[:, feature_index] <= threshold``."""
        parent_entropy = self.calculate_entropy(y)

        left_mask = X[:, feature_index] <= threshold
        right_mask = ~left_mask
        left_y = y[left_mask]
        right_y = y[right_mask]

        # A split that sends every sample to one side provides no information.
        if len(left_y) == 0 or len(right_y) == 0:
            return 0

        # Weighted average entropy of the two children.
        child_entropy = (len(left_y) / len(y)) * self.calculate_entropy(left_y) + \
                        (len(right_y) / len(y)) * self.calculate_entropy(right_y)

        return parent_entropy - child_entropy

    def find_best_split(self, X, y):
        """Find the split with the highest information gain.

        Returns:
            (best_feature, best_threshold, best_gain). When no candidate split
            exists, returns (None, None, -1); callers treat gain <= 0 as "stop".
        """
        best_gain = -1
        best_feature = None
        best_threshold = None

        for feature_idx in range(X.shape[1]):
            # Candidate thresholds are the unique feature values. The maximum
            # value is skipped: "x <= max" keeps everything on the left and
            # always yields zero gain, so evaluating it is wasted work.
            thresholds = np.unique(X[:, feature_idx])[:-1]

            for threshold in thresholds:
                gain = self.calculate_information_gain(X, y, feature_idx, threshold)
                if gain > best_gain:
                    best_gain = gain
                    best_feature = feature_idx
                    best_threshold = threshold

        return best_feature, best_threshold, best_gain

    def build_tree(self, X, y, depth=0):
        """Recursively build the tree and return its root node dict."""
        # Pure node: every sample shares one class.
        if len(np.unique(y)) == 1:
            return {'class': y[0], 'is_leaf': True}

        # Depth limit reached or no features available: majority-class leaf.
        if (self.max_depth is not None and depth >= self.max_depth) or X.shape[1] == 0:
            most_common_class = Counter(y).most_common(1)[0][0]
            return {'class': most_common_class, 'is_leaf': True}

        best_feature, best_threshold, best_gain = self.find_best_split(X, y)

        # No split improves purity: stop with a majority-class leaf.
        if best_gain <= 0:
            most_common_class = Counter(y).most_common(1)[0][0]
            return {'class': most_common_class, 'is_leaf': True}

        left_mask = X[:, best_feature] <= best_threshold
        right_mask = ~left_mask

        return {
            'feature': best_feature,
            'threshold': best_threshold,
            'left': self.build_tree(X[left_mask], y[left_mask], depth + 1),
            'right': self.build_tree(X[right_mask], y[right_mask], depth + 1),
            'is_leaf': False,
        }

    def fit(self, X, y):
        """Train the model: build the decision tree from (X, y)."""
        self.tree = self.build_tree(X, y)

    def predict_sample(self, x, tree=None):
        """Predict the class of a single sample ``x`` by walking the tree."""
        if tree is None:
            tree = self.tree

        if tree['is_leaf']:
            return tree['class']

        # Descend left or right depending on the node's feature test.
        if x[tree['feature']] <= tree['threshold']:
            return self.predict_sample(x, tree['left'])
        return self.predict_sample(x, tree['right'])

    def predict(self, X):
        """Predict classes for every row of ``X``; returns a numpy array."""
        return np.array([self.predict_sample(x) for x in X])

    def accuracy(self, y_true, y_pred):
        """Fraction of predictions matching the true labels."""
        return np.sum(y_true == y_pred) / len(y_true)

# Load the Wine dataset
def load_wine_data():
    """Download (only if not cached locally) and load the UCI Wine dataset.

    Returns:
        (X, y): feature matrix of shape (n_samples, 13) and the class
        label vector (values 1, 2, 3 per the UCI file's first column).
    """
    from pathlib import Path

    data_file = Path("wine.data")
    # Avoid re-downloading on every run: fetch only when the cache is absent.
    if not data_file.exists():
        url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
        urlretrieve(url, str(data_file))

    # Column names: class label first, then the 13 chemical attributes.
    columns = ['Class', 'Alcohol', 'Malic_acid', 'Ash', 'Alcalinity_of_ash', 'Magnesium',
               'Total_phenols', 'Flavanoids', 'Nonflavanoid_phenols', 'Proanthocyanins',
               'Color_intensity', 'Hue', 'OD280/OD315_of_diluted_wines', 'Proline']
    df = pd.read_csv(str(data_file), header=None, names=columns)

    X = df.iloc[:, 1:].values  # features
    y = df.iloc[:, 0].values   # labels

    return X, y

# Main entry point
def main():
    """Experiment driver: load Wine data, sweep tree depths, plot, and report."""
    # Seed the RNG so the train/test split (and all printed results) are
    # reproducible across runs.
    random.seed(42)

    # Load the data
    X, y = load_wine_data()

    # Dataset overview
    print("Wine数据集基本情况：")
    print(f"记录数：{X.shape[0]}")
    print(f"属性数：{X.shape[1]}")
    print("数据内容：13个葡萄酒化学特征和1个类别标签（3种葡萄酒）")
    # Counter avoids np.bincount's spurious leading zero (classes start at 1).
    print(f"类别分布：{dict(sorted(Counter(y).items()))}")

    # Random 70/30 train/test split
    indices = list(range(len(X)))
    random.shuffle(indices)
    split_idx = int(0.7 * len(X))
    train_indices, test_indices = indices[:split_idx], indices[split_idx:]

    X_train, X_test = X[train_indices], X[test_indices]
    y_train, y_test = y[train_indices], y[test_indices]

    # Evaluate accuracy for a range of maximum depths
    max_depths = range(1, 11)
    accuracies = []

    for depth in max_depths:
        tree = ID3DecisionTree(max_depth=depth)
        tree.fit(X_train, y_train)
        y_pred = tree.predict(X_test)
        acc = tree.accuracy(y_test, y_pred)
        accuracies.append(acc)
        print(f"最大深度={depth}时，准确率：{acc:.4f}")

    # Plot accuracy versus maximum depth
    plt.figure(figsize=(10, 6))
    plt.plot(max_depths, accuracies, marker='o')
    plt.title('树深度与准确率关系')
    plt.xlabel('最大深度')
    plt.ylabel('准确率')
    plt.xticks(max_depths)
    plt.grid(True)
    plt.show()

    # Retrain with the best-performing depth for the final prediction
    best_depth = max_depths[np.argmax(accuracies)]
    print(f"\n最佳树深度为：{best_depth}")

    tree = ID3DecisionTree(max_depth=best_depth)
    tree.fit(X_train, y_train)
    y_pred = tree.predict(X_test)

    # Show a sample of true vs. predicted labels
    print("\n部分预测结果对比：")
    print("真实标签 | 预测标签")
    print("-" * 20)
    for true, pred in zip(y_test[:10], y_pred[:10]):
        print(f"    {true}    |    {pred}")

# Run the experiment only when executed as a script, not on import.
if __name__ == "__main__":
    main()
    
