
import matplotlib
matplotlib.use('Agg')

import pandas as pd
import numpy as np
from collections import Counter
import random
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
import os

# Announce the task. The Agg backend selected above renders off-screen,
# so all figures are written to disk only — no GUI window will pop up.
print("--- 开始执行任务3：从零实现 ID3 决策树  ---")
print("信息：使用 'Agg' 后端，将不会在屏幕上弹出图片窗口。")

def calculate_entropy(y):
    """Return the Shannon entropy (base 2) of integer label array *y*.

    *y* must contain non-negative integers (an ``np.bincount``
    requirement). An empty array has entropy 0.0 — the original
    implementation divided by zero and returned ``nan`` in that case.
    """
    if len(y) == 0:
        return 0.0
    counts = np.bincount(y)
    probabilities = counts[counts > 0] / len(y)
    return -np.sum(probabilities * np.log2(probabilities))


class Node:
    """One tree node: either an internal split (feature_index, threshold,
    two children) or a leaf carrying an integer class code in ``value``."""

    def __init__(self, feature_index=None, threshold=None, left=None, right=None, *, value=None):
        self.feature_index = feature_index
        self.threshold = threshold
        self.left = left
        self.right = right
        self.value = value

    def is_leaf_node(self):
        # Leaves are exactly the nodes that store a class code.
        return self.value is not None


class DecisionTreeID3:
    """ID3-style decision tree with binary ``<= threshold`` splits chosen
    greedily by information gain.

    Class labels may be arbitrary values: they are coded to integers
    0..k-1 internally (for ``np.bincount``) and decoded in :meth:`predict`.
    """

    def __init__(self, min_samples_split=2, max_depth=100):
        # min_samples_split: nodes smaller than this become majority leaves.
        # max_depth: hard cap on recursion depth.
        self.min_samples_split = min_samples_split
        self.max_depth = max_depth
        self.root = None

    def fit(self, X, y):
        """Build the tree from feature matrix ``X`` and label vector ``y``.

        Raises ``ValueError`` on an empty training set (the original
        silently built a tree whose only leaf held ``None``).
        """
        if len(y) == 0:
            raise ValueError("cannot fit a decision tree on an empty dataset")
        # Map arbitrary labels to integer codes 0..k-1.
        self.label_map, y_int = np.unique(y, return_inverse=True)
        self.root = self._grow_tree(X, y_int)

    def _grow_tree(self, X, y, depth=0):
        # y is never empty here: fit() rejects empty input and _best_split
        # only returns splits with two non-empty halves, so every leaf can
        # safely take the majority class (no None leaves are created).
        if (depth >= self.max_depth
                or len(np.unique(y)) == 1
                or len(y) < self.min_samples_split):
            return Node(value=np.bincount(y).argmax())
        best_feature, best_threshold = self._best_split(X, y)
        if best_feature is None:
            # No candidate split separates the data: majority leaf.
            return Node(value=np.bincount(y).argmax())
        left_indices = np.where(X[:, best_feature] <= best_threshold)[0]
        right_indices = np.where(X[:, best_feature] > best_threshold)[0]
        left = self._grow_tree(X[left_indices, :], y[left_indices], depth + 1)
        right = self._grow_tree(X[right_indices, :], y[right_indices], depth + 1)
        return Node(best_feature, best_threshold, left, right)

    def _best_split(self, X, y):
        """Return the ``(feature_index, threshold)`` pair maximizing
        information gain, or ``(None, None)`` if no split produces two
        non-empty partitions."""
        best_gain, split_index, split_threshold = -1.0, None, None
        parent_entropy = calculate_entropy(y)
        for feature_index in range(X.shape[1]):
            # Every observed feature value is a candidate threshold.
            for threshold in np.unique(X[:, feature_index]):
                left_indices = np.where(X[:, feature_index] <= threshold)[0]
                right_indices = np.where(X[:, feature_index] > threshold)[0]
                if len(left_indices) == 0 or len(right_indices) == 0:
                    continue
                n, n_l, n_r = len(y), len(left_indices), len(right_indices)
                children_entropy = ((n_l / n) * calculate_entropy(y[left_indices])
                                    + (n_r / n) * calculate_entropy(y[right_indices]))
                gain = parent_entropy - children_entropy
                if gain > best_gain:
                    best_gain, split_index, split_threshold = gain, feature_index, threshold
        return split_index, split_threshold

    def predict(self, X):
        """Return one predicted label (in the original label space) per
        row of ``X``.

        Fix over the original: no predictions are silently filtered out,
        so the output always aligns index-for-index with ``X`` (the old
        None-filtering could shrink the result and misalign it against
        the ground-truth labels during evaluation).
        """
        codes = np.array([self._traverse_tree(x, self.root) for x in X], dtype=int)
        return self.label_map[codes]

    def _traverse_tree(self, x, node):
        # Iterative descent: "<= threshold" goes left, otherwise right.
        while not node.is_leaf_node():
            node = node.left if x[node.feature_index] <= node.threshold else node.right
        return node.value

# ---- Load the full Wine dataset (13 numeric features, 3 classes) from UCI ----
print("\n正在加载完整的 Wine 数据集...")
col_names = ['class_label', 'alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity', 'hue', 'od280/od315_of_diluted_wines', 'proline']
wine_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
# NOTE(review): fetched over the network on every run with no local
# fallback — this line fails when offline or if the UCI mirror moves.
wine_df_full = pd.read_csv(wine_url, header=None, names=col_names)
print("成功从UCI在线地址加载Wine数据集。")
X_wine = wine_df_full.drop('class_label', axis=1).values  # feature matrix
y_wine = wine_df_full['class_label'].values  # class labels (first column)
# ---- Manual, seeded shuffle and 80/20 train/test split ----
random.seed(42)
combined_wine = list(zip(X_wine, y_wine))
random.shuffle(combined_wine)
X_wine_shuffled, y_wine_shuffled = zip(*combined_wine)
split_index_wine = int(0.8 * len(X_wine_shuffled))
X_train_w = np.array(X_wine_shuffled[:split_index_wine])
y_train_w = np.array(y_wine_shuffled[:split_index_wine])
X_test_w = np.array(X_wine_shuffled[split_index_wine:])
y_test_w = np.array(y_wine_shuffled[split_index_wine:])
print(f"Wine数据集划分完毕: {len(X_train_w)} 条训练数据, {len(X_test_w)} 条测试数据。")
# ---- Train the from-scratch ID3 tree and score it on the held-out 20% ----
tree = DecisionTreeID3(max_depth=10)
print("开始构建决策树...")
tree.fit(X_train_w, y_train_w)
print("决策树构建完成。")
print("开始使用决策树进行预测...")
predictions_w = tree.predict(X_test_w)
print("预测完成。")
# NOTE(review): this relies on predictions_w aligning one-to-one with
# y_test_w — verify predict() returns exactly one label per test row.
accuracy_w = np.sum(predictions_w == y_test_w) / len(y_test_w)
print(f"\n--- ID3决策树在 Wine 测试集上的分类结果 ---")
print(f"预测准确率: {accuracy_w:.4f}")

# ---- Confusion-matrix heatmap of the test-set predictions ----
print("\n正在绘制分类结果的混淆矩阵...")
labels = sorted(wine_df_full['class_label'].unique())
cm = confusion_matrix(y_test_w, predictions_w, labels=labels)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)
plt.title('ID3 Decision Tree Confusion Matrix', fontsize=16)
plt.xlabel('Predicted Label', fontsize=12)
plt.ylabel('True Label', fontsize=12)

image_filename = 'task3_id3_matrix_GUARANTEED.png' # use a fresh file name
save_path = os.path.join(os.getcwd(), image_filename)
# Save into the current working directory; the Agg backend writes
# straight to disk, so this is the script's only visual output.
try:
    plt.savefig(save_path, dpi=300)
    print("\n" + "="*50)
    print(f"!!! 任务三图片保存成功 !!!")
    print(f"完整路径: {save_path}")
    print("请检查您的文件夹。")
    print("="*50 + "\n")
except Exception as e:
    # Broad catch is deliberate: this is a best-effort save-and-report step.
    print(f"\n错误：保存图片失败，原因为: {e}\n")


print("--- 任务3 执行完毕 ---")
