import numpy as np
from collections import Counter
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle, Arrow

class DecisionTree:
    """CART-style decision tree for categorical features.

    Every internal node performs a binary equality test
    (feature == value goes left, feature != value goes right), chosen to
    minimise the weighted Gini impurity. The fitted tree is a nested dict;
    helpers are provided to plot the tree and a samples-weighted
    feature-importance bar chart with matplotlib.
    """

    def __init__(self, max_depth=None, min_samples_split=2):
        # max_depth: maximum tree depth (None = grow until pure).
        # min_samples_split: minimum number of samples required to split a node.
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.tree = None                   # nested dict built by fit()
        self.feature_names = None          # optional display names for features
        self.class_names = None            # optional display names for classes
        self.feature_importances_ = None   # filled by fit()

    def fit(self, X, y, feature_names=None, class_names=None):
        """Build the tree from feature matrix X (2-D) and labels y (1-D)."""
        self.feature_names = feature_names
        self.class_names = class_names
        self.tree = self._build_tree(X, y, depth=0)
        self._calculate_feature_importance(X.shape[1])

    @staticmethod
    def _majority_class(y):
        """Return the most frequent label in y."""
        return Counter(y).most_common(1)[0][0]

    def _build_tree(self, X, y, depth):
        """Recursively grow the tree; returns a leaf or internal-node dict."""
        # Stop when the node is pure, too small, or at maximum depth.
        if len(set(y)) == 1:
            return {'class': y[0], 'samples': len(y)}
        if len(X) < self.min_samples_split:
            return {'class': self._majority_class(y), 'samples': len(y)}
        if self.max_depth is not None and depth >= self.max_depth:
            return {'class': self._majority_class(y), 'samples': len(y)}

        best_feature, best_threshold = self._find_best_split(X, y)
        if best_feature is None:  # no non-degenerate split exists
            return {'class': self._majority_class(y), 'samples': len(y)}

        # Binary equality split: "== threshold" goes left, everything else right.
        left_indices = X[:, best_feature] == best_threshold
        right_indices = ~left_indices

        left_tree = self._build_tree(X[left_indices], y[left_indices], depth + 1)
        right_tree = self._build_tree(X[right_indices], y[right_indices], depth + 1)

        return {
            'feature': best_feature,
            'threshold': best_threshold,
            'left': left_tree,
            'right': right_tree,
            'samples': len(y)
        }

    def _find_best_split(self, X, y):
        """Return the (feature, value) equality split with the lowest weighted
        Gini impurity, or (None, None) if every candidate split is degenerate."""
        best_gini = float('inf')
        best_feature = None
        best_threshold = None

        for feature in range(X.shape[1]):
            # Candidate thresholds are the distinct values of this column.
            for threshold in set(X[:, feature]):
                left_indices = X[:, feature] == threshold
                right_indices = ~left_indices

                # Skip splits that leave one side empty.
                if len(y[left_indices]) == 0 or len(y[right_indices]) == 0:
                    continue

                gini = self._gini_impurity(y[left_indices], y[right_indices])
                if gini < best_gini:
                    best_gini = gini
                    best_feature = feature
                    best_threshold = threshold

        return best_feature, best_threshold

    def _gini_impurity(self, left_y, right_y):
        """Weighted Gini impurity of a two-way partition of labels."""
        n_left = len(left_y)
        n_right = len(right_y)
        n_total = n_left + n_right

        gini_left = 1.0 - sum((np.sum(left_y == c) / n_left) ** 2 for c in set(left_y))
        gini_right = 1.0 - sum((np.sum(right_y == c) / n_right) ** 2 for c in set(right_y))

        # Weight each side's impurity by its share of the samples.
        return (n_left / n_total) * gini_left + (n_right / n_total) * gini_right

    def predict(self, X):
        """Predict a label for every row of X."""
        return np.array([self._predict_one(x, self.tree) for x in X])

    def _predict_one(self, x, tree):
        """Walk the tree for a single sample x."""
        if 'class' in tree:  # reached a leaf
            return tree['class']
        if x[tree['feature']] == tree['threshold']:
            return self._predict_one(x, tree['left'])
        else:
            return self._predict_one(x, tree['right'])

    def _calculate_feature_importance(self, n_features):
        """Score each feature by the number of samples routed through the
        internal nodes splitting on it, normalised to sum to 1.

        Fix: when the tree is a single leaf (no splits at all) the raw
        scores sum to 0 and the old unconditional division produced an
        all-NaN array; normalisation is now skipped in that case.
        """
        self.feature_importances_ = np.zeros(n_features)
        self._traverse_tree(self.tree)
        total = np.sum(self.feature_importances_)
        if total > 0:
            self.feature_importances_ /= total

    def _traverse_tree(self, node):
        """Accumulate per-feature sample counts over all internal nodes."""
        if 'feature' in node:
            self.feature_importances_[node['feature']] += node['samples']
            self._traverse_tree(node['left'])
            self._traverse_tree(node['right'])

    def plot_tree(self, figsize=(12, 8)):
        """Draw the fitted tree with matplotlib."""
        fig, ax = plt.subplots(figsize=figsize)
        ax.axis('off')
        self._plot_node(ax, self.tree, (0.5, 0.9), 0.4, 0.1)
        plt.title("决策树可视化", pad=20, fontsize=16)
        plt.tight_layout()
        plt.show()

    def _plot_node(self, ax, node, xy, width, height):
        """Recursively draw one node (and its subtrees) centred at xy."""
        if 'class' in node:
            # Leaf: coloured circle labelled with class name and sample count.
            class_name = str(node['class']) if self.class_names is None else self.class_names[node['class']]
            ax.add_patch(Circle(xy, 0.03, color='lightgreen' if node['class'] == 1 else 'lightcoral'))
            ax.text(xy[0], xy[1],
                    f"类别: {class_name}\n样本数: {node['samples']}",
                    ha='center', va='center', bbox=dict(facecolor='white', alpha=0.8))
        else:
            # Internal node: rectangle showing the equality test.
            feature_name = f"特征{node['feature']}" if self.feature_names is None else self.feature_names[node['feature']]
            threshold_value = node['threshold']
            ax.add_patch(Rectangle((xy[0]-0.05, xy[1]-0.02), 0.1, 0.04, color='skyblue'))
            ax.text(xy[0], xy[1],
                    f"{feature_name} = {threshold_value}?\n样本数: {node['samples']}",
                    ha='center', va='center', bbox=dict(facecolor='white', alpha=0.8))

            # Children sit half a width to either side, one level down.
            left_xy = (xy[0] - width/2, xy[1] - height)
            right_xy = (xy[0] + width/2, xy[1] - height)

            ax.annotate("", xy=left_xy, xytext=xy,
                        arrowprops=dict(arrowstyle="->", lw=1.5, color='gray'))
            ax.annotate("", xy=right_xy, xytext=xy,
                        arrowprops=dict(arrowstyle="->", lw=1.5, color='gray'))

            # Edge labels: "是" (yes) on the left branch, "否" (no) on the right.
            ax.text((xy[0] + left_xy[0])/2, (xy[1] + left_xy[1])/2, "是", ha='center', va='center')
            ax.text((xy[0] + right_xy[0])/2, (xy[1] + right_xy[1])/2, "否", ha='center', va='center')

            self._plot_node(ax, node['left'], left_xy, width/2, height)
            self._plot_node(ax, node['right'], right_xy, width/2, height)

    def plot_feature_importance(self):
        """Draw a bar chart of the normalised feature importances."""
        if self.feature_importances_ is None:
            print("请先训练模型")
            return

        plt.figure(figsize=(10, 5))
        features = self.feature_names if self.feature_names else [f"特征{i}" for i in range(len(self.feature_importances_))]
        bars = plt.bar(range(len(features)), self.feature_importances_, color='dodgerblue')

        # Annotate each bar with its numeric value.
        for bar in bars:
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width()/2., height,
                    f'{height:.2f}', ha='center', va='bottom')

        plt.title("特征重要性分析", pad=20, fontsize=14)
        plt.xlabel("特征")
        plt.ylabel("重要性得分")
        plt.xticks(range(len(features)), features, rotation=45)
        plt.grid(axis='y', linestyle='--', alpha=0.7)
        plt.tight_layout()
        plt.show()

# Watermelon dataset: (color, root, knock sound, texture, navel, touch, label)
watermelon_data = [
    ['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
    ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
    ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
    ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
    ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
    ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '好瓜'],
    ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', '好瓜'],
    ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', '好瓜'],
    ['乌黑', '稍蜷', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜'],
    ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', '坏瓜'],
    ['浅白', '硬挺', '清脆', '模糊', '平坦', '硬滑', '坏瓜'],
    ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', '坏瓜'],
    ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', '坏瓜'],
    ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', '坏瓜'],
    ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '坏瓜'],
    ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', '坏瓜'],
    ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜']
]

# Split into raw (string) features and labels.
X_raw = np.array([row[:-1] for row in watermelon_data])
y = np.array([row[-1] for row in watermelon_data])

# Encode each categorical feature column to integer codes.
# Fix: encode into a fresh int array. The old code assigned the codes back
# into the string ndarray, which silently converted them to strings
# ('0', '1', ...) — it only worked because the tree uses equality tests.
encoders = [LabelEncoder() for _ in range(X_raw.shape[1])]
X = np.empty(X_raw.shape, dtype=int)
for i in range(X_raw.shape[1]):
    X[:, i] = encoders[i].fit_transform(X_raw[:, i])

# Encode the labels (好瓜/坏瓜 -> 0/1 in sorted order).
y_encoder = LabelEncoder()
y = y_encoder.fit_transform(y)

# Train the decision tree on the full dataset.
dt = DecisionTree(max_depth=3)
feature_names = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感']
class_names = y_encoder.inverse_transform([0, 1])
dt.fit(X, y, feature_names=feature_names, class_names=class_names)

# Visualisation
print("="*50)
print("决策树可视化与分析")
print("="*50)

# Draw the fitted tree.
dt.plot_tree(figsize=(14, 8))

# Draw the feature-importance bar chart.
dt.plot_feature_importance()

# Model evaluation: leave-one-out cross-validation.
accuracies = []
for i in range(len(X)):
    X_train = np.delete(X, i, axis=0)
    y_train = np.delete(y, i)
    X_test = X[i:i+1]
    y_test = y[i:i+1]

    dt_eval = DecisionTree(max_depth=3)
    dt_eval.fit(X_train, y_train, feature_names=feature_names, class_names=class_names)
    pred = dt_eval.predict(X_test)
    accuracies.append(accuracy_score(y_test, pred))

print(f"\n模型评估结果（留一法交叉验证）:")
print(f"平均准确率: {np.mean(accuracies):.2%}")
print(f"最低准确率: {np.min(accuracies):.2%}")
print(f"最高准确率: {np.max(accuracies):.2%}")