"""
Week 2: PAC学习框架实现
PAC (Probably Approximately Correct) Learning Framework
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Callable
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class PACLearning:
    """PAC (Probably Approximately Correct) learning framework demo.

    Bundles toy target concepts (axis-aligned rectangle, circle), risk
    estimators, the Hoeffding bound / sample-complexity formulas, and two
    demonstration routines (PAC learning curves and the No Free Lunch
    theorem).
    """

    def __init__(self):
        # Descriptive name of the framework (informational only).
        self.name = "PAC Learning Framework"

    def generate_concept_data(self, n_samples: int, concept_func: Callable,
                              noise_rate: float = 0.0,
                              seed: int = 42) -> tuple[np.ndarray, np.ndarray]:
        """Sample labeled concept-learning data uniformly from [-1, 1]^2.

        Args:
            n_samples: number of points to draw.
            concept_func: maps a 2-D point to its 0/1 label.
            noise_rate: fraction of labels to flip at random.
            seed: RNG seed. The default (42) preserves the original fully
                reproducible behavior; the seed is now a parameter so
                callers can draw different datasets.

        Returns:
            X: (n_samples, 2) feature matrix.
            y: 0/1 label vector of length n_samples.
        """
        np.random.seed(seed)
        X = np.random.uniform(-1, 1, (n_samples, 2))
        y = np.array([concept_func(x) for x in X])

        # Simulate label noise by flipping a random subset of labels.
        if noise_rate > 0:
            n_noise = int(n_samples * noise_rate)
            noise_indices = np.random.choice(n_samples, n_noise, replace=False)
            y[noise_indices] = 1 - y[noise_indices]

        return X, y

    def rectangle_concept(self, x: np.ndarray,
                          bounds: tuple[float, float, float, float] = (-0.5, 0.5, -0.5, 0.5)) -> int:
        """Axis-aligned rectangle concept: 1 inside the rectangle, else 0.

        Args:
            x: input point [x1, x2].
            bounds: rectangle edges (x1_min, x1_max, x2_min, x2_max).

        Returns:
            Label (0 or 1); boundary points count as inside.
        """
        x1_min, x1_max, x2_min, x2_max = bounds
        return 1 if (x1_min <= x[0] <= x1_max and x2_min <= x[1] <= x2_max) else 0

    def circle_concept(self, x: np.ndarray, center: tuple[float, float] = (0, 0),
                       radius: float = 0.5) -> int:
        """Circle concept: 1 inside (or on) the circle, else 0."""
        distance = np.sqrt((x[0] - center[0])**2 + (x[1] - center[1])**2)
        return 1 if distance <= radius else 0

    def empirical_risk(self, X: np.ndarray, y: np.ndarray, hypothesis: Callable) -> float:
        """Empirical risk (training error): misclassification rate on (X, y)."""
        predictions = np.array([hypothesis(x) for x in X])
        return np.mean(predictions != y)

    def true_risk(self, hypothesis: Callable, true_concept: Callable, n_test: int = 10000) -> float:
        """Monte-Carlo estimate of the true (generalization) risk.

        Draws n_test fresh uniform points on [-1, 1]^2 and measures the
        disagreement rate between hypothesis and target concept. Uses the
        current global NumPy RNG state (not re-seeded here).
        """
        X_test = np.random.uniform(-1, 1, (n_test, 2))
        y_true = np.array([true_concept(x) for x in X_test])
        y_pred = np.array([hypothesis(x) for x in X_test])
        return np.mean(y_pred != y_true)

    def hoeffding_bound(self, n: int, delta: float) -> float:
        """Hoeffding bound: P(|R(h) - R_emp(h)| > ε) ≤ 2exp(-2nε²).

        Solves the bound for the error tolerance ε achieved with
        confidence 1 - δ at sample size n.
        """
        return np.sqrt(np.log(2 / delta) / (2 * n))

    def sample_complexity(self, epsilon: float, delta: float) -> int:
        """Minimum sample count for (ε, δ)-PAC learning of one hypothesis.

        Inverts the Hoeffding bound: n ≥ ln(2/δ) / (2ε²).
        """
        return int(np.ceil(np.log(2 / delta) / (2 * epsilon**2)))

    def demonstrate_pac_learning(self):
        """Demonstrate PAC learning of a rectangle concept; prints and plots."""
        print("=== PAC学习框架演示 ===\n")

        # 1. Target concept: a fixed axis-aligned rectangle.
        true_concept = lambda x: self.rectangle_concept(x, (-0.3, 0.3, -0.3, 0.3))

        # 2. Learning quality for increasing training-set sizes.
        sample_sizes = [10, 50, 100, 500, 1000]
        empirical_risks = []
        true_risks = []
        hoeffding_bounds = []

        delta = 0.1  # confidence parameter

        print("样本大小\t经验风险\t真实风险\tHoeffding界")
        print("-" * 50)

        for n in sample_sizes:
            # Draw a training set from the target concept (no noise).
            X_train, y_train = self.generate_concept_data(n, true_concept)

            # Learn a hypothesis: the tightest axis-aligned rectangle
            # around the positive examples (the classic consistent learner).
            positive_points = X_train[y_train == 1]
            if len(positive_points) > 0:
                x1_min, x1_max = positive_points[:, 0].min(), positive_points[:, 0].max()
                x2_min, x2_max = positive_points[:, 1].min(), positive_points[:, 1].max()
                # Bind the bounds as a default argument: a plain closure
                # late-binds the loop locals and would silently pick up a
                # later iteration's bounds if the lambda outlived this pass.
                learned_hypothesis = lambda x, b=(x1_min, x1_max, x2_min, x2_max): \
                    self.rectangle_concept(x, b)
            else:
                # No positive examples seen: predict all-negative.
                learned_hypothesis = lambda x: 0

            # Evaluate the learned hypothesis.
            emp_risk = self.empirical_risk(X_train, y_train, learned_hypothesis)
            gen_risk = self.true_risk(learned_hypothesis, true_concept)
            hoeff_bound = self.hoeffding_bound(n, delta)

            empirical_risks.append(emp_risk)
            true_risks.append(gen_risk)
            hoeffding_bounds.append(hoeff_bound)

            print(f"{n}\t\t{emp_risk:.4f}\t\t{gen_risk:.4f}\t\t{hoeff_bound:.4f}")

        # 3. Plot learning curves against the theoretical bound.
        self.visualize_pac_results(sample_sizes, empirical_risks, true_risks, hoeffding_bounds)

        # 4. Sample-complexity table for several (ε, δ) targets.
        print(f"\n=== 样本复杂度分析 ===")
        epsilons = [0.1, 0.05, 0.01]
        deltas = [0.1, 0.05, 0.01]

        print("ε\tδ\t所需样本数")
        print("-" * 30)
        for eps in epsilons:
            for dlt in deltas:  # distinct name: don't clobber `delta` above
                n_required = self.sample_complexity(eps, dlt)
                print(f"{eps}\t{dlt}\t{n_required}")

    def visualize_pac_results(self, sample_sizes: list[int], empirical_risks: list[float],
                             true_risks: list[float], hoeffding_bounds: list[float]):
        """Plot risk curves, the Hoeffding bound, and sample complexity."""
        plt.figure(figsize=(15, 5))

        # Panel 1: empirical vs. true risk as the sample grows.
        plt.subplot(1, 3, 1)
        plt.plot(sample_sizes, empirical_risks, 'bo-', label='经验风险', markersize=6)
        plt.plot(sample_sizes, true_risks, 'ro-', label='真实风险', markersize=6)
        plt.xlabel('样本大小')
        plt.ylabel('风险')
        plt.title('经验风险 vs 真实风险')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.xscale('log')

        # Panel 2: theoretical bound vs. the observed risk gap.
        plt.subplot(1, 3, 2)
        plt.plot(sample_sizes, hoeffding_bounds, 'go-', label='Hoeffding界', markersize=6)
        risk_diff = [abs(t - e) for t, e in zip(true_risks, empirical_risks)]
        plt.plot(sample_sizes, risk_diff, 'mo-', label='实际风险差', markersize=6)
        plt.xlabel('样本大小')
        plt.ylabel('误差界')
        plt.title('Hoeffding界 vs 实际误差')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.xscale('log')
        plt.yscale('log')

        # Panel 3: required samples as a function of the accuracy target ε.
        plt.subplot(1, 3, 3)
        epsilons = np.logspace(-2, -0.5, 20)
        delta = 0.1
        complexities = [self.sample_complexity(eps, delta) for eps in epsilons]
        plt.plot(epsilons, complexities, 'co-', markersize=4)
        plt.xlabel('精度 ε')
        plt.ylabel('所需样本数')
        plt.title(f'样本复杂度 (δ={delta})')
        plt.grid(True, alpha=0.3)
        plt.xscale('log')
        plt.yscale('log')

        plt.tight_layout()
        plt.show()

    def demonstrate_no_free_lunch(self):
        """Demonstrate the No Free Lunch theorem with two learners/concepts."""
        print("\n=== No Free Lunch定理演示 ===\n")

        # Two different target concepts.
        concept1 = lambda x: self.rectangle_concept(x, (-0.3, 0.3, -0.3, 0.3))
        concept2 = lambda x: self.circle_concept(x, (0, 0), 0.3)

        # Two learning algorithms with different inductive biases.
        def rectangle_learner(X, y):
            """Fit the tightest axis-aligned rectangle around the positives."""
            positive_points = X[y == 1]
            if len(positive_points) > 0:
                x1_min, x1_max = positive_points[:, 0].min(), positive_points[:, 0].max()
                x2_min, x2_max = positive_points[:, 1].min(), positive_points[:, 1].max()
                # Bind bounds as a default for the same late-binding safety
                # as in demonstrate_pac_learning.
                return lambda x, b=(x1_min, x1_max, x2_min, x2_max): \
                    self.rectangle_concept(x, b)
            else:
                return lambda x: 0

        def circle_learner(X, y):
            """Fit a circle: centroid of positives, mean distance as radius."""
            positive_points = X[y == 1]
            if len(positive_points) > 0:
                center = positive_points.mean(axis=0)
                distances = [np.linalg.norm(p - center) for p in positive_points]
                radius = np.mean(distances)
                return lambda x, c=tuple(center), r=radius: self.circle_concept(x, c, r)
            else:
                return lambda x: 0

        n_samples = 100

        # Evaluate both learners on the rectangle concept.
        print("在矩形概念上的表现:")
        X1, y1 = self.generate_concept_data(n_samples, concept1)

        h1_rect = rectangle_learner(X1, y1)
        h1_circle = circle_learner(X1, y1)

        risk1_rect = self.true_risk(h1_rect, concept1)
        risk1_circle = self.true_risk(h1_circle, concept1)

        print(f"  矩形学习器风险: {risk1_rect:.4f}")
        print(f"  圆形学习器风险: {risk1_circle:.4f}")

        # Evaluate both learners on the circle concept.
        print("\n在圆形概念上的表现:")
        X2, y2 = self.generate_concept_data(n_samples, concept2)

        h2_rect = rectangle_learner(X2, y2)
        h2_circle = circle_learner(X2, y2)

        risk2_rect = self.true_risk(h2_rect, concept2)
        risk2_circle = self.true_risk(h2_circle, concept2)

        print(f"  矩形学习器风险: {risk2_rect:.4f}")
        print(f"  圆形学习器风险: {risk2_circle:.4f}")

        print(f"\nNo Free Lunch定理说明:")
        print(f"  没有一个学习算法在所有问题上都是最优的")
        print(f"  算法的性能取决于问题的性质和算法的归纳偏置")
def demonstrate_feasibility():
    """Run the full learning-feasibility walkthrough.

    Executes the PAC-learning demonstration followed by the No Free Lunch
    demonstration, then prints a five-point summary.
    """
    framework = PACLearning()

    print("=== 学习可行性演示 ===\n")

    # Part 1: PAC learning curves and sample-complexity analysis.
    framework.demonstrate_pac_learning()

    # Part 2: No Free Lunch comparison of two learners.
    framework.demonstrate_no_free_lunch()

    print("\n=== 学习可行性总结 ===")
    summary = (
        "1. PAC学习框架提供了学习可行性的理论基础",
        "2. Hoeffding界给出了泛化误差的概率界",
        "3. 样本复杂度告诉我们需要多少数据才能学好",
        "4. No Free Lunch定理提醒我们没有万能的算法",
        "5. 归纳偏置是学习成功的关键",
    )
    for line in summary:
        print(line)

# Script entry point: run the complete feasibility demonstration when
# executed directly (not on import).
if __name__ == "__main__":
    demonstrate_feasibility()