"""
Week 4: 梯度下降算法实现
Gradient Descent Algorithm Implementation
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Optional
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class GradientDescentRegression:
    """Linear regression trained with batch gradient descent.

    Attributes:
        theta: learned parameter vector [intercept, coef_1, ...]; None until fit().
        cost_history: cost J(θ) per iteration, including one final entry for the
            parameters after the last update.
        theta_history: parameter vector recorded alongside each cost entry.
    """

    def __init__(self, learning_rate: float = 0.01, max_iterations: int = 1000,
                 tolerance: float = 1e-6):
        """
        Args:
            learning_rate: step size α applied to each gradient update.
            max_iterations: upper bound on the number of iterations.
            tolerance: stop early when the cost change between two consecutive
                iterations drops below this value.
        """
        self.learning_rate = learning_rate
        self.max_iterations = max_iterations
        self.tolerance = tolerance
        self.theta: Optional[np.ndarray] = None
        self.cost_history: list = []
        self.theta_history: list = []

    def add_intercept(self, X: np.ndarray) -> np.ndarray:
        """Prepend a column of ones so that theta[0] acts as the intercept."""
        return np.column_stack([np.ones(X.shape[0]), X])

    def hypothesis(self, X: np.ndarray) -> np.ndarray:
        """Linear hypothesis h(x) = Xθ (X must already contain the intercept column)."""
        return X @ self.theta

    def cost_function(self, X: np.ndarray, y: np.ndarray) -> float:
        """Half mean squared error: J(θ) = (1/2m) Σ (h(x) − y)²."""
        m = X.shape[0]
        residuals = self.hypothesis(X) - y
        return (1 / (2 * m)) * np.sum(residuals ** 2)

    def compute_gradient(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Gradient of the cost: ∇J(θ) = (1/m) · Xᵀ(Xθ − y)."""
        m = X.shape[0]
        return (1 / m) * X.T @ (self.hypothesis(X) - y)

    def fit(self, X: np.ndarray, y: np.ndarray, verbose: bool = False) -> 'GradientDescentRegression':
        """Train the model with batch gradient descent.

        Args:
            X: feature matrix of shape (n_samples, n_features), without intercept.
            y: target vector of shape (n_samples,).
            verbose: print progress and the convergence message when True.

        Returns:
            self, to allow call chaining.
        """
        # Add the intercept column once up front.
        X_with_intercept = self.add_intercept(X)
        m, n = X_with_intercept.shape

        # Initialize parameters and clear any history from a previous fit.
        self.theta = np.zeros(n)
        self.cost_history = []
        self.theta_history = []

        for i in range(self.max_iterations):
            # Record cost/parameters BEFORE the update for this iteration.
            cost = self.cost_function(X_with_intercept, y)
            self.cost_history.append(cost)
            self.theta_history.append(self.theta.copy())

            gradient = self.compute_gradient(X_with_intercept, y)
            self.theta = self.theta - self.learning_rate * gradient

            # Converged when the cost change between two consecutive
            # iterations falls under the tolerance.
            if i > 0 and abs(self.cost_history[-2] - self.cost_history[-1]) < self.tolerance:
                if verbose:
                    print(f"在第{i+1}次迭代后收敛")
                break

            if verbose and (i % 100 == 0 or i < 10):
                print(f"迭代 {i+1}: 代价 = {cost:.6f}, 梯度范数 = {np.linalg.norm(gradient):.6f}")

        # Bug fix: record the final (post-update) parameters and their cost so
        # that cost_history[-1]/theta_history[-1] describe the returned model.
        # Previously the last update was never reflected in the histories.
        self.cost_history.append(self.cost_function(X_with_intercept, y))
        self.theta_history.append(self.theta.copy())

        return self

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict targets for X; raises ValueError if the model is not fitted."""
        if self.theta is None:
            raise ValueError("模型尚未训练")

        X_with_intercept = self.add_intercept(X)
        return self.hypothesis(X_with_intercept)

    def score(self, X: np.ndarray, y: np.ndarray) -> float:
        """Coefficient of determination R² = 1 − SS_res / SS_tot."""
        y_pred = self.predict(X)
        ss_res = np.sum((y - y_pred) ** 2)
        ss_tot = np.sum((y - np.mean(y)) ** 2)
        # Robustness fix: constant y makes SS_tot zero; report a perfect score
        # only when the residuals are also zero, instead of dividing by zero.
        if ss_tot == 0:
            return 1.0 if ss_res == 0 else 0.0
        return 1 - (ss_res / ss_tot)

class LearningRateAnalyzer:
    """Visual comparison of gradient-descent behaviour across learning rates."""

    def __init__(self):
        self.name = "Learning Rate Analyzer"

    def compare_learning_rates(self, X: np.ndarray, y: np.ndarray,
                             learning_rates: list[float], max_iterations: int = 1000):
        """Train one model per learning rate and draw six diagnostic subplots.

        Args:
            X: feature matrix of shape (n_samples, n_features).
            y: target vector of shape (n_samples,).
            learning_rates: step sizes to compare.
            max_iterations: iteration budget given to every model.

        Returns:
            dict mapping each learning rate to {'model', 'final_cost',
            'iterations'} where 'iterations' is the number of recorded costs.

        Side effects:
            Opens a matplotlib figure and blocks on plt.show().
        """
        results = {}

        plt.figure(figsize=(15, 10))

        # Subplot 1: cost convergence curve per learning rate (log y-axis).
        plt.subplot(2, 3, 1)
        for lr in learning_rates:
            model = GradientDescentRegression(learning_rate=lr, max_iterations=max_iterations)
            model.fit(X, y)

            plt.plot(model.cost_history, label=f'α = {lr}', linewidth=2)
            results[lr] = {
                'model': model,
                'final_cost': model.cost_history[-1],
                'iterations': len(model.cost_history)
            }

        plt.xlabel('迭代次数')
        plt.ylabel('代价函数值')
        plt.title('不同学习率的收敛曲线')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.yscale('log')

        # Subplot 2: trajectories in parameter space — only meaningful when
        # there is a single feature, i.e. a 2-D (θ₀, θ₁) parameter space.
        if X.shape[1] == 1:
            plt.subplot(2, 3, 2)

            # Evaluate the cost surface on a fixed grid of (θ₀, θ₁) values.
            theta0_range = np.linspace(-2, 4, 50)
            theta1_range = np.linspace(-1, 3, 50)
            Theta0, Theta1 = np.meshgrid(theta0_range, theta1_range)

            X_with_intercept = np.column_stack([np.ones(X.shape[0]), X])
            J_surface = np.zeros_like(Theta0)

            for i in range(Theta0.shape[0]):
                for j in range(Theta0.shape[1]):
                    theta_temp = np.array([Theta0[i, j], Theta1[i, j]])
                    predictions = X_with_intercept @ theta_temp
                    J_surface[i, j] = (1 / (2 * len(y))) * np.sum((predictions - y) ** 2)

            # Contour plot of the cost surface (return value intentionally unused).
            plt.contour(Theta0, Theta1, J_surface, levels=20, alpha=0.6)

            # Overlay each model's parameter trajectory on the contours.
            for lr in learning_rates:
                model = results[lr]['model']
                theta_history = np.array(model.theta_history)
                if len(theta_history) > 0:
                    plt.plot(theta_history[:, 0], theta_history[:, 1],
                            'o-', markersize=3, label=f'α = {lr}')

            plt.xlabel('θ₀')
            plt.ylabel('θ₁')
            plt.title('参数空间中的收敛轨迹')
            plt.legend()

        # Subplot 3: bar chart of the final cost per learning rate.
        plt.subplot(2, 3, 3)
        lrs = list(results.keys())
        final_costs = [results[lr]['final_cost'] for lr in lrs]
        iterations = [results[lr]['iterations'] for lr in lrs]

        plt.bar(range(len(lrs)), final_costs, alpha=0.7)
        plt.xlabel('学习率')
        plt.ylabel('最终代价')
        plt.title('不同学习率的最终代价')
        plt.xticks(range(len(lrs)), [f'{lr}' for lr in lrs])

        # Subplot 4: iterations used until convergence (or the budget).
        plt.subplot(2, 3, 4)
        plt.bar(range(len(lrs)), iterations, alpha=0.7, color='orange')
        plt.xlabel('学习率')
        plt.ylabel('收敛迭代次数')
        plt.title('收敛速度对比')
        plt.xticks(range(len(lrs)), [f'{lr}' for lr in lrs])

        # Subplot 5: learning rate vs final cost on log-log axes.
        plt.subplot(2, 3, 5)
        plt.loglog(lrs, final_costs, 'bo-', markersize=6, label='最终代价')
        plt.xlabel('学习率 (log scale)')
        plt.ylabel('最终代价 (log scale)')
        plt.title('学习率 vs 性能')
        plt.grid(True, alpha=0.3)

        # Subplot 6: gradient norm along each trajectory, recomputed from the
        # stored parameter history (the models do not record gradients).
        plt.subplot(2, 3, 6)
        for lr in learning_rates:
            model = results[lr]['model']
            X_with_intercept = np.column_stack([np.ones(X.shape[0]), X])

            gradient_norms = []
            for theta in model.theta_history:
                predictions = X_with_intercept @ theta
                gradient = (1 / len(y)) * X_with_intercept.T @ (predictions - y)
                gradient_norms.append(np.linalg.norm(gradient))

            plt.plot(gradient_norms, label=f'α = {lr}', linewidth=2)

        plt.xlabel('迭代次数')
        plt.ylabel('梯度范数')
        plt.title('梯度范数变化')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.yscale('log')

        plt.tight_layout()
        plt.show()

        return results

def generate_regression_data(n_samples: int = 100, noise: float = 0.1) -> tuple[np.ndarray, np.ndarray]:
    """Create a reproducible 1-D dataset following y = 2x + 1 + ε.

    Args:
        n_samples: number of points to draw.
        noise: scale of the Gaussian noise term ε.

    Returns:
        (X, y) with X of shape (n_samples, 1) and y of shape (n_samples,).
    """
    np.random.seed(42)  # fixed seed keeps every demo run reproducible
    features = np.random.randn(n_samples, 1)
    targets = 2 * features.flatten() + 1 + noise * np.random.randn(n_samples)
    return features, targets

def demonstrate_gradient_descent():
    """Demonstrate gradient descent end to end: train on synthetic data,
    print the learned parameters/cost/R², then plot the fitted line, the
    cost curve, and the parameter trajectory (blocks on plt.show())."""
    print("=== 梯度下降算法演示 ===\n")
    
    # Synthetic 1-D dataset (generate_regression_data seeds numpy internally).
    X, y = generate_regression_data(n_samples=50, noise=0.2)
    
    # 1. Train with plain batch gradient descent, printing progress.
    print("1. 基本梯度下降训练")
    model = GradientDescentRegression(learning_rate=0.1, max_iterations=1000)
    model.fit(X, y, verbose=True)
    
    print(f"   最终参数: θ₀={model.theta[0]:.4f}, θ₁={model.theta[1]:.4f}")
    print(f"   最终代价: {model.cost_history[-1]:.6f}")
    print(f"   R²得分: {model.score(X, y):.4f}")
    
    # Visualize the results in three side-by-side subplots.
    plt.figure(figsize=(15, 5))
    
    # Subplot 1: scatter of training data with the fitted regression line.
    plt.subplot(1, 3, 1)
    plt.scatter(X, y, alpha=0.6, color='blue', label='训练数据')
    
    X_line = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
    y_line = model.predict(X_line)
    plt.plot(X_line, y_line, 'r-', linewidth=2, 
             label=f'拟合直线: y = {model.theta[0]:.2f} + {model.theta[1]:.2f}x')
    
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('梯度下降拟合结果')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    # Subplot 2: cost function value per iteration.
    plt.subplot(1, 3, 2)
    plt.plot(model.cost_history, 'b-', linewidth=2)
    plt.xlabel('迭代次数')
    plt.ylabel('代价函数值')
    plt.title('代价函数收敛曲线')
    plt.grid(True, alpha=0.3)
    
    # Subplot 3: trajectory of (θ₀, θ₁) from the start point to the optimum.
    plt.subplot(1, 3, 3)
    theta_history = np.array(model.theta_history)
    plt.plot(theta_history[:, 0], theta_history[:, 1], 'ro-', markersize=3)
    plt.plot(theta_history[0, 0], theta_history[0, 1], 'go', markersize=10, label='起始点')
    plt.plot(theta_history[-1, 0], theta_history[-1, 1], 'ro', markersize=10, label='终点')
    plt.xlabel('θ₀')
    plt.ylabel('θ₁')
    plt.title('参数收敛轨迹')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    plt.tight_layout()
    plt.show()

def demonstrate_learning_rate_analysis():
    """Compare convergence behaviour across several learning rates using
    LearningRateAnalyzer, then print a summary table of final cost and
    iteration counts (opens a plot window via the analyzer)."""
    print("\n=== 学习率分析 ===\n")
    
    # Synthetic 1-D dataset (generate_regression_data seeds numpy internally).
    X, y = generate_regression_data(n_samples=100, noise=0.1)
    
    # Learning rates spanning three orders of magnitude, small to large.
    learning_rates = [0.001, 0.01, 0.1, 0.3, 0.5, 1.0]
    
    print("比较不同学习率的效果...")
    analyzer = LearningRateAnalyzer()
    results = analyzer.compare_learning_rates(X, y, learning_rates, max_iterations=500)
    
    # Print a tab-separated summary of the per-learning-rate results.
    print("\n学习率分析结果:")
    print("学习率\t最终代价\t收敛迭代次数")
    print("-" * 40)
    
    for lr in learning_rates:
        result = results[lr]
        print(f"{lr}\t{result['final_cost']:.6f}\t{result['iterations']}")

def compare_with_normal_equation():
    """Compare gradient descent against the closed-form normal equation.

    Trains a gradient-descent model and solves the normal equations
    XᵀXθ = Xᵀy directly, then prints parameter and cost differences.
    """
    print("\n=== 梯度下降 vs 正规方程 ===\n")

    # Synthetic 1-D dataset (generate_regression_data seeds numpy internally).
    X, y = generate_regression_data(n_samples=100, noise=0.1)

    # Iterative solution via gradient descent.
    gd_model = GradientDescentRegression(learning_rate=0.1, max_iterations=1000)
    gd_model.fit(X, y)

    # Closed-form solution. Fix: solve the linear system XᵀXθ = Xᵀy with
    # np.linalg.solve instead of forming the explicit inverse — same result,
    # better numerical stability and cost.
    X_with_intercept = np.column_stack([np.ones(X.shape[0]), X])
    theta_normal = np.linalg.solve(X_with_intercept.T @ X_with_intercept,
                                   X_with_intercept.T @ y)

    print("参数对比:")
    print(f"梯度下降: θ₀={gd_model.theta[0]:.6f}, θ₁={gd_model.theta[1]:.6f}")
    print(f"正规方程: θ₀={theta_normal[0]:.6f}, θ₁={theta_normal[1]:.6f}")
    print(f"参数差异: {np.linalg.norm(gd_model.theta - theta_normal):.8f}")

    # Evaluate both solutions with the same cost function.
    gd_cost = gd_model.cost_function(X_with_intercept, y)
    normal_cost = (1 / (2 * len(y))) * np.sum((X_with_intercept @ theta_normal - y) ** 2)

    print("\n代价对比:")
    print(f"梯度下降: {gd_cost:.8f}")
    print(f"正规方程: {normal_cost:.8f}")
    print(f"代价差异: {abs(gd_cost - normal_cost):.10f}")

if __name__ == "__main__":
    # Run all three demonstrations in sequence when executed as a script.
    # Each one opens a blocking matplotlib window before the next starts.
    demonstrate_gradient_descent()
    demonstrate_learning_rate_analysis()
    compare_with_normal_equation()