"""
Week 3: 线性回归实现
Linear Regression Implementation
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Optional
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class LinearRegression:
    """Ordinary least-squares linear regression fitted via the normal equation.

    Attributes:
        theta: Fitted parameter vector, intercept first; None before fitting.
        cost_history: Cost values recorded during fitting.
        name: Display name of the model.
    """

    def __init__(self):
        self.theta: Optional[np.ndarray] = None  # set by fit_normal_equation
        self.cost_history: list = []
        self.name = "Linear Regression"

    def add_intercept(self, X: np.ndarray) -> np.ndarray:
        """Prepend a column of ones so theta[0] acts as the intercept term."""
        return np.column_stack([np.ones(X.shape[0]), X])

    def hypothesis(self, X: np.ndarray) -> np.ndarray:
        """Hypothesis h(x) = θᵀx; X must already include the intercept column."""
        return X @ self.theta

    def cost_function(self, X: np.ndarray, y: np.ndarray) -> float:
        """Cost J(θ) = 1/(2m) * ||Xθ - y||² (half mean squared error)."""
        m = X.shape[0]
        predictions = self.hypothesis(X)
        return (1 / (2 * m)) * np.sum((predictions - y) ** 2)

    def normal_equation(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Solve the normal equation θ = (XᵀX)⁻¹Xᵀy.

        Falls back to the Moore-Penrose pseudo-inverse when XᵀX is singular.
        X is expected to already contain the intercept column.
        """
        try:
            XtX = X.T @ X

            # BUGFIX: the original tested `np.linalg.det(XtX) == 0`, which is
            # unreliable in floating point — the determinant of a singular
            # float matrix is almost never exactly 0, and it over/underflows
            # for scaled data. A rank check detects singularity robustly.
            if np.linalg.matrix_rank(XtX) < XtX.shape[0]:
                print("警告: XᵀX不可逆，使用伪逆")
                theta = np.linalg.pinv(X) @ y
            else:
                # Solving the linear system is more stable and faster than
                # forming the explicit inverse.
                theta = np.linalg.solve(XtX, X.T @ y)

            return theta
        except np.linalg.LinAlgError:
            print("线性代数错误，使用伪逆")
            return np.linalg.pinv(X) @ y

    def fit_normal_equation(self, X: np.ndarray, y: np.ndarray):
        """Fit the model with the closed-form normal equation.

        Args:
            X: Feature matrix of shape (m, n), WITHOUT the intercept column.
            y: Target vector of shape (m,).

        Returns:
            self, allowing call chaining.
        """
        X_with_intercept = self.add_intercept(X)

        self.theta = self.normal_equation(X_with_intercept, y)

        # Record the final cost so cost_history mirrors iterative fitters.
        final_cost = self.cost_function(X_with_intercept, y)
        self.cost_history = [final_cost]

        return self

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict targets for X (without intercept column).

        Raises:
            ValueError: if the model has not been fitted yet.
        """
        if self.theta is None:
            raise ValueError("模型尚未训练，请先调用fit方法")

        X_with_intercept = self.add_intercept(X)
        return self.hypothesis(X_with_intercept)

    def score(self, X: np.ndarray, y: np.ndarray) -> float:
        """Coefficient of determination R² = 1 - SS_res / SS_tot.

        NOTE(review): a constant y makes SS_tot zero and this divides by
        zero — acceptable for the demos in this module.
        """
        y_pred = self.predict(X)
        ss_res = np.sum((y - y_pred) ** 2)
        ss_tot = np.sum((y - np.mean(y)) ** 2)
        return 1 - (ss_res / ss_tot)

class MultipleLinearRegression(LinearRegression):
    """Multiple-input linear regression with named features.

    Extends LinearRegression with per-feature names and a coefficient
    lookup keyed by those names.
    """

    def __init__(self):
        super().__init__()
        # Feature labels; assigned when fit() is called.
        self.feature_names = None

    def fit(self, X: np.ndarray, y: np.ndarray, feature_names: Optional[list] = None):
        """Fit via the normal equation, recording feature names.

        Missing/empty feature_names are replaced by generated labels
        x1..xn matching the columns of X.
        """
        if feature_names:
            self.feature_names = feature_names
        else:
            self.feature_names = [f"x{i+1}" for i in range(X.shape[1])]
        return self.fit_normal_equation(X, y)

    def get_coefficients(self) -> dict:
        """Return {name: coefficient}, intercept first; empty dict if unfitted."""
        if self.theta is None:
            return {}

        coefficients = {"截距": self.theta[0]}
        coefficients.update(
            (label, self.theta[idx + 1])
            for idx, label in enumerate(self.feature_names)
        )
        return coefficients

def generate_linear_data(n_samples: int = 100, n_features: int = 1, 
                        noise: float = 0.1, random_state: int = 42) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Generate synthetic data from a random linear model.

    Args:
        n_samples: Number of rows in X.
        n_features: Number of feature columns (intercept excluded).
        noise: Scale of the additive Gaussian noise.
        random_state: Seed passed to np.random.seed (NOTE: mutates NumPy's
            global RNG state as a side effect).

    Returns:
        (X, y, true_theta): features of shape (n_samples, n_features),
        targets of shape (n_samples,), and the ground-truth parameter
        vector of length n_features + 1 (intercept first).
        BUGFIX: the original annotation declared a 2-tuple although the
        function returns three values.
    """
    np.random.seed(random_state)
    
    X = np.random.randn(n_samples, n_features)
    
    # Ground-truth parameters, intercept included as element 0.
    true_theta = np.random.randn(n_features + 1)
    
    # Build the design matrix with an intercept column, then add noise.
    X_with_intercept = np.column_stack([np.ones(n_samples), X])
    y = X_with_intercept @ true_theta + noise * np.random.randn(n_samples)
    
    return X, y, true_theta

def demonstrate_linear_regression():
    """Demo: fit simple and multiple linear regression on synthetic data.

    Side effects: prints fitted vs. true parameters and R² to stdout, and
    opens a matplotlib figure with three panels (fit line, coefficient
    bars, predicted-vs-true scatter).
    """
    print("=== 线性回归演示 ===\n")
    
    # 1. Simple (single-feature) linear regression
    print("1. 一元线性回归")
    X, y, true_theta = generate_linear_data(n_samples=50, n_features=1, noise=0.2)
    
    # Fit via the closed-form normal equation
    model = LinearRegression()
    model.fit_normal_equation(X, y)
    
    print(f"   真实参数: θ₀={true_theta[0]:.3f}, θ₁={true_theta[1]:.3f}")
    print(f"   估计参数: θ₀={model.theta[0]:.3f}, θ₁={model.theta[1]:.3f}")
    print(f"   R²得分: {model.score(X, y):.4f}")
    
    # Visualization: three side-by-side panels
    plt.figure(figsize=(15, 5))
    
    plt.subplot(1, 3, 1)
    plt.scatter(X, y, alpha=0.6, color='blue', label='训练数据')
    
    # Draw the fitted line across the observed x-range
    X_line = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
    y_line = model.predict(X_line)
    plt.plot(X_line, y_line, 'r-', linewidth=2, label=f'拟合直线: y = {model.theta[0]:.2f} + {model.theta[1]:.2f}x')
    
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('一元线性回归')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    # 2. Multiple linear regression (two features)
    print("\n2. 多元线性回归")
    X_multi, y_multi, true_theta_multi = generate_linear_data(n_samples=100, n_features=2, noise=0.1)
    
    model_multi = MultipleLinearRegression()
    model_multi.fit(X_multi, y_multi, feature_names=['x1', 'x2'])
    
    print(f"   真实参数: {true_theta_multi}")
    print(f"   估计参数: {model_multi.theta}")
    print(f"   R²得分: {model_multi.score(X_multi, y_multi):.4f}")
    
    # Bar chart of the fitted coefficients (intercept + per-feature)
    plt.subplot(1, 3, 2)
    coef_dict = model_multi.get_coefficients()
    names = list(coef_dict.keys())
    values = list(coef_dict.values())
    
    plt.bar(names, values, alpha=0.7)
    plt.title('回归系数')
    plt.ylabel('系数值')
    plt.xticks(rotation=45)
    
    # Predicted vs. true targets on the training data
    plt.subplot(1, 3, 3)
    y_pred = model_multi.predict(X_multi)
    plt.scatter(y_multi, y_pred, alpha=0.6)
    
    # Reference line y = x (perfect prediction)
    min_val = min(y_multi.min(), y_pred.min())
    max_val = max(y_multi.max(), y_pred.max())
    plt.plot([min_val, max_val], [min_val, max_val], 'r--', label='完美预测')
    
    plt.xlabel('真实值')
    plt.ylabel('预测值')
    plt.title('预测 vs 真实值')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    plt.tight_layout()
    plt.show()

def demonstrate_normal_equation_analysis():
    """Demo: how sample size and noise level affect normal-equation estimates.

    Side effects: prints two comparison tables to stdout and opens a
    matplotlib figure with two log-log error plots.
    """
    print("\n=== 正规方程分析 ===\n")
    
    # 1. Effect of sample size on estimation error
    sample_sizes = [20, 50, 100, 200, 500]
    errors = []
    
    print("样本大小对参数估计的影响:")
    print("样本大小\t参数误差\tR²得分")
    print("-" * 40)
    
    for n in sample_sizes:
        X, y, true_theta = generate_linear_data(n_samples=n, n_features=2, noise=0.1)
        
        model = LinearRegression()
        model.fit_normal_equation(X, y)
        
        # L2 distance between estimated and ground-truth parameters
        param_error = np.linalg.norm(model.theta - true_theta)
        r2_score = model.score(X, y)
        
        errors.append(param_error)
        print(f"{n}\t\t{param_error:.4f}\t\t{r2_score:.4f}")
    
    # Plot parameter error against sample size (log-log scale)
    plt.figure(figsize=(12, 4))
    
    plt.subplot(1, 2, 1)
    plt.plot(sample_sizes, errors, 'bo-', markersize=6)
    plt.xlabel('样本大小')
    plt.ylabel('参数估计误差')
    plt.title('样本大小 vs 参数估计误差')
    plt.grid(True, alpha=0.3)
    plt.xscale('log')
    plt.yscale('log')
    
    # 2. Effect of noise level on estimation error (fixed sample size)
    noise_levels = [0.01, 0.05, 0.1, 0.2, 0.5]
    noise_errors = []
    
    print(f"\n噪声水平对参数估计的影响:")
    print("噪声水平\t参数误差\tR²得分")
    print("-" * 40)
    
    for noise in noise_levels:
        X, y, true_theta = generate_linear_data(n_samples=100, n_features=2, noise=noise)
        
        model = LinearRegression()
        model.fit_normal_equation(X, y)
        
        param_error = np.linalg.norm(model.theta - true_theta)
        r2_score = model.score(X, y)
        
        noise_errors.append(param_error)
        print(f"{noise}\t\t{param_error:.4f}\t\t{r2_score:.4f}")
    
    plt.subplot(1, 2, 2)
    plt.plot(noise_levels, noise_errors, 'ro-', markersize=6)
    plt.xlabel('噪声水平')
    plt.ylabel('参数估计误差')
    plt.title('噪声水平 vs 参数估计误差')
    plt.grid(True, alpha=0.3)
    plt.xscale('log')
    plt.yscale('log')
    
    plt.tight_layout()
    plt.show()

def demonstrate_matrix_analysis():
    """Inspect the normal-equation matrices for a sample regression problem.

    Prints the Gram matrix XᵀX, its condition number, determinant and
    eigenvalues, then compares the explicit-inverse solution against the
    pseudo-inverse solution.
    """
    print("\n=== 矩阵分析 ===\n")

    # Build a design matrix with an intercept column.
    features, targets, _ = generate_linear_data(n_samples=50, n_features=2, noise=0.1)
    design = np.column_stack([np.ones(features.shape[0]), features])

    # Gram matrix and moment vector used by the normal equation.
    gram = design.T @ design
    moment = design.T @ targets

    print("设计矩阵 X 的形状:", design.shape)
    print("XᵀX 矩阵:")
    print(gram)
    print(f"\nXᵀX 的条件数: {np.linalg.cond(gram):.2f}")
    print(f"XᵀX 的行列式: {np.linalg.det(gram):.4f}")

    # Eigen-decomposition of the Gram matrix.
    eigenvals, eigenvecs = np.linalg.eig(gram)
    print(f"\nXᵀX 的特征值: {eigenvals}")

    # Solve the same system two ways and compare.
    theta_normal = np.linalg.inv(gram) @ moment
    theta_pinv = np.linalg.pinv(design) @ targets

    print(f"\n正规方程解: {theta_normal}")
    print(f"伪逆解: {theta_pinv}")
    print(f"解的差异: {np.linalg.norm(theta_normal - theta_pinv):.6f}")

if __name__ == "__main__":
    # Run each demonstration in sequence when executed as a script.
    for demo in (
        demonstrate_linear_regression,
        demonstrate_normal_equation_analysis,
        demonstrate_matrix_analysis,
    ):
        demo()