"""
Week 6: 逻辑回归实现
Logistic Regression Implementation
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Optional
from sklearn.datasets import make_classification
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class LogisticRegression:
    """Binary logistic regression classifier trained with batch gradient descent.

    The model learns a weight vector theta (bias included as theta[0]) by
    minimizing the cross-entropy loss. Training stops after `max_iterations`
    steps or once the cost change between consecutive iterations falls below
    `tolerance`.
    """

    def __init__(self, learning_rate: float = 0.01, max_iterations: int = 1000,
                 tolerance: float = 1e-6):
        self.learning_rate = learning_rate    # gradient-descent step size
        self.max_iterations = max_iterations  # hard cap on iterations
        self.tolerance = tolerance            # convergence threshold on |Δcost|
        self.theta = None                     # weight vector, set by fit()
        self.cost_history = []                # cost recorded at every iteration

    def add_intercept(self, X: np.ndarray) -> np.ndarray:
        """Prepend a column of ones so theta[0] serves as the bias term."""
        bias_column = np.ones((X.shape[0], 1))
        return np.hstack([bias_column, X])

    def sigmoid(self, z: np.ndarray) -> np.ndarray:
        """Numerically stable sigmoid: σ(z) = 1 / (1 + e^(-z)).

        Inputs are clipped to [-500, 500] so np.exp never overflows.
        """
        bounded = np.clip(z, -500, 500)
        return 1 / (1 + np.exp(-bounded))

    def hypothesis(self, X: np.ndarray) -> np.ndarray:
        """Model prediction h(x) = σ(θᵀx) for every row of X."""
        return self.sigmoid(np.dot(X, self.theta))

    def cost_function(self, X: np.ndarray, y: np.ndarray) -> float:
        """Mean cross-entropy loss.

        J(θ) = -(1/m) * Σ[y·log(h(x)) + (1-y)·log(1-h(x))]
        """
        n_samples = X.shape[0]
        probs = self.hypothesis(X)

        # Clip probabilities away from exactly 0 and 1 to keep log() finite.
        eps = 1e-15
        probs = np.clip(probs, eps, 1 - eps)

        log_terms = y * np.log(probs) + (1 - y) * np.log(1 - probs)
        return -(1 / n_samples) * np.sum(log_terms)

    def compute_gradient(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Gradient of the loss: ∇J(θ) = (1/m) · Xᵀ(h(x) - y)."""
        n_samples = X.shape[0]
        residuals = self.hypothesis(X) - y
        return (1 / n_samples) * np.dot(X.T, residuals)

    def fit(self, X: np.ndarray, y: np.ndarray, verbose: bool = False) -> 'LogisticRegression':
        """Train the model with batch gradient descent and return self."""
        design = self.add_intercept(X)
        # Start from all-zero weights; cost history is reset per fit() call.
        self.theta = np.zeros(design.shape[1])
        self.cost_history = []

        for step in range(self.max_iterations):
            # Record the cost of the current parameters, then take one step.
            self.cost_history.append(self.cost_function(design, y))
            grad = self.compute_gradient(design, y)
            self.theta = self.theta - self.learning_rate * grad

            # Stop once the cost change between iterations is negligible.
            if step > 0 and abs(self.cost_history[-2] - self.cost_history[-1]) < self.tolerance:
                if verbose:
                    print(f"在第{step+1}次迭代后收敛")
                break

            if verbose and (step % 100 == 0 or step < 10):
                print(f"迭代 {step+1}: 代价 = {self.cost_history[-1]:.6f}, 梯度范数 = {np.linalg.norm(grad):.6f}")

        return self

    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        """Return P(y=1 | x) for each row of X; raises if fit() was not called."""
        if self.theta is None:
            raise ValueError("模型尚未训练")
        return self.hypothesis(self.add_intercept(X))

    def predict(self, X: np.ndarray, threshold: float = 0.5) -> np.ndarray:
        """Return hard 0/1 labels by thresholding the predicted probability."""
        return np.where(self.predict_proba(X) >= threshold, 1, 0)

    def score(self, X: np.ndarray, y: np.ndarray) -> float:
        """Fraction of samples whose predicted label matches y (accuracy)."""
        return np.mean(self.predict(X) == y)

class LogisticRegressionAnalyzer:
    """Visualization and analysis helpers for LogisticRegression.

    Provides synthetic-data generation, decision-boundary plots, sigmoid
    diagnostics, and learning-rate comparison experiments. All plotting
    methods display figures via plt.show() (blocking in interactive use).
    """

    def __init__(self):
        self.name = "Logistic Regression Analyzer"

    def generate_classification_data(self, n_samples: int = 200, n_features: int = 2,
                                     n_classes: int = 2, random_state: int = 42) -> tuple[np.ndarray, np.ndarray]:
        """Generate a synthetic classification dataset via sklearn.

        Args:
            n_samples: Number of samples to generate.
            n_features: Number of (all informative) features.
            n_classes: Number of target classes.
            random_state: Seed for reproducibility.

        Returns:
            Tuple (X, y) of feature matrix and label vector.
        """
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_redundant=0,                 # no redundant features
            n_informative=n_features,      # every feature carries signal
            n_classes=n_classes,
            n_clusters_per_class=1,
            random_state=random_state
        )
        return X, y

    def plot_decision_boundary(self, X: np.ndarray, y: np.ndarray, model: LogisticRegression,
                               title: str = "逻辑回归决策边界"):
        """Plot the model's probability surface, 0.5 decision boundary, and data.

        Assumes X has exactly two feature columns.
        """
        plt.figure(figsize=(10, 8))

        # Build an evaluation grid covering the data with a 1-unit margin.
        h = 0.02
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))

        # Predict the probability at every grid point.
        grid_points = np.c_[xx.ravel(), yy.ravel()]
        Z = model.predict_proba(grid_points)
        Z = Z.reshape(xx.shape)

        # Filled probability contours as the background.
        plt.contourf(xx, yy, Z, levels=50, alpha=0.8, cmap='RdYlBu')
        plt.colorbar(label='预测概率')

        # Decision boundary: the probability = 0.5 level set.
        plt.contour(xx, yy, Z, levels=[0.5], colors='black', linewidths=2, linestyles='--')

        # Overlay the training points colored by true label.
        scatter = plt.scatter(X[:, 0], X[:, 1], c=y, cmap='RdYlBu', edgecolors='black')
        plt.colorbar(scatter, label='真实标签')

        plt.xlabel('特征 1')
        plt.ylabel('特征 2')
        plt.title(title)
        plt.grid(True, alpha=0.3)
        plt.show()

    def analyze_sigmoid_function(self):
        """Plot the sigmoid function, its derivative, and the logit function."""
        z = np.linspace(-10, 10, 1000)
        sigmoid_z = 1 / (1 + np.exp(-z))

        plt.figure(figsize=(15, 5))

        # Panel 1: the sigmoid itself with the 0.5 decision threshold marked.
        plt.subplot(1, 3, 1)
        plt.plot(z, sigmoid_z, 'b-', linewidth=2, label='σ(z) = 1/(1+e^(-z))')
        plt.axhline(y=0.5, color='r', linestyle='--', alpha=0.7, label='决策阈值')
        plt.axvline(x=0, color='r', linestyle='--', alpha=0.7)
        plt.xlabel('z')
        plt.ylabel('σ(z)')
        plt.title('Sigmoid函数')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Panel 2: derivative σ'(z) = σ(z)(1-σ(z)), peaking at z = 0.
        plt.subplot(1, 3, 2)
        sigmoid_derivative = sigmoid_z * (1 - sigmoid_z)
        plt.plot(z, sigmoid_derivative, 'g-', linewidth=2, label="σ'(z) = σ(z)(1-σ(z))")
        plt.xlabel('z')
        plt.ylabel("σ'(z)")
        plt.title('Sigmoid函数导数')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Panel 3: log-odds (logit), the inverse of the sigmoid.
        plt.subplot(1, 3, 3)
        # Stay strictly inside (0, 1) to avoid log(0) and division by zero.
        p = np.linspace(0.01, 0.99, 1000)
        logit = np.log(p / (1 - p))
        plt.plot(p, logit, 'r-', linewidth=2, label='logit(p) = log(p/(1-p))')
        plt.axhline(y=0, color='k', linestyle='--', alpha=0.7)
        plt.axvline(x=0.5, color='k', linestyle='--', alpha=0.7)
        plt.xlabel('概率 p')
        plt.ylabel('logit(p)')
        plt.title('对数几率函数')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

    def compare_learning_rates(self, X: np.ndarray, y: np.ndarray,
                               learning_rates: Optional[list] = None):
        """Train a model per learning rate and compare convergence and accuracy.

        Args:
            X: Feature matrix (two columns, for the boundary subplot).
            y: 0/1 label vector.
            learning_rates: Learning rates to try; defaults to
                [0.001, 0.01, 0.1, 1.0].

        Returns:
            Dict mapping each learning rate to its fitted model.
        """
        # Default assigned inside the body to avoid a mutable default argument.
        if learning_rates is None:
            learning_rates = [0.001, 0.01, 0.1, 1.0]

        plt.figure(figsize=(15, 10))

        results = {}

        # Subplot 1: cost-vs-iteration curves (log scale on cost).
        plt.subplot(2, 2, 1)
        for lr in learning_rates:
            model = LogisticRegression(learning_rate=lr, max_iterations=1000)
            model.fit(X, y)

            plt.plot(model.cost_history, label=f'α = {lr}', linewidth=2)
            results[lr] = model

        plt.xlabel('迭代次数')
        plt.ylabel('代价函数值')
        plt.title('不同学习率的收敛曲线')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.yscale('log')

        # Subplot 2: final training accuracy per learning rate.
        plt.subplot(2, 2, 2)
        accuracies = [results[lr].score(X, y) for lr in learning_rates]
        plt.bar(range(len(learning_rates)), accuracies, alpha=0.7)
        plt.xlabel('学习率')
        plt.ylabel('准确率')
        plt.title('不同学习率的最终准确率')
        plt.xticks(range(len(learning_rates)), [f'{lr}' for lr in learning_rates])

        # Subplot 3: parameter trajectories (first two components only).
        # NOTE(review): LogisticRegression never records `theta_history`, so
        # this subplot stays empty unless that feature is added to fit().
        plt.subplot(2, 2, 3)
        for lr in learning_rates:
            model = results[lr]
            if hasattr(model, 'theta_history'):
                theta_history = np.array(model.theta_history)
                plt.plot(theta_history[:, 0], theta_history[:, 1],
                         'o-', markersize=2, label=f'α = {lr}')

        plt.xlabel('θ₀')
        plt.ylabel('θ₁')
        plt.title('参数收敛轨迹')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Subplot 4: decision boundary of the most accurate model.
        plt.subplot(2, 2, 4)
        best_lr = learning_rates[np.argmax(accuracies)]
        best_model = results[best_lr]

        # Coarser grid (h=0.1) is enough for this small comparison panel.
        h = 0.1
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))

        grid_points = np.c_[xx.ravel(), yy.ravel()]
        Z = best_model.predict(grid_points)
        Z = Z.reshape(xx.shape)

        plt.contourf(xx, yy, Z, alpha=0.3, cmap='RdYlBu')
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap='RdYlBu', edgecolors='black')
        plt.xlabel('特征 1')
        plt.ylabel('特征 2')
        plt.title(f'最佳决策边界 (α = {best_lr})')

        plt.tight_layout()
        plt.show()

        return results

def evaluate_classification_performance(y_true: np.ndarray, y_pred: np.ndarray,
                                        y_prob: np.ndarray = None):
    """Print binary-classification metrics and plot the confusion matrix.

    Computes accuracy, precision, recall and F1 from the 2x2 confusion
    matrix, prints them, and displays the matrix as an annotated heatmap.

    Args:
        y_true: Ground-truth 0/1 labels.
        y_pred: Predicted 0/1 labels.
        y_prob: Predicted probabilities; currently unused, accepted for
            interface compatibility (e.g. future ROC/AUC support).
    """
    # Only confusion_matrix is needed; the previously imported
    # classification_report was never used.
    from sklearn.metrics import confusion_matrix

    # Pin labels to [0, 1] so ravel() always yields (tn, fp, fn, tp),
    # even if one class happens to be absent from the inputs.
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    tn, fp, fn, tp = cm.ravel()

    # Derived metrics, guarding every ratio against division by zero.
    total = tp + tn + fp + fn
    accuracy = (tp + tn) / total if total > 0 else 0
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

    print("=== 分类性能评估 ===")
    print(f"准确率 (Accuracy): {accuracy:.4f}")
    print(f"精确率 (Precision): {precision:.4f}")
    print(f"召回率 (Recall): {recall:.4f}")
    print(f"F1分数: {f1:.4f}")

    print(f"\n混淆矩阵:")
    print(f"真负例: {tn}, 假正例: {fp}")
    print(f"假负例: {fn}, 真正例: {tp}")

    # Visualize the confusion matrix as a heatmap.
    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap='Blues')
    plt.title('混淆矩阵')
    plt.colorbar()

    classes = ['负例', '正例']
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes)

    # Annotate each cell; use white text on dark cells for contrast.
    thresh = cm.max() / 2.
    for i, j in np.ndindex(cm.shape):
        plt.text(j, i, format(cm[i, j], 'd'),
                 ha="center", va="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.tight_layout()
    plt.show()

def demonstrate_logistic_regression():
    """End-to-end logistic regression demo: sigmoid analysis, training,
    decision-boundary plots, learning-rate comparison, and evaluation."""
    print("=== 逻辑回归演示 ===\n")

    analyzer = LogisticRegressionAnalyzer()

    # Step 1: visualize the sigmoid, its derivative, and the logit.
    print("1. Sigmoid函数分析")
    analyzer.analyze_sigmoid_function()

    # Step 2: build a synthetic 2-feature dataset and fit the model.
    print("2. 生成分类数据并训练模型")
    features, labels = analyzer.generate_classification_data(n_samples=200, n_features=2)

    clf = LogisticRegression(learning_rate=0.1, max_iterations=1000)
    clf.fit(features, labels, verbose=True)

    print(f"   最终参数: {clf.theta}")
    print(f"   训练准确率: {clf.score(features, labels):.4f}")

    # Step 3: show the learned decision boundary over the data.
    print("3. 决策边界可视化")
    analyzer.plot_decision_boundary(features, labels, clf)

    # Step 4: compare convergence behavior across learning rates.
    print("4. 学习率比较")
    analyzer.compare_learning_rates(features, labels, [0.001, 0.01, 0.1, 0.5])

    # Step 5: detailed metrics and confusion matrix for the trained model.
    print("5. 性能评估")
    predicted_labels = clf.predict(features)
    predicted_probs = clf.predict_proba(features)
    evaluate_classification_performance(labels, predicted_labels, predicted_probs)

# Run the full demonstration only when executed as a script (not on import).
if __name__ == "__main__":
    demonstrate_logistic_regression()