"""
Week 14: 从零实现深度神经网络
Deep Neural Network Implementation from Scratch
"""

from typing import Callable, Dict, List

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Use a CJK-capable font and proper minus glyphs in matplotlib output.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class ActivationFunction:
    """Element-wise activation functions and their derivatives.

    Every method is a static function that accepts a numpy array of any
    shape and returns an array of the same shape.
    """

    @staticmethod
    def sigmoid(x: np.ndarray) -> np.ndarray:
        """Logistic sigmoid: 1 / (1 + e^(-x))."""
        # Clip the input so np.exp cannot overflow for large |x|.
        safe_x = np.clip(x, -250, 250)
        return 1 / (1 + np.exp(-safe_x))

    @staticmethod
    def sigmoid_derivative(x: np.ndarray) -> np.ndarray:
        """Derivative of the sigmoid: s(x) * (1 - s(x))."""
        sig = ActivationFunction.sigmoid(x)
        return sig * (1 - sig)

    @staticmethod
    def tanh(x: np.ndarray) -> np.ndarray:
        """Hyperbolic tangent activation."""
        return np.tanh(x)

    @staticmethod
    def tanh_derivative(x: np.ndarray) -> np.ndarray:
        """Derivative of tanh: 1 - tanh(x)^2."""
        return 1 - np.tanh(x) ** 2

    @staticmethod
    def relu(x: np.ndarray) -> np.ndarray:
        """Rectified linear unit: max(0, x)."""
        return np.maximum(0, x)

    @staticmethod
    def relu_derivative(x: np.ndarray) -> np.ndarray:
        """Derivative of ReLU: 1 where x > 0, else 0."""
        positive_mask = x > 0
        return positive_mask.astype(float)

    @staticmethod
    def leaky_relu(x: np.ndarray, alpha: float = 0.01) -> np.ndarray:
        """Leaky ReLU: x for positive inputs, alpha * x otherwise."""
        return np.where(x > 0, x, alpha * x)

    @staticmethod
    def leaky_relu_derivative(x: np.ndarray, alpha: float = 0.01) -> np.ndarray:
        """Derivative of Leaky ReLU: 1 where x > 0, else alpha."""
        return np.where(x > 0, 1, alpha)

class Layer:
    """One fully-connected layer: affine transform followed by an activation.

    Caches the values from the forward pass that backpropagation needs,
    and stores the parameter gradients (dW, db) computed by backward().
    """

    def __init__(self, input_size: int, output_size: int,
                 activation: str = 'relu', initialization: str = 'xavier'):
        self.input_size = input_size
        self.output_size = output_size
        self.activation = activation

        # Set up W and b according to the requested scheme.
        self.initialize_parameters(initialization)

        # Forward-pass caches (filled by forward()).
        self.z = None            # pre-activation (linear) output
        self.a = None            # post-activation output
        self.input_cache = None  # layer input, reused by backward()

        # Parameter gradients (filled by backward()).
        self.dW = None
        self.db = None

    def initialize_parameters(self, method: str):
        """Initialize W and b.

        'xavier' scales by sqrt(1/fan_in); 'he' by sqrt(2/fan_in)
        (suited to ReLU); 'normal' uses small random values; anything
        else falls back to all-zero weights.  Biases start at zero.
        """
        shape = (self.input_size, self.output_size)
        if method == 'xavier':
            self.W = np.random.randn(*shape) * np.sqrt(1.0 / self.input_size)
        elif method == 'he':
            self.W = np.random.randn(*shape) * np.sqrt(2.0 / self.input_size)
        elif method == 'normal':
            self.W = np.random.randn(*shape) * 0.01
        else:
            self.W = np.zeros(shape)

        self.b = np.zeros((1, self.output_size))

    def forward(self, X: np.ndarray) -> np.ndarray:
        """Run the forward pass and cache intermediates.

        Args:
            X: input of shape (batch, input_size).

        Returns:
            Activated output of shape (batch, output_size).

        Raises:
            ValueError: if the activation name is not recognized.
        """
        self.input_cache = X
        self.z = X @ self.W + self.b

        name = self.activation
        if name == 'linear':
            self.a = self.z
        elif name == 'sigmoid':
            self.a = ActivationFunction.sigmoid(self.z)
        elif name == 'tanh':
            self.a = ActivationFunction.tanh(self.z)
        elif name == 'relu':
            self.a = ActivationFunction.relu(self.z)
        elif name == 'leaky_relu':
            self.a = ActivationFunction.leaky_relu(self.z)
        else:
            raise ValueError(f"Unknown activation function: {self.activation}")

        return self.a

    def backward(self, dA: np.ndarray) -> np.ndarray:
        """Backpropagate dA (gradient w.r.t. this layer's output).

        Stores dW and db (averaged over the batch) and returns the
        gradient with respect to the layer's input.

        Raises:
            ValueError: if the activation name is not recognized.
        """
        batch = self.input_cache.shape[0]

        # Gradient through the activation: dZ = dA * act'(z).
        name = self.activation
        if name == 'linear':
            dZ = dA
        elif name == 'sigmoid':
            dZ = dA * ActivationFunction.sigmoid_derivative(self.z)
        elif name == 'tanh':
            dZ = dA * ActivationFunction.tanh_derivative(self.z)
        elif name == 'relu':
            dZ = dA * ActivationFunction.relu_derivative(self.z)
        elif name == 'leaky_relu':
            dZ = dA * ActivationFunction.leaky_relu_derivative(self.z)
        else:
            raise ValueError(f"Unknown activation function: {self.activation}")

        # Parameter gradients, averaged over the batch.
        self.dW = (1/batch) * self.input_cache.T @ dZ
        self.db = (1/batch) * np.sum(dZ, axis=0, keepdims=True)

        # Gradient flowing back to the previous layer.
        return dZ @ self.W.T

class NeuralNetwork:
    """A fully-connected feed-forward neural network built from Layer objects.

    Supports mini-batch gradient-descent training for either binary
    classification (sigmoid output layer + binary cross-entropy) or
    regression (any other output activation + mean squared error).
    """

    def __init__(self, layer_sizes: List[int], activations: List[str],
                 initialization: str = 'xavier'):
        """
        Args:
            layer_sizes: sizes of all layers, input first and output last,
                so len(layer_sizes) - 1 weight layers are created.
            activations: one activation name per weight layer.
            initialization: weight-init scheme forwarded to each Layer.

        Raises:
            ValueError: if activations does not match the layer count.
        """
        if len(activations) != len(layer_sizes) - 1:
            raise ValueError(
                f"expected {len(layer_sizes) - 1} activations, "
                f"got {len(activations)}"
            )

        self.layer_sizes = layer_sizes
        self.activations = activations

        # One Layer per consecutive pair of sizes.
        self.layers = [
            Layer(
                input_size=layer_sizes[i],
                output_size=layer_sizes[i + 1],
                activation=activations[i],
                initialization=initialization,
            )
            for i in range(len(layer_sizes) - 1)
        ]

        # Per-epoch metrics, appended to by train().
        self.history = {
            'train_loss': [],
            'val_loss': [],
            'train_acc': [],
            'val_acc': []
        }

    def forward(self, X: np.ndarray) -> np.ndarray:
        """Propagate X through every layer and return the network output."""
        output = X
        for layer in self.layers:
            output = layer.forward(output)
        return output

    def backward(self, y_true: np.ndarray, y_pred: np.ndarray):
        """Backpropagate the loss gradient through all layers.

        The per-sample gradient is passed unscaled: each Layer.backward
        already averages its parameter gradients over the batch (1/m).
        (The previous version also divided by m here, scaling gradients
        by 1/m^2 and making the effective learning rate depend on the
        batch size — a bug.)
        """
        if self.layers[-1].activation == 'sigmoid':
            # Binary cross-entropy gradient w.r.t. the sigmoid output.
            # Clip predictions away from exact 0/1 to avoid division by
            # zero, mirroring the clipping in compute_loss().
            epsilon = 1e-15
            y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
            dA = -(y_true / y_pred - (1 - y_true) / (1 - y_pred))
        else:
            # MSE gradient (up to the constant factor 2, absorbed into
            # the learning rate).
            dA = y_pred - y_true

        gradient = dA
        for layer in reversed(self.layers):
            gradient = layer.backward(gradient)

    def update_parameters(self, learning_rate: float):
        """Take one gradient-descent step on every layer's parameters."""
        for layer in self.layers:
            layer.W -= learning_rate * layer.dW
            layer.b -= learning_rate * layer.db

    def compute_loss(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
        """Binary cross-entropy for sigmoid outputs, MSE otherwise."""
        if self.layers[-1].activation == 'sigmoid':
            epsilon = 1e-15  # keep log() away from 0
            y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
            return float(-np.mean(
                y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)
            ))
        return float(np.mean((y_true - y_pred) ** 2))

    def compute_accuracy(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
        """Accuracy (threshold 0.5) for classification, R^2 for regression."""
        if self.layers[-1].activation == 'sigmoid':
            predictions = (y_pred > 0.5).astype(int)
            return float(np.mean(predictions == y_true))
        # R^2; undefined (division by zero) when y_true is constant.
        ss_res = np.sum((y_true - y_pred) ** 2)
        ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
        return float(1 - ss_res / ss_tot)

    def train(self, X_train: np.ndarray, y_train: np.ndarray,
              X_val: np.ndarray = None, y_val: np.ndarray = None,
              epochs: int = 100, learning_rate: float = 0.01,
              batch_size: int = 32, verbose: bool = True):
        """Train the network with mini-batch gradient descent.

        Args:
            X_train / y_train: training inputs and targets.
            X_val / y_val: optional validation set; when both are given,
                validation metrics are recorded per epoch as well.
            epochs: number of full passes over the training data.
            learning_rate: gradient-descent step size.
            batch_size: mini-batch size.
            verbose: print metrics every 10 epochs.
        """
        n_samples = len(X_train)

        for epoch in range(epochs):
            # Reshuffle once per epoch so batch composition varies.
            indices = np.random.permutation(n_samples)

            for start in range(0, n_samples, batch_size):
                batch_idx = indices[start:start + batch_size]
                X_batch = X_train[batch_idx]
                y_batch = y_train[batch_idx]

                y_pred = self.forward(X_batch)
                self.backward(y_batch, y_pred)
                self.update_parameters(learning_rate)

            # Full-dataset metrics once per epoch.
            train_pred = self.forward(X_train)
            train_loss = self.compute_loss(y_train, train_pred)
            train_acc = self.compute_accuracy(y_train, train_pred)

            self.history['train_loss'].append(train_loss)
            self.history['train_acc'].append(train_acc)

            if X_val is not None and y_val is not None:
                val_pred = self.forward(X_val)
                val_loss = self.compute_loss(y_val, val_pred)
                val_acc = self.compute_accuracy(y_val, val_pred)

                self.history['val_loss'].append(val_loss)
                self.history['val_acc'].append(val_acc)

                if verbose and epoch % 10 == 0:
                    print(f"Epoch {epoch}: Train Loss={train_loss:.4f}, "
                          f"Train Acc={train_acc:.4f}, Val Loss={val_loss:.4f}, "
                          f"Val Acc={val_acc:.4f}")
            elif verbose and epoch % 10 == 0:
                print(f"Epoch {epoch}: Train Loss={train_loss:.4f}, "
                      f"Train Acc={train_acc:.4f}")

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Run a forward pass on new data and return the raw outputs."""
        return self.forward(X)

class ActivationComparison:
    """Benchmark activation functions on a shared classification task."""

    def __init__(self):
        # Activations to compare; one network is trained per entry.
        self.activations = ['sigmoid', 'tanh', 'relu', 'leaky_relu']

    def compare_activations_on_classification(self):
        """Train one network per activation on the same synthetic
        binary-classification dataset and visualize the outcomes.

        Returns:
            Dict mapping activation name -> {'history', 'train_acc',
            'test_acc', 'network'}.
        """
        print("=== 激活函数在分类任务上的比较 ===")

        # Synthetic binary-classification data.
        X, y = make_classification(n_samples=1000, n_features=20, n_informative=15,
                                 n_redundant=5, n_classes=2, random_state=42)

        # Standardize features.
        scaler = StandardScaler()
        X = scaler.fit_transform(X)

        # NOTE(review): the test split also serves as the validation set
        # during training, so "test" accuracy is not fully held out.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42
        )

        # Column vectors to match the network's (batch, 1) output.
        y_train = y_train.reshape(-1, 1)
        y_test = y_test.reshape(-1, 1)

        results = {}

        for activation in self.activations:
            print(f"训练使用 {activation} 激活函数的网络...")

            # Hidden layers use the activation under test; the output
            # layer is always sigmoid for binary classification.
            if activation == 'sigmoid':
                activations = ['sigmoid', 'sigmoid', 'sigmoid']
            else:
                activations = [activation, activation, 'sigmoid']

            nn = NeuralNetwork(
                layer_sizes=[20, 64, 32, 1],
                activations=activations,
                # He init is tailored to ReLU; Xavier otherwise.
                initialization='he' if activation == 'relu' else 'xavier'
            )

            # Train (validation metrics tracked on the test split).
            nn.train(X_train, y_train, X_test, y_test,
                    epochs=100, learning_rate=0.01, verbose=False)

            # Final evaluation.
            train_pred = nn.predict(X_train)
            test_pred = nn.predict(X_test)

            train_acc = nn.compute_accuracy(y_train, train_pred)
            test_acc = nn.compute_accuracy(y_test, test_pred)

            results[activation] = {
                'history': nn.history,
                'train_acc': train_acc,
                'test_acc': test_acc,
                'network': nn
            }

        self.visualize_activation_comparison(results)
        return results

    def visualize_activation_comparison(self, results: Dict):
        """Render a 2x3 dashboard comparing the trained networks.

        Panels: activation shapes, their derivatives, train/validation
        loss curves, final accuracies, and first-layer gradient norms.
        (Also fixes the original empty parameter annotation, which was
        a syntax error.)
        """
        fig, axes = plt.subplots(2, 3, figsize=(18, 10))

        colors = ['blue', 'red', 'green', 'orange']

        # 1. Activation function shapes.
        ax1 = axes[0, 0]
        x = np.linspace(-5, 5, 100)

        ax1.plot(x, ActivationFunction.sigmoid(x), color=colors[0],
                linewidth=2, label='Sigmoid')
        ax1.plot(x, ActivationFunction.tanh(x), color=colors[1],
                linewidth=2, label='Tanh')
        ax1.plot(x, ActivationFunction.relu(x), color=colors[2],
                linewidth=2, label='ReLU')
        ax1.plot(x, ActivationFunction.leaky_relu(x), color=colors[3],
                linewidth=2, label='Leaky ReLU')

        ax1.set_xlabel('输入')
        ax1.set_ylabel('输出')
        ax1.set_title('激活函数形状')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # 2. Activation derivatives.
        ax2 = axes[0, 1]

        ax2.plot(x, ActivationFunction.sigmoid_derivative(x), color=colors[0],
                linewidth=2, label="Sigmoid'")
        ax2.plot(x, ActivationFunction.tanh_derivative(x), color=colors[1],
                linewidth=2, label="Tanh'")
        ax2.plot(x, ActivationFunction.relu_derivative(x), color=colors[2],
                linewidth=2, label="ReLU'")
        ax2.plot(x, ActivationFunction.leaky_relu_derivative(x), color=colors[3],
                linewidth=2, label="Leaky ReLU'")

        ax2.set_xlabel('输入')
        ax2.set_ylabel('导数')
        ax2.set_title('激活函数导数')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # 3. Training loss curves.
        ax3 = axes[0, 2]

        for i, (activation, result) in enumerate(results.items()):
            ax3.plot(result['history']['train_loss'], color=colors[i],
                    linewidth=2, label=f'{activation}')

        ax3.set_xlabel('训练轮次')
        ax3.set_ylabel('训练损失')
        ax3.set_title('训练损失比较')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # 4. Validation loss curves.
        ax4 = axes[1, 0]

        for i, (activation, result) in enumerate(results.items()):
            ax4.plot(result['history']['val_loss'], color=colors[i],
                    linewidth=2, label=f'{activation}')

        ax4.set_xlabel('训练轮次')
        ax4.set_ylabel('验证损失')
        ax4.set_title('验证损失比较')
        ax4.legend()
        ax4.grid(True, alpha=0.3)

        # 5. Final accuracies (grouped bars).
        ax5 = axes[1, 1]

        activations = list(results.keys())
        train_accs = [results[act]['train_acc'] for act in activations]
        test_accs = [results[act]['test_acc'] for act in activations]

        x_pos = np.arange(len(activations))
        width = 0.35

        ax5.bar(x_pos - width/2, train_accs, width,
               label='训练准确率', color='lightblue', alpha=0.7)
        ax5.bar(x_pos + width/2, test_accs, width,
               label='测试准确率', color='lightcoral', alpha=0.7)

        ax5.set_xlabel('激活函数')
        ax5.set_ylabel('准确率')
        ax5.set_title('最终准确率比较')
        ax5.set_xticks(x_pos)
        ax5.set_xticklabels(activations)
        ax5.legend()
        ax5.grid(True, alpha=0.3)

        # 6. Gradient analysis (first hidden layer's weight-gradient
        # norm from the last training batch).
        ax6 = axes[1, 2]

        gradient_norms = []

        for activation in activations:
            nn = results[activation]['network']
            grad_norm = np.linalg.norm(nn.layers[0].dW)
            gradient_norms.append(grad_norm)

        bars = ax6.bar(activations, gradient_norms, color=colors[:len(activations)], alpha=0.7)
        ax6.set_xlabel('激活函数')
        ax6.set_ylabel('梯度范数')
        ax6.set_title('第一层梯度范数比较')
        ax6.grid(True, alpha=0.3)

        # Annotate each bar with its value.
        for bar, norm in zip(bars, gradient_norms):
            height = bar.get_height()
            ax6.text(bar.get_x() + bar.get_width()/2., height,
                    f'{norm:.3f}', ha='center', va='bottom')

        plt.tight_layout()
        plt.show()

def demonstrate_neural_networks():
    """End-to-end demo: compare activations, then train a deep regressor."""
    print("=== 深度神经网络从零实现 ===\n")

    # 1. Activation-function comparison on a classification task.
    print("1. 激活函数比较")
    comparison = ActivationComparison()
    activation_results = comparison.compare_activations_on_classification()

    # 2. Deep network on a regression problem.
    print("\n2. 深度网络训练演示")

    # Synthetic regression data.
    X, y = make_regression(n_samples=1000, n_features=10, noise=0.1, random_state=42)

    # Standardize features and targets independently.
    feature_scaler = StandardScaler()
    target_scaler = StandardScaler()
    X = feature_scaler.fit_transform(X)
    y = target_scaler.fit_transform(y.reshape(-1, 1))

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Deep ReLU network with a linear output head for regression.
    deep_nn = NeuralNetwork(
        layer_sizes=[10, 128, 64, 32, 1],
        activations=['relu', 'relu', 'relu', 'linear'],
        initialization='he'
    )

    print("训练深度神经网络...")
    deep_nn.train(X_train, y_train, X_test, y_test,
                  epochs=200, learning_rate=0.001, verbose=True)

    # Plot loss curves and R^2 curves side by side.
    fig, (loss_ax, score_ax) = plt.subplots(1, 2, figsize=(12, 4))

    loss_ax.plot(deep_nn.history['train_loss'], label='训练损失')
    loss_ax.plot(deep_nn.history['val_loss'], label='验证损失')
    loss_ax.set_xlabel('训练轮次')
    loss_ax.set_ylabel('损失')
    loss_ax.set_title('深度网络训练过程')
    loss_ax.legend()
    loss_ax.grid(True, alpha=0.3)

    score_ax.plot(deep_nn.history['train_acc'], label='训练R²')
    score_ax.plot(deep_nn.history['val_acc'], label='验证R²')
    score_ax.set_xlabel('训练轮次')
    score_ax.set_ylabel('R²得分')
    score_ax.set_title('深度网络性能')
    score_ax.legend()
    score_ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

    # 3. Summary.
    print("\n=== 深度神经网络总结 ===")
    print("1. 激活函数选择很重要：ReLU通常是好选择")
    print("2. 权重初始化影响训练：He初始化适合ReLU")
    print("3. 深度网络可以学习复杂模式")
    print("4. 需要注意梯度消失/爆炸问题")
    print("5. 正则化和dropout有助于防止过拟合")

# Run the full demonstration only when executed as a script.
if __name__ == "__main__":
    demonstrate_neural_networks()