import numpy as np
from typing import Optional
from adam import Adam

class FeedForward:
    """Position-wise feed-forward block with a residual connection.

    Computes ``output = W2 @ ReLU(W1 @ x + b1) + b2 + x`` (no LayerNorm).
    Each parameter has its own Adam optimizer; ``backward`` both returns the
    input gradient and applies the parameter update in place.
    """

    def __init__(self, embedding_dim: int, hidden_dim: int):
        """Initialize weights (He initialization) and per-parameter Adam state.

        Args:
            embedding_dim: Size of the input/output feature dimension.
            hidden_dim: Size of the intermediate (expanded) dimension.
        """
        # He initialization: std = sqrt(2 / fan_in), suited to the ReLU
        # nonlinearity applied after w1.
        std_w1 = np.sqrt(2.0 / embedding_dim)
        std_w2 = np.sqrt(2.0 / hidden_dim)

        self.w1 = np.random.normal(0.0, std_w1, (embedding_dim, hidden_dim))
        self.b1 = np.zeros((1, hidden_dim))  # biases start at zero
        self.w2 = np.random.normal(0.0, std_w2, (hidden_dim, embedding_dim))
        self.b2 = np.zeros((1, embedding_dim))

        # Activations cached by forward() for use in backward().
        self.input: Optional[np.ndarray] = None
        self.hidden_pre_activation: Optional[np.ndarray] = None
        self.hidden_post_activation: Optional[np.ndarray] = None

        # One Adam instance per parameter tensor (shape-matched moment buffers).
        self.optimizer_w1 = Adam((embedding_dim, hidden_dim))
        self.optimizer_b1 = Adam((1, hidden_dim))
        self.optimizer_w2 = Adam((hidden_dim, embedding_dim))
        self.optimizer_b2 = Adam((1, embedding_dim))

    def layer_type(self) -> str:
        """Return a human-readable identifier for this layer type."""
        return "FeedForward"

    def forward(self, input_data: np.ndarray) -> np.ndarray:
        """Run the feed-forward computation and cache activations.

        Args:
            input_data: Input of shape ``[seq_len, embedding_dim]``
                (assumed 2-D; ``backward`` sums bias gradients over axis 0).

        Returns:
            ``W2 @ ReLU(W1 @ x + b1) + b2 + x`` — same shape as the input.
        """
        hidden_pre_activation = input_data @ self.w1 + self.b1
        hidden_post_activation = np.maximum(hidden_pre_activation, 0.0)  # ReLU

        output = hidden_post_activation @ self.w2 + self.b2

        # Cache for backward(). The input belongs to the caller, so copy it
        # defensively; the hidden activations are freshly allocated here and
        # aliased nowhere else, so no copy is needed.
        self.input = input_data.copy()
        self.hidden_pre_activation = hidden_pre_activation
        self.hidden_post_activation = hidden_post_activation

        return output + input_data  # residual connection (no LayerNorm here)

    def backward(self, grads: np.ndarray, lr: float) -> np.ndarray:
        """Backpropagate through the block and update parameters via Adam.

        Args:
            grads: Gradient of the loss w.r.t. this block's output,
                same shape as the forward input.
            lr: Learning rate passed to each parameter's Adam step.

        Returns:
            Gradient of the loss w.r.t. the forward input (includes the
            residual path).

        Raises:
            RuntimeError: If called before ``forward`` has cached activations.
        """
        if self.input is None or self.hidden_pre_activation is None:
            raise RuntimeError("FeedForward.backward called before forward")

        input_data = self.input
        hidden_pre_activation = self.hidden_pre_activation
        hidden_post_activation = self.hidden_post_activation

        # Gradients for the second linear layer (output projection).
        grad_w2 = hidden_post_activation.T @ grads
        grad_b2 = np.sum(grads, axis=0, keepdims=True)  # Shape: [1, embedding_dim]

        # Gradient w.r.t. hidden_post_activation.
        grad_hidden_post_activation = grads @ self.w2.T

        # Gradient through ReLU: pass only where the pre-activation was
        # positive. Multiplying by the boolean mask keeps the incoming dtype
        # (a float32 cast here would needlessly mix precisions).
        grad_hidden_pre_activation = grad_hidden_post_activation * (hidden_pre_activation > 0.0)

        # Gradients for the first linear layer (expansion).
        grad_w1 = input_data.T @ grad_hidden_pre_activation
        grad_b1 = np.sum(grad_hidden_pre_activation, axis=0, keepdims=True)  # Shape: [1, hidden_dim]

        # Gradient w.r.t. input through the feed-forward path.
        grad_input_feedforward = grad_hidden_pre_activation @ self.w1.T

        # Forward was: output = W2(ReLU(W1*x + b1)) + b2 + x, so the residual
        # path contributes the upstream gradient unchanged.
        grad_input = grad_input_feedforward + grads

        # In-place parameter updates via Adam.
        self.optimizer_w2.step(self.w2, grad_w2, lr)
        self.optimizer_b2.step(self.b2, grad_b2, lr)
        self.optimizer_w1.step(self.w1, grad_w1, lr)
        self.optimizer_b1.step(self.b1, grad_b1, lr)

        return grad_input