import numpy as np
from typing import Optional
from adam import Adam

class LayerNorm:
    """Layer normalization over the feature axis (axis=1) of a 2-D input.

    Normalizes each row (token) of the input to zero mean and unit scale,
    then applies a learnable affine transform ``gamma * x_hat + beta``.
    NOTE(review): assumes 2-D input of shape (seq_len, embedding_dim) —
    confirm against callers.
    """

    def __init__(self, embedding_dim: int):
        self.epsilon = 1e-5  # Small constant keeping the divisor (std + epsilon) nonzero
        self.gamma = np.ones((1, embedding_dim))  # Learnable scale, broadcast over rows
        self.beta = np.zeros((1, embedding_dim))  # Learnable shift, broadcast over rows

        # Values cached by the forward pass for reuse in backward().
        self.cached_input: Optional[np.ndarray] = None
        self.cached_mean: Optional[np.ndarray] = None
        self.cached_std: Optional[np.ndarray] = None

        # One Adam optimizer state per parameter tensor.
        self.optimizer_gamma = Adam((1, embedding_dim))
        self.optimizer_beta = Adam((1, embedding_dim))

    def normalize(self, input_data: np.ndarray) -> np.ndarray:
        """Normalize each row and apply the affine transform.

        Caches input/mean/std so a subsequent backward() can reuse them.
        Note: epsilon is added to the std itself (not to the variance under
        the square root); backward() differentiates this same formulation.
        """
        mean = np.mean(input_data, axis=1, keepdims=True)  # Mean per token
        std = np.std(input_data, axis=1, keepdims=True)  # Std per token

        # Cache copies for the backward pass so later in-place mutation of
        # the caller's array cannot corrupt the gradient computation.
        self.cached_input = input_data.copy()
        self.cached_mean = mean.copy()
        self.cached_std = std.copy()

        normalized = (input_data - mean) / (std + self.epsilon)
        return self.gamma * normalized + self.beta

    def layer_type(self) -> str:
        """Return a human-readable identifier for this layer."""
        return "LayerNorm"

    def forward(self, input_data: np.ndarray) -> np.ndarray:
        """Forward pass; thin alias for normalize()."""
        return self.normalize(input_data)

    def backward(self, grads: np.ndarray, lr: float) -> np.ndarray:
        """Backprop through the layer and update gamma/beta via Adam.

        Args:
            grads: Gradient of the loss w.r.t. this layer's output; same
                shape as the input cached by the last forward pass.
            lr: Learning rate forwarded to the Adam optimizers.

        Returns:
            Gradient of the loss w.r.t. the layer's input.

        Raises:
            RuntimeError: If called before any forward pass.
        """
        if self.cached_input is None:
            # Previously this fell through to an obscure TypeError on None.
            raise RuntimeError("backward() called before forward()")

        input_data = self.cached_input
        mean = self.cached_mean
        std = self.cached_std

        normalized = (input_data - mean) / (std + self.epsilon)
        n_features = input_data.shape[1]  # Number of features per token

        # Gradients w.r.t. gamma and beta (summed over the token axis).
        grad_gamma = np.sum(normalized * grads, axis=0, keepdims=True)
        grad_beta = np.sum(grads, axis=0, keepdims=True)

        # Gradient w.r.t. normalized values.
        grad_normalized = self.gamma * grads

        # Chain rule through the variance. d(x_hat)/d(var) needs the raw
        # centered input (x - mean): the previous code used `normalized`
        # here, i.e. (x - mean)/(std + eps), which silently scaled the
        # variance-path gradient by ~1/std.
        variance = std * std + self.epsilon
        grad_var = (
            np.sum(grad_normalized * (input_data - mean), axis=1, keepdims=True)
            * (-0.5) / (variance * np.sqrt(variance))
        )
        # Chain rule through the mean (second term is ~0 analytically since
        # sum(x - mean) vanishes, kept for numerical completeness).
        grad_mean = (
            np.sum(grad_normalized, axis=1, keepdims=True) * (-1.0) / (std + self.epsilon)
            + grad_var * np.sum(input_data - mean, axis=1, keepdims=True) * (-2.0) / n_features
        )

        # Combine the three paths: direct, via variance, via mean.
        grad_input = (
            grad_normalized / (std + self.epsilon) +
            grad_var * 2.0 * (input_data - mean) / n_features +
            grad_mean / n_features
        )

        # Update learnable parameters. NOTE(review): assumes Adam.step
        # mutates the parameter array in place — confirm against adam.py.
        self.optimizer_gamma.step(self.gamma, grad_gamma, lr)
        self.optimizer_beta.step(self.beta, grad_beta, lr)

        return grad_input