import numpy as np
from self_attention import SelfAttention
from multi_head_attention import MultiHeadAttention
from feed_forward import FeedForward
from layer_norm import LayerNorm

class TransformerBlock:
    """One Transformer layer: an attention sublayer followed by a
    feed-forward sublayer, each followed by its own LayerNorm.

    NOTE(review): per the original comments, the residual (skip) additions
    appear to be applied inside the sublayers' forward methods, while this
    block adds the identity-path gradients itself in backward() — confirm
    this convention against the sublayer implementations.
    """

    def __init__(self, embedding_dim: int, hidden_dim: int, num_heads: int = 1):
        """Construct the sublayers.

        Args:
            embedding_dim: width of the embeddings flowing through the block.
            hidden_dim: inner width of the feed-forward sublayer.
            num_heads: >1 selects multi-head attention; 1 uses single-head.
        """
        self.attention = (
            MultiHeadAttention(embedding_dim, num_heads)
            if num_heads > 1
            else SelfAttention(embedding_dim)
        )
        self.feed_forward = FeedForward(embedding_dim, hidden_dim)
        self.norm1 = LayerNorm(embedding_dim)  # normalizes the attention output
        self.norm2 = LayerNorm(embedding_dim)  # normalizes the feed-forward output

    def layer_type(self) -> str:
        """Return the layer-kind identifier for this block."""
        return "TransformerBlock"

    def forward(self, input_data: np.ndarray) -> np.ndarray:
        """Run the block: attention -> norm1 -> feed-forward -> norm2.

        Residual connections are assumed to be handled inside the sublayers'
        own forward methods (see class note).
        """
        normed_attn = self.norm1.normalize(self.attention.forward(input_data))
        return self.norm2.normalize(self.feed_forward.forward(normed_attn))

    def backward(self, grads: np.ndarray, lr: float) -> np.ndarray:
        """Back-propagate through the block and update sublayer parameters.

        Each sublayer output is modeled as x + sublayer(x), so the gradient
        reaching x is the sublayer-path gradient plus the identity-path
        gradient (the gradient arriving at the sublayer output).

        Args:
            grads: dL/d(output of this block), i.e. dL/d(norm2 output).
            lr: learning rate forwarded to each sublayer's backward/update.

        Returns:
            dL/d(input of this block).
        """
        # Second LayerNorm: grads -> dL/d(feed-forward output).
        d_ffn_out = self.norm2.backward(grads, lr)
        # Feed-forward path plus the identity (residual) path.
        d_norm1_out = d_ffn_out + self.feed_forward.backward(d_ffn_out, lr)
        # First LayerNorm: -> dL/d(attention output).
        d_attn_out = self.norm1.backward(d_norm1_out, lr)
        # Attention path plus the identity (residual) path.
        return d_attn_out + self.attention.backward(d_attn_out, lr)
