import numpy as np
from typing import Optional, Tuple
from adam import Adam
from self_attention import SelfAttention

class MultiHeadAttention:
    """Multi-head attention with per-head input projections and a residual connection.

    Computation: forward(x) = concat_i( head_i(x @ P_i) ) @ w_out + x

    Each head operates on a ``head_dim``-sized linear projection of the input;
    head outputs are concatenated (head_dim * num_heads == embedding_dim) and
    mapped back to ``embedding_dim`` by ``w_out``, then a residual connection
    adds the original input. All weights are trained with per-parameter Adam
    optimizers inside ``backward``.
    """

    def __init__(self, embedding_dim: int, num_heads: int):
        assert embedding_dim % num_heads == 0, "embedding_dim must be divisible by num_heads"

        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.head_dim = embedding_dim // num_heads

        # One independent attention head per slot, each working in head_dim space.
        self.heads = [SelfAttention(self.head_dim) for _ in range(num_heads)]

        # Per-head linear projections (embedding_dim -> head_dim).
        # He-style init scaled by head_dim rather than embedding_dim.
        self.head_projections = []
        self.optimizer_head_projections = []
        for _ in range(num_heads):
            std = np.sqrt(2.0 / self.head_dim)
            proj = np.random.normal(0.0, std, (embedding_dim, self.head_dim))
            self.head_projections.append(proj)
            self.optimizer_head_projections.append(Adam((embedding_dim, self.head_dim)))

        # Output projection combining the concatenated heads back to embedding_dim.
        # head_dim * num_heads == embedding_dim is guaranteed by the assert above.
        std = np.sqrt(2.0 / (self.head_dim * num_heads))
        self.w_out = np.random.normal(0.0, std, (self.head_dim * num_heads, embedding_dim))
        self.optimizer_w_out = Adam((self.head_dim * num_heads, embedding_dim))

        # Caches populated by forward() and consumed by backward().
        self.cached_inputs: Optional[np.ndarray] = None
        self.cached_combined_output: Optional[np.ndarray] = None

    def project_to_head(self, input_data: np.ndarray, head_index: int) -> np.ndarray:
        """Project the full-dimension input into head ``head_index``'s subspace."""
        return input_data @ self.head_projections[head_index]

    def combine_heads(self, head_outputs: list) -> np.ndarray:
        """Concatenate per-head outputs along the last (feature) axis."""
        return np.concatenate(head_outputs, axis=-1)

    def layer_type(self) -> str:
        """Return the identifier string for this layer type."""
        return "MultiHeadAttention"

    def forward(self, input_data: np.ndarray) -> np.ndarray:
        """Run multi-head attention over ``input_data`` and return the output.

        Assumes input of shape (seq_len, embedding_dim) — the 2-D matmuls in
        backward() rely on this; TODO confirm against callers.
        Caches the input and the concatenated head outputs for backward().
        """
        self.cached_inputs = input_data.copy()

        head_outputs = []
        for i in range(self.num_heads):
            projected_input = self.project_to_head(input_data, i)
            head_output = self.heads[i].forward(projected_input)
            head_outputs.append(head_output)

        combined_output = self.combine_heads(head_outputs)
        self.cached_combined_output = combined_output.copy()

        projected_output = combined_output @ self.w_out

        # Residual connection: valid because w_out maps back to embedding_dim.
        output = projected_output + input_data
        return output

    def backward(self, grads: np.ndarray, lr: float) -> np.ndarray:
        """Backprop through the layer, update weights via Adam, return dL/d(input).

        Forward was: output = (combine_heads(heads(project(input))) @ w_out) + input,
        so the input gradient is the residual path plus every projection path.

        BUG FIX: downstream gradients must be computed with the *pre-update*
        (forward-time) weights. The previous code called ``optimizer.step()``
        first and then multiplied by the already-mutated matrices (``w_out``
        at the top, ``head_projections[i]`` inside the loop), which corrupts
        the gradient propagated to earlier layers. All uses of a weight for
        propagation now happen before its optimizer step.
        """
        # Residual path contributes grads directly to dL/d(input).
        grad_input_residual = grads.copy()

        # Propagate to the combined head outputs using the forward-time w_out,
        # BEFORE the optimizer mutates it in place.
        grad_combined = grads @ self.w_out.T

        # Update the output projection: dL/d(w_out) = combined.T @ grads.
        grad_w_out = self.cached_combined_output.T @ grads
        self.optimizer_w_out.step(self.w_out, grad_w_out, lr)

        # Undo the concatenation: one gradient slice per head.
        grad_heads = np.split(grad_combined, self.num_heads, axis=-1)

        # Accumulates dL/d(input) contributions from every head's projection path.
        grad_input_projections = np.zeros_like(self.cached_inputs)

        for i in range(self.num_heads):
            # Through the i-th attention head (this also updates its weights).
            # Input: dL/d(head_output_i); output: dL/d(projected_input_i).
            grad_projected_input = self.heads[i].backward(grad_heads[i], lr)

            # Propagate to the layer input with the forward-time projection,
            # BEFORE the optimizer mutates it in place.
            grad_input_projections += grad_projected_input @ self.head_projections[i].T

            # Update the i-th projection: dL/d(P_i) = input.T @ dL/d(projected_i).
            grad_head_proj = self.cached_inputs.T @ grad_projected_input
            self.optimizer_head_projections[i].step(self.head_projections[i], grad_head_proj, lr)

        # Total input gradient = residual path + all projection paths.
        return grad_input_residual + grad_input_projections