import numpy as np
from typing import Optional, Tuple
from adam import Adam

# Constants
EMBEDDING_DIM = 128

class SelfAttention:
    """Single-head scaled dot-product self-attention with causal masking
    and a residual connection.

    Expects a 2-D input of shape (seq_len, embedding_dim) -- the
    forward/backward math below (``q @ k.T``, ``input.T @ grad``) is
    written for a single unbatched sequence.  Weight updates are
    delegated to per-matrix Adam optimizers inside ``backward``.
    """

    def __init__(self, embedding_dim: int):
        """Create projection matrices and per-matrix optimizer state.

        Args:
            embedding_dim: Width of the input embeddings; also used as
                the query/key/value dimension.
        """
        self.embedding_dim = embedding_dim

        # He initialization: std = sqrt(2 / fan_in).  (The formula is
        # specifically He's; Xavier would use sqrt(2 / (fan_in + fan_out)).)
        std = np.sqrt(2.0 / embedding_dim)

        self.w_q = np.random.normal(0.0, std, (embedding_dim, embedding_dim))
        self.w_k = np.random.normal(0.0, std, (embedding_dim, embedding_dim))
        self.w_v = np.random.normal(0.0, std, (embedding_dim, embedding_dim))

        # Forward-pass caches consumed by backward(); None until
        # forward() has run at least once.
        self.cached_input: Optional[np.ndarray] = None
        self.cached_q: Optional[np.ndarray] = None
        self.cached_k: Optional[np.ndarray] = None
        self.cached_v: Optional[np.ndarray] = None
        self.cached_attn_weights: Optional[np.ndarray] = None

        # One Adam state object per weight matrix.  NOTE(review): assumes
        # Adam.step(param, grad, lr) updates `param` in place -- confirm
        # against the adam module.
        self.optimizer_w_q = Adam((embedding_dim, embedding_dim))
        self.optimizer_w_k = Adam((embedding_dim, embedding_dim))
        self.optimizer_w_v = Adam((embedding_dim, embedding_dim))

    def compute_qkv(self, input_data: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Project the input into query, key and value spaces."""
        q = input_data @ self.w_q
        k = input_data @ self.w_k
        v = input_data @ self.w_v
        return q, k, v

    def attention(self, q: np.ndarray, k: np.ndarray, v: np.ndarray) -> np.ndarray:
        """Scaled dot-product attention with a causal mask.

        Caches the softmax attention weights for use in ``backward``.
        """
        scale = np.sqrt(self.embedding_dim)
        scores = (q @ k.T) / scale

        # Causal mask: position i may only attend to positions <= i.
        # A large negative constant is used instead of -inf so the
        # softmax never produces NaN.  For seq_len == 1 the strict
        # upper triangle is empty (mask is all zeros), so no special
        # case is needed.
        seq_len = scores.shape[0]
        mask = np.triu(np.ones((seq_len, seq_len)), k=1) * -1e9
        scores = scores + mask

        weights = self.softmax(scores)
        self.cached_attn_weights = weights.copy()
        return weights @ v

    def softmax(self, scores: np.ndarray) -> np.ndarray:
        """Numerically stable softmax along the last axis."""
        # Subtracting the row max keeps exp() from overflowing.
        exp_scores = np.exp(scores - np.max(scores, axis=-1, keepdims=True))
        return exp_scores / np.sum(exp_scores, axis=-1, keepdims=True)

    @staticmethod
    def softmax_backward(softmax_output: np.ndarray, grad_output: np.ndarray) -> np.ndarray:
        """Backpropagate through softmax: J^T g = s * (g - <s, g>) per row."""
        dot_product = np.sum(softmax_output * grad_output, axis=-1, keepdims=True)
        return softmax_output * (grad_output - dot_product)

    def layer_type(self) -> str:
        """Return a human-readable identifier for this layer."""
        return "SelfAttention"

    def forward(self, input_data: np.ndarray) -> np.ndarray:
        """Apply attention to ``input_data`` and add a residual connection.

        Caches the input, projections and attention weights for backward().
        """
        self.cached_input = input_data.copy()
        q, k, v = self.compute_qkv(input_data)
        self.cached_q, self.cached_k, self.cached_v = q.copy(), k.copy(), v.copy()
        attention_output = self.attention(q, k, v)
        return attention_output + input_data  # residual connection

    def backward(self, grads: np.ndarray, lr: float) -> np.ndarray:
        """Backpropagate ``grads``, update weights via Adam, return input grads.

        Args:
            grads: Gradient of the loss w.r.t. this layer's output,
                same shape as the forward input.
            lr: Learning rate forwarded to the Adam optimizers.

        Raises:
            RuntimeError: if called before forward() has filled the caches.
        """
        if self.cached_input is None or self.cached_attn_weights is None:
            raise RuntimeError("backward() called before forward(); caches are empty")

        grad_attention_output = grads
        # Residual path: d(out)/d(input) passes grads through unchanged.
        grad_input_residual = grads

        # out = weights @ v
        grad_weights = grad_attention_output @ self.cached_v.T
        grad_v = self.cached_attn_weights.T @ grad_attention_output

        # Through the softmax.  The additive causal mask is a constant,
        # so it contributes no gradient term of its own.
        grad_scores = self.softmax_backward(self.cached_attn_weights, grad_weights)

        # scores = (q @ k.T) / scale
        scale = np.sqrt(self.embedding_dim)
        grad_q = (grad_scores @ self.cached_k) / scale
        grad_k = (grad_scores.T @ self.cached_q) / scale

        # q = input @ w_q  (likewise for k and v)
        grad_w_q = self.cached_input.T @ grad_q
        grad_w_k = self.cached_input.T @ grad_k
        grad_w_v = self.cached_input.T @ grad_v

        grad_input_attention = (grad_q @ self.w_q.T) + (grad_k @ self.w_k.T) + (grad_v @ self.w_v.T)

        total_grad_input = grad_input_attention + grad_input_residual

        self.optimizer_w_q.step(self.w_q, grad_w_q, lr)
        self.optimizer_w_k.step(self.w_k, grad_w_k, lr)
        self.optimizer_w_v.step(self.w_v, grad_w_v, lr)

        return total_grad_input
