import numpy as np
from typing import Optional, List
from vocab import Vocab
from adam import Adam

# Constants
MAX_SEQ_LEN = 80  # longest sequence supported; sizes the positional-embedding table

class Embeddings:
    """Learned token + positional embedding layer (pure NumPy).

    Maps a sequence of token IDs to the element-wise sum of a token-embedding
    row and a positional-embedding row, and on the backward pass accumulates
    gradients into both tables and applies an Adam step to each.
    """

    def __init__(self, vocab: "Vocab", embedding_dim: int = 128):
        self.embedding_dim = embedding_dim
        # One row per vocabulary word / per position, initialized N(0, 0.02).
        self.token_embeddings = self._init_embeddings(len(vocab.words), embedding_dim)
        self.positional_embeddings = self._init_positional_embeddings(MAX_SEQ_LEN, embedding_dim)
        # Saved by forward() so backward() knows which rows to update.
        self.cached_input: Optional[np.ndarray] = None
        self.token_optimizer = Adam((len(vocab.words), embedding_dim))
        self.positional_optimizer = Adam((MAX_SEQ_LEN, embedding_dim))

    @staticmethod
    def _init_embeddings(vocab_size: int, embedding_dim: int) -> np.ndarray:
        """Return a (vocab_size, embedding_dim) table drawn from N(0, 0.02)."""
        return np.random.normal(0.0, 0.02, (vocab_size, embedding_dim))

    @staticmethod
    def _init_positional_embeddings(max_seq_len: int, embedding_dim: int) -> np.ndarray:
        """Return a (max_seq_len, embedding_dim) table drawn from N(0, 0.02)."""
        return np.random.normal(0.0, 0.02, (max_seq_len, embedding_dim))

    @staticmethod
    def _validate_token_ids(ids: np.ndarray, vocab_size: int) -> None:
        """Raise ValueError if any ID lies outside [0, vocab_size).

        Bug fix: the original check only rejected IDs >= vocab_size, so a
        negative ID silently indexed from the end of the table.
        """
        if ids.size == 0:
            return
        bad_mask = (ids < 0) | (ids >= vocab_size)
        if bad_mask.any():
            bad = ids[bad_mask][0]
            raise ValueError(f"Token ID {bad} out of bounds for vocab size {vocab_size}")

    @staticmethod
    def _get_token_embeddings(embeddings: np.ndarray, token_ids: List[int]) -> np.ndarray:
        """Gather the embedding row for each token ID.

        Returns a new (len(token_ids), embedding_dim) array; raises
        ValueError for any out-of-range ID.
        """
        ids = np.asarray(token_ids, dtype=np.intp)
        Embeddings._validate_token_ids(ids, embeddings.shape[0])
        # Fancy indexing gathers (and copies) all rows in one vectorized op,
        # replacing the original per-row Python loop.
        return embeddings[ids, :]

    @staticmethod
    def _get_positional_embeddings(positional_encodings: np.ndarray, seq_len: int) -> np.ndarray:
        """Return the first seq_len rows of the positional table (a view)."""
        if seq_len > positional_encodings.shape[0]:
            raise ValueError(f"Sequence length {seq_len} exceeds maximum {positional_encodings.shape[0]}")
        return positional_encodings[:seq_len, :]

    def embed_tokens(self, token_ids: List[int]) -> np.ndarray:
        """Return token + positional embeddings, shape (len(token_ids), embedding_dim)."""
        token_embeds = self._get_token_embeddings(self.token_embeddings, token_ids)
        position_embeds = self._get_positional_embeddings(self.positional_embeddings, len(token_ids))
        return token_embeds + position_embeds  # Element-wise sum

    def layer_type(self) -> str:
        """Layer identifier used by the surrounding network code."""
        return "Embeddings"

    def forward(self, input_data: np.ndarray) -> np.ndarray:
        """Embed a sequence of token IDs.

        Args:
            input_data: array of token IDs, shape [1, sequence_length].

        Returns:
            [sequence_length, embedding_dim] array of summed embeddings.
        """
        self.cached_input = input_data.copy()  # retained for backward()
        token_ids = [int(x) for x in input_data.flatten()]
        return self.embed_tokens(token_ids)

    def backward(self, grads: np.ndarray, lr: float) -> np.ndarray:
        """Scatter grads into both embedding tables and apply one Adam step.

        Args:
            grads: upstream gradient, one row per input position.
            lr: learning rate passed through to the optimizers.

        Returns:
            A copy of grads, propagated unchanged to the previous layer.

        Raises:
            RuntimeError: if called before forward() (explicit, instead of
                the opaque crash on a None cache the original produced).
            ValueError: if a cached token ID is out of range.
        """
        if self.cached_input is None:
            raise RuntimeError("backward() called before forward()")
        ids = np.asarray([int(x) for x in self.cached_input.flatten()], dtype=np.intp)
        self._validate_token_ids(ids, self.token_embeddings.shape[0])
        seq_len = ids.shape[0]

        token_grads = np.zeros_like(self.token_embeddings)
        positional_grads = np.zeros_like(self.positional_embeddings)

        # np.add.at accumulates correctly when a token ID repeats in the
        # sequence (plain fancy-index += would keep only one contribution),
        # matching the original per-row += loop but vectorized.
        np.add.at(token_grads, ids, grads[:seq_len, :])
        positional_grads[:seq_len, :] += grads[:seq_len, :]

        self.token_optimizer.step(self.token_embeddings, token_grads, lr)
        self.positional_optimizer.step(self.positional_embeddings, positional_grads, lr)

        # Return gradient to propagate further back
        return grads.copy()