import numpy as np
from typing import List, Protocol, runtime_checkable
from vocab import Vocab

# Constants
MAX_SEQ_LEN = 80      # Hard cap on total token-sequence length during generation.
EMBEDDING_DIM = 128   # presumably the embedding width used by layers defined elsewhere — not referenced in this file
HIDDEN_DIM = 256      # presumably the hidden width used by layers defined elsewhere — not referenced in this file

@runtime_checkable
class Layer(Protocol):
    """Structural interface for a network layer usable inside an ``LLM``.

    Any object providing these three methods can appear in the ``network``
    list; ``@runtime_checkable`` additionally permits ``isinstance(obj, Layer)``
    checks (method presence only — signatures are not verified at runtime).
    """

    def layer_type(self) -> str:
        """Return a short human-readable name for this layer."""
        ...
    
    def forward(self, input_data: np.ndarray) -> np.ndarray:
        """Run the forward pass on ``input_data`` and return the layer's output."""
        ...
    
    def backward(self, grads: np.ndarray, lr: float) -> np.ndarray:
        """Consume the upstream gradient ``grads``, update parameters using
        learning rate ``lr``, and return the gradient w.r.t. this layer's input."""
        ...

class LLM:
    """A small autoregressive language model built from a stack of ``Layer``s.

    ``forward`` generates tokens greedily, one at a time, until the model
    emits ``</s>`` or the sequence reaches ``MAX_SEQ_LEN``. ``train`` runs
    plain next-token-prediction with cross-entropy loss.
    """

    # Punctuation treated as standalone tokens by the word tokenizer and
    # by the char-mode detokenizer (no space inserted around these).
    _PUNCTUATION = (":", "?", ".", "!", ";", ",")

    def __init__(self, vocab: "Vocab", network: "List[Layer]", tokenizer_mode: str = 'word'):
        """
        Args:
            vocab: token <-> id mapping; must expose ``encode`` (dict),
                ``decode`` (dict) and ``encode_word`` (returns id or None).
            network: ordered list of layers applied front-to-back in forward.
            tokenizer_mode: ``'word'`` (default) or ``'char'``.
        """
        self.vocab = vocab
        self.network = network
        self.tokenizer_mode = tokenizer_mode

    def network_description(self) -> str:
        """Return a comma-separated list of layer type names, in forward order."""
        return ", ".join(layer.layer_type() for layer in self.network)

    def predict(self, text: str) -> str:
        """Generate a continuation of *text* and decode it to a display string."""
        output_tokens = self.forward(text)
        if not output_tokens:
            return ""

        # Drop any generated ids that are not in the vocabulary's decode map.
        token_strs = [self.vocab.decode[token_id]
                      for token_id in output_tokens
                      if token_id in self.vocab.decode]

        if self.tokenizer_mode != 'char':
            # Word-based models: plain space join.
            return " ".join(token_strs)

        # Char-based models: insert a space only between two "regular" tokens —
        # never after punctuation, and never before punctuation or "</s>".
        pieces: List[str] = []
        for i, token in enumerate(token_strs):
            if (i > 0
                    and token_strs[i - 1] not in self._PUNCTUATION
                    and token not in self._PUNCTUATION
                    and token != "</s>"):
                pieces.append(" ")
            pieces.append(token)
        return "".join(pieces)

    def forward(self, text: str) -> List[int]:
        """Greedily generate tokens after *text*.

        Returns only the newly generated ids (the prompt is not included).
        Returns an empty list when the prompt is empty or already at/over
        ``MAX_SEQ_LEN``.
        """
        tokenized = self.tokenize(text)
        output_tokens: List[int] = []

        if not tokenized or len(tokenized) >= MAX_SEQ_LEN:
            return output_tokens

        # Loop-invariant: the end-of-sequence id never changes mid-generation.
        eos_id = self.vocab.encode_word("</s>")

        # Each iteration appends exactly one token, so this bound alone keeps
        # len(tokenized) <= MAX_SEQ_LEN (the old inner length check was
        # unreachable and has been removed).
        for _ in range(MAX_SEQ_LEN - len(tokenized)):
            input_data = np.array(tokenized, dtype=np.float32).reshape(1, -1)
            for layer in self.network:
                input_data = layer.forward(input_data)
            logits = input_data

            if logits.shape[0] == 0:
                break

            # Only the distribution over the last position matters.
            probs = self.softmax(logits[-1:, :])
            next_token = self.greedy_decode(probs)[-1]

            output_tokens.append(next_token)
            tokenized.append(next_token)

            if next_token == eos_id:
                break

        return output_tokens

    def train(self, data: List[str], epochs: int, lr: float):
        """Train on *data* with next-token prediction for *epochs* passes.

        Prints the per-epoch mean loss (averaged over all rows, including
        skipped ones, to keep the historical reporting behavior).
        """
        tokenized_data = [self.tokenize(input_text) for input_text in data]

        for epoch in range(epochs):
            total_loss = 0.0
            for training_row in tokenized_data:
                # Skip rows that are too short to train on, preventing the NaN bug.
                if len(training_row) < 3:
                    continue

                # Shifted-by-one targets: predict token t+1 from tokens <= t.
                input_ids = training_row[:-1]
                target_ids = training_row[1:]

                input_data = np.asarray(input_ids, dtype=np.float32).reshape(1, -1)
                for layer in self.network:
                    input_data = layer.forward(input_data)

                probs = self.softmax(input_data)
                total_loss += self.cross_entropy_loss_step(probs, target_ids)

                grads_output = self.compute_gradients_step(probs, target_ids)
                self.clip_gradients(grads_output, 5.0)
                for layer in reversed(self.network):
                    grads_output = layer.backward(grads_output, lr)

            # Guard the report against an empty dataset (ZeroDivisionError).
            denom = len(tokenized_data) or 1
            print(f"Epoch {epoch}: Loss = {total_loss / denom:.4f}")

    def tokenize(self, text: str) -> List[int]:
        """Convert *text* to token ids using the configured tokenizer mode."""
        if self.tokenizer_mode == 'char':
            return self._tokenize_chars(text)
        return self._tokenize_words(text)

    def _tokenize_chars(self, text: str) -> List[int]:
        # For Chinese, we split by space to separate special tokens ("用户:",
        # "</s>") from the character-based text.
        tokens: List[int] = []
        for part in text.split(' '):
            if not part:
                continue
            if part in self.vocab.encode:
                # The whole part is a known token — emit it as one unit.
                self._append_token(tokens, part)
            else:
                # Otherwise, tokenize character by character.
                for char in part:
                    self._append_token(tokens, char)
        return tokens

    def _tokenize_words(self, text: str) -> List[int]:
        # 'word' mode for English: whitespace split, punctuation split out
        # into standalone tokens.
        tokens: List[int] = []
        for word in text.split():
            if word == "</s>":
                self._append_token(tokens, word)
                continue

            current_word = ""
            for c in word:
                if c in self._PUNCTUATION:
                    # Punctuation terminates the current word and becomes
                    # its own token.
                    if current_word:
                        self._append_token(tokens, current_word)
                        current_word = ""
                    self._append_token(tokens, c)
                else:
                    current_word += c
            if current_word:
                self._append_token(tokens, current_word)
        return tokens

    def _append_token(self, tokens: List[int], piece: str) -> None:
        # encode_word returns None for out-of-vocabulary pieces; drop those.
        token_id = self.vocab.encode_word(piece)
        if token_id is not None:
            tokens.append(token_id)

    @staticmethod
    def softmax(logits: np.ndarray) -> np.ndarray:
        """Row-wise softmax, numerically stabilized by subtracting each row's max.

        Vectorized replacement for the previous per-row Python loop; output
        is identical up to floating-point evaluation order.
        """
        shifted = logits - np.max(logits, axis=1, keepdims=True)
        exp_values = np.exp(shifted)
        return exp_values / np.sum(exp_values, axis=1, keepdims=True)

    @staticmethod
    def greedy_decode(probs: np.ndarray) -> List[int]:
        """Return the argmax token id of each row as plain Python ints.

        ``int(...)`` keeps numpy integer scalars from leaking into token lists.
        """
        return [int(np.argmax(row)) for row in probs]

    @staticmethod
    def cross_entropy_loss_step(probs: np.ndarray, target: List[int]) -> float:
        """Mean negative log-likelihood of *target* under row-wise *probs*.

        Returns 0.0 on a row-count mismatch or an empty target (the empty
        case previously raised ZeroDivisionError).
        """
        if not target or probs.shape[0] != len(target):
            return 0.0
        loss = 0.0
        for row_idx, target_id in enumerate(target):
            # Clamp to avoid log(0) = -inf on collapsed probabilities.
            loss -= np.log(max(probs[row_idx, target_id], 1e-15))
        return loss / len(target)

    @staticmethod
    def compute_gradients_step(probs: np.ndarray, target: List[int]) -> np.ndarray:
        """Gradient of mean cross-entropy w.r.t. logits: (softmax - one_hot) / N.

        Raises:
            ValueError: if *probs* and *target* disagree on row count.
        """
        if probs.shape[0] != len(target):
            raise ValueError("Probs and target must have the same number of rows")

        grads = probs.copy()
        # Vectorized one-hot subtraction (replaces the per-row loop).
        grads[np.arange(len(target)), target] -= 1.0
        return grads / len(target)

    @staticmethod
    def clip_gradients(grads: np.ndarray, max_norm: float):
        """Scale *grads* in place so its global L2 norm is at most *max_norm*."""
        norm = float(np.linalg.norm(grads))
        if norm > max_norm:
            grads *= max_norm / norm
