import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple

import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn

@dataclass
class ModelArgs:
    """Transformer hyperparameters; defaults match LLaMA 7B."""
    # llama 7B model hyperparameters
    dim : int = 4096                    # model / embedding dimension
    n_layers : int = 32                 # number of transformer blocks
    n_heads : int = 32                  # number of query attention heads
    n_kv_heads : Optional[int] = None   # key/value heads for GQA; None -> use n_heads
    vocab_size : int = 32000            # tokenizer vocabulary size
    hidden_dim : Optional[int] = None   # FFN hidden size; None -> derived from dim in FeedForward
    # MLP hidden layer size will be rounded up to a multiple of this value
    multiple_of : int = 256
    norm_eps : float = 1e-5             # epsilon added inside RMSNorm's rsqrt
    max_seq_len : int = 2048            # max sequence length (RoPE tables / causal mask)
    dropout : float = 0.0               # dropout probability used throughout
    
def precompute_freqs_cis(dim : int, end : int, theta : float = 10000.0):
    """Precompute the RoPE rotation tables.

    Returns a (cos, sin) pair of tensors, each of shape (end, dim // 2),
    where entry [t, i] is cos/sin of t * theta^(-2i/dim).
    """
    half = dim // 2
    # per-channel inverse frequencies: theta^(-2i/dim) for i in [0, dim/2)
    inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2)[:half].float() / dim))
    positions = torch.arange(end, device=inv_freq.device)
    # outer product gives the rotation angle for every (position, channel) pair
    angles = torch.outer(positions, inv_freq).float()
    return torch.cos(angles), torch.sin(angles)
    
def repeat_kv(
    x : torch.Tensor,
    n_rep : int
) -> torch.Tensor:
    """Duplicate each key/value head n_rep times along the head axis.

    Input (bs, seq_len, n_kv_heads, head_dim) becomes
    (bs, seq_len, n_kv_heads * n_rep, head_dim); head i is repeated
    consecutively so grouped query heads see their shared kv head.
    """
    bs, slen, n_kv_heads, head_dim = x.shape  # also validates x is 4-D
    if n_rep == 1:
        return x
    # equivalent to the expand+reshape trick: each kv head is copied n_rep times
    return torch.repeat_interleave(x, repeats=n_rep, dim=2)
    
def reshape_for_broadcasting(
    freqs_cis : torch.Tensor, 
    x : torch.Tensor) -> torch.Tensor:
    """Reshape a (seq_len, head_dim // 2) frequency table to broadcast against x.

    x has its sequence axis at dim 1 and channels at the last dim
    (e.g. (bs, seq_len, n_heads, head_dim // 2)); the result keeps those two
    sizes and is 1 everywhere else, e.g. (1, seq_len, 1, head_dim // 2).
    """
    ndim = x.ndim
    assert 0 <= 1 < ndim
    assert freqs_cis.shape == (x.shape[1], x.shape[-1])
    # keep the sequence axis (dim 1) and the channel axis (last dim); squeeze the rest
    target_shape = [
        size if axis in (1, ndim - 1) else 1
        for axis, size in enumerate(x.shape)
    ]
    return freqs_cis.view(target_shape)
    
def apply_rotary_pos_emb(
    xq : torch.Tensor,
    xk : torch.Tensor,
    freqs_cos : torch.Tensor,
    freqs_sin : torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Apply rotary positional embeddings (RoPE) to queries and keys.

    xq: (bs, seq_len, n_heads, head_dim), xk: (bs, seq_len, n_kv_heads, head_dim);
    freqs_cos / freqs_sin: (seq_len, head_dim // 2). Each consecutive pair of
    channels is treated as a complex number and rotated by the position's angle.
    Returns (xq, xk) rotated, with the input shapes and dtypes preserved.
    """
    # view the last dim as interleaved (real, imag) pairs and split them
    q_real, q_imag = xq.float().reshape(xq.shape[:-1] + (-1, 2)).unbind(-1)
    k_real, k_imag = xk.float().reshape(xk.shape[:-1] + (-1, 2)).unbind(-1)
    # q_real / q_imag: (bs, seq_len, n_heads, head_dim // 2)

    # reshape the tables to (1, seq_len, 1, head_dim // 2) so they broadcast
    cos = reshape_for_broadcasting(freqs_cos, q_real)
    sin = reshape_for_broadcasting(freqs_sin, q_imag)

    # complex multiply: (a + bi) * (cos + i sin)
    q_rot_real = q_real * cos - q_imag * sin
    q_rot_imag = q_real * sin + q_imag * cos
    k_rot_real = k_real * cos - k_imag * sin
    k_rot_imag = k_real * sin + k_imag * cos

    # re-interleave the rotated (real, imag) pairs and flatten back to head_dim
    xq_out = torch.stack([q_rot_real, q_rot_imag], dim=-1).flatten(3)
    xk_out = torch.stack([k_rot_real, k_rot_imag], dim=-1).flatten(3)

    return xq_out.type_as(xq), xk_out.type_as(xk)
    
class Attention(nn.Module):
    """Multi-head self-attention with RoPE and grouped-query (shared KV) heads.

    Uses PyTorch's fused scaled_dot_product_attention when available
    (PyTorch >= 2.0), otherwise a manual implementation with a precomputed
    causal mask buffer.
    """
    def __init__(self, args : ModelArgs):
        super().__init__()
        # resolve GQA head count: None means full multi-head attention
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        assert args.n_heads % self.n_kv_heads == 0
        model_parallel_size = 1
        self.n_local_heads = args.n_heads // model_parallel_size
        self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
        # how many query heads share each kv head
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = args.dim // args.n_heads
        self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
        # BUGFIX: use the resolved self.n_kv_heads here, not args.n_kv_heads,
        # which may be None (the default) and crashed with TypeError.
        self.wk = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wv = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)
        self.atten_dropout = nn.Dropout(p = args.dropout)
        self.residual_dropout = nn.Dropout(p = args.dropout)
        self.dropout = args.dropout
        
        # use flash attention or manual implementation
        self.flash = hasattr(torch.nn.functional, "scaled_dot_product_attention")
        if not self.flash:
            print("Using slow attention implementation. Flash Attention requires Pytorch >= 2.0")
            # causal mask: 0 on/below the diagonal, -inf strictly above it
            mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float('-inf'))
            mask = torch.triu(mask, diagonal=1)
            self.register_buffer('mask', mask)
            
    def forward(
        self,
        x : torch.Tensor,
        freqs_cos : torch.Tensor,
        freqs_sin : torch.Tensor
    ):
        """Causal self-attention over x: (batch_size, seq_len, dim) -> same shape."""
        batch_size, seq_len, _ = x.shape
        
        # QKV projections
        xq : torch.Tensor = self.wq(x)
        xk : torch.Tensor = self.wk(x)
        xv : torch.Tensor = self.wv(x)
        
        # split QKV into heads
        xq = xq.view(batch_size, seq_len, self.n_local_heads, self.head_dim)
        xk = xk.view(batch_size, seq_len, self.n_kv_heads, self.head_dim)
        xv = xv.view(batch_size, seq_len, self.n_kv_heads, self.head_dim)
        
        # RoPE relative positional embedding applied to queries and keys
        xq, xk = apply_rotary_pos_emb(xq, xk, freqs_cos, freqs_sin)
        
        # grouped multiquery attention: expand out keys and values
        xk = repeat_kv(xk, self.n_rep) # (batch_size, seq_len, n_local_heads, head_dim)
        xv = repeat_kv(xv, self.n_rep)
        
        # move heads before the sequence axis for the attention matmuls
        xq = xq.transpose(1, 2) # (batch_size, n_local_heads, seq_len, head_dim)
        xk = xk.transpose(1, 2)
        xv = xv.transpose(1, 2)
        
        if self.flash:
            # fused kernel; is_causal=True applies the causal mask internally
            output = torch.nn.functional.scaled_dot_product_attention(xq, xk, xv, attn_mask=None, dropout_p=self.dropout if self.training else 0.0, is_causal=True)
        else:
            # manual implementation: scaled QK^T, causal mask, softmax, dropout, V
            scores = torch.matmul(xq, xk.transpose(2, 3)) / math.sqrt(self.head_dim)
            # scores shape : batch_size, n_local_heads, seq_len, seq_len
            assert hasattr(self, "mask")
            scores = scores + self.mask[:, :, :seq_len, :seq_len]
            # softmax in float32 for stability, then cast back
            scores = F.softmax(scores.float(), dim = -1).type_as(xq)
            scores = self.atten_dropout(scores)
            output = torch.matmul(scores, xv)
            # output shape : batch_size, n_local_heads, seq_len, head_dim
            
        # restore time as batch dimension and concat heads
        output = output.transpose(1, 2).contiguous().view(batch_size, seq_len, -1)
        
        # final projection into the residual stream
        output = self.wo(output)
        output = self.residual_dropout(output)
        return output
       
class FeedForward(nn.Module):
    """SwiGLU feed-forward network: w2(silu(w1(x)) * w3(x)) with dropout."""

    def __init__(self, dim : int, hidden_dim : int, multiple_of : int, dropout : float):
        super().__init__()
        if hidden_dim is None:
            # LLaMA sizing: 2/3 of 4*dim, rounded UP to a multiple of multiple_of
            # (fixed a stuttered double assignment "hidden_dim = hidden_dim = ...")
            hidden_dim = 4 * dim
            hidden_dim = int(2 * hidden_dim / 3)
            hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        self.w1 = nn.Linear(dim, hidden_dim, bias = False)  # gate projection
        self.w2 = nn.Linear(hidden_dim, dim, bias = False)  # down projection
        self.w3 = nn.Linear(dim, hidden_dim, bias = False)  # up projection
        self.dropout = nn.Dropout(dropout)
        
    def forward(self, x : torch.Tensor):
        # SwiGLU: silu-gated elementwise product, then project back down
        return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))

class RMSNorm(nn.Module):
    """Root-mean-square layer normalization (no mean subtraction, no bias)."""

    def __init__(self, dim : int, eps : float):
        super().__init__()
        self.eps = eps  # added inside rsqrt for numerical stability
        self.weight = nn.Parameter(torch.ones(dim))
        
    def _norm(self, x : torch.Tensor):
        # divide by the RMS of the last dimension
        return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
    
    def forward(self, x : torch.Tensor):
        # BUGFIX: compute statistics in float32 and cast back AFTER _norm.
        # The old code did x.float().type_as(x) BEFORE _norm, which is a no-op
        # round-trip, so normalization still ran in the input's precision.
        output = self._norm(x.float()).type_as(x)
        return output * self.weight
    
class TransformerBlock(nn.Module):
    """One pre-norm transformer layer: attention and SwiGLU MLP, each residual."""

    def __init__(self, layer_idx : int, args : ModelArgs):
        super().__init__()
        self.n_heads = args.n_heads
        self.dim = args.dim
        self.head_dim = args.dim // args.n_heads
        self.layer_id = layer_idx
        # sub-modules: attention + feed-forward, each with its own pre-norm
        self.attention = Attention(args)
        self.attention_norm = RMSNorm(args.dim, eps = args.norm_eps)
        self.feed_forward = FeedForward(
            dim = args.dim,
            hidden_dim = args.hidden_dim,
            multiple_of = args.multiple_of,
            dropout = args.dropout
        )
        self.ffn_norm = RMSNorm(args.dim, eps = args.norm_eps)
        
    def forward(
        self,
        x : torch.Tensor,
        freqs_cos : torch.Tensor,
        freqs_sin : torch.Tensor
    ):
        # pre-norm residual attention ...
        attn_out = self.attention.forward(self.attention_norm.forward(x), freqs_cos, freqs_sin)
        h = x + attn_out
        # ... followed by pre-norm residual feed-forward
        return h + self.feed_forward.forward(self.ffn_norm.forward(h))
         
class Transformer(nn.Module):
    """LLaMA-style decoder-only transformer language model.

    Tied input/output embeddings, RoPE positional encoding, pre-norm blocks.
    With targets, forward() returns full-sequence logits and stores the
    cross-entropy loss in self.last_loss; without targets it returns only
    the last position's logits (inference fast path).
    """
    # loss of the most recent forward() call with targets, else None
    last_loss : Optional[torch.Tensor]
    
    def __init__(self, params : ModelArgs):
        super().__init__()
        self.params = params
        self.vocab_size = params.vocab_size
        self.n_layers = params.n_layers
        
        # (removed dead leftover "self.a = nn.Linear" — it was never used)
        self.token_embedding = nn.Embedding(params.vocab_size, params.dim)
        self.dropout = nn.Dropout(p = params.dropout)
        self.layers = nn.ModuleList()
        for layer_idx in range(params.n_layers):
            self.layers.append(TransformerBlock(layer_idx, params))
        self.norm = RMSNorm(params.dim, eps=params.norm_eps)
        self.output = nn.Linear(params.dim, params.vocab_size, bias=False)
        
        # share the unembedding parameters with the embedding layer (weight tying)
        self.token_embedding.weight = self.output.weight
        
        # precompute the RoPE cos/sin tables,
        # each of shape (max_seq_len, (dim // n_heads) // 2)
        freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len)
        self.register_buffer('freqs_cos', freqs_cos)
        self.register_buffer('freqs_sin', freqs_sin)
        
        # init all weights
        self.apply(self._init_weights)
        
        # apply special scaled init to the residual projections
        for pn, p in self.named_parameters():
            if pn.endswith("w3.weight") or pn.endswith("wo.weight"):
                torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * params.n_layers))
        
        # Initialize attribute for the loss of the last forward call.
        self.last_loss = None
        
    def _init_weights(self, module):
        # BUGFIX: the Embedding branch was nested inside the Linear branch
        # (as an elif of "if module.bias is not None"), so it never ran.
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean = 0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean = 0.0, std=0.02)
                
        
    def forward(self, tokens : torch.Tensor, targets : Optional[torch.Tensor] = None) -> torch.Tensor:
        """Run the model on tokens (batch_size, seq_len) and return logits."""
        batch_size, seq_len = tokens.shape
        h = self.token_embedding(tokens)  # (batch_size, seq_len, dim)
        h = self.dropout(h)
            
        # slice the precomputed RoPE tables down to the current sequence length
        freqs_cos = self.freqs_cos[:seq_len]
        freqs_sin = self.freqs_sin[:seq_len]
            
        for layer in self.layers:
            h = layer.forward(h, freqs_cos, freqs_sin)
        h = self.norm.forward(h)
            
        if targets is not None:
            # if we are given some desired targets also calculate the loss
            logits = self.output(h)
            # flatten (batch, seq) for cross entropy; -1 targets are ignored (padding)
            self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
        else:
            # inference: only the last position is needed; list [-1] keeps the time dim
            logits = self.output(h[:, [-1], :])
            
        return logits
        
    @torch.inference_mode()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """Autoregressively sample max_new_tokens continuations of idx (batch, t)."""
        for _ in range(max_new_tokens):
            # if the sequence context is growing too long, we must crop it at block size
            idx_cond = idx if idx.size(1) <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]
            # forward the model to get the logits for the index in the sequence
            logits = self.forward(idx_cond)
            # crop to get the final time step
            logits = logits[:, -1, :]
            if temperature == 0.0:
                # greedy decoding
                _, idx_next = torch.topk(logits, k = 1, dim = -1)
            else:
                # pluck the logits at the final step and scale by desired temperature
                logits = logits / temperature
                if top_k is not None:
                    value, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                    # BUGFIX: was value[: [-1]] (invalid slice); mask out everything
                    # below each row's k-th largest logit
                    logits[logits < value[:, [-1]]] = -float('Inf')
                probs = F.softmax(logits, dim = -1)
                idx_next = torch.multinomial(probs, num_samples=1)
            # append the new token to the sequence
            idx = torch.cat([idx, idx_next], dim = 1)
        
        return idx