from torch import nn
from torch.nn import functional as F
import torch
from LMConfig import LMConfig
from typing import Optional,Tuple
import math
from typing import Optional,Tuple,Union
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast
def precompute_freqs_cis(d: int, seq_len: int, base: int = 10000):
    """Precompute the complex rotary-embedding (RoPE) table.

    Returns a (seq_len, d // 2) complex64 tensor whose entry [t, i] is the
    unit complex number exp(j * t * base**(-2i/d)) — the rotation applied to
    the i-th frequency pair at position t.
    """
    inv_freq = (base ** (torch.arange(0, d, 2).float() / d)).reciprocal()
    positions = torch.arange(seq_len, device=inv_freq.device)
    angles = torch.outer(positions, inv_freq).float()
    # polar(magnitude=1, angle) -> cos(angle) + j*sin(angle)
    return torch.polar(torch.ones_like(angles), angles)
def apply_rotary_emd(xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Apply rotary position embeddings to query and key tensors.

    Args:
        xq, xk: (bsz, seq_len, n_heads, head_dim) real tensors.
        freqs_cis: (seq_len, head_dim // 2) complex rotation table from
            precompute_freqs_cis.

    Returns:
        Rotated (xq, xk) with the same shapes and dtypes as the inputs.
    """
    # Pair up the last dimension into complex numbers: (..., head_dim//2).
    xq_c = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_c = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
    # Reshape to (1, seq_len, 1, head_dim//2) so the table broadcasts over
    # batch and heads (the raw (seq, d/2) shape does not broadcast correctly).
    freqs_cis = freqs_cis.view(1, xq_c.shape[1], 1, xq_c.shape[-1])
    # flatten(3) merges the (d/2, 2) real/imag pair back into head_dim.
    xq_out = torch.view_as_real(xq_c * freqs_cis).flatten(3)
    xk_out = torch.view_as_real(xk_c * freqs_cis).flatten(3)
    # type_as the ORIGINAL inputs (not the complex intermediates) to restore dtype.
    return xq_out.type_as(xq), xk_out.type_as(xk)

def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Duplicate each key/value head n_rep times along the head axis.

    Turns a grouped-query (bs, slen, n_kv_heads, head_dim) tensor into
    (bs, slen, n_kv_heads * n_rep, head_dim) so it matches the query heads.
    Returns the input unchanged when n_rep == 1.
    """
    if n_rep == 1:
        return x
    bs, slen, n_kv_heads, head_dim = x.shape
    # expand() is a zero-copy broadcast; reshape materializes the repeats.
    expanded = x.unsqueeze(3).expand(bs, slen, n_kv_heads, n_rep, head_dim)
    return expanded.reshape(bs, slen, n_kv_heads * n_rep, head_dim)
class RMSNorm(nn.Module):
    """Root-mean-square layer normalization (no mean-centering, no bias).

    Output: g * x / sqrt(mean(x^2, last_dim) + eps), computed in float32
    and cast back to the input dtype.
    """

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps  # numerical-stability floor inside the rsqrt
        self.g = torch.nn.Parameter(torch.ones(dim))  # learnable per-dim gain

    def _norm(self, x: torch.Tensor) -> torch.Tensor:
        # Scale x by the reciprocal RMS of its last dimension.
        # (The original returned only the rsqrt factor and never multiplied
        # by x, producing g * rsqrt(...) instead of a normalized x.)
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x.float() (not float(x), which raises on non-scalar tensors)
        # upcasts for stability; type_as restores the caller's dtype.
        return self.g * self._norm(x.float()).type_as(x)

class Attention(nn.Module):
    """Multi-head self-attention with RoPE, grouped-query KV heads (GQA) and
    an optional key/value cache for incremental decoding."""

    def __init__(self, args: "LMConfig"):
        super().__init__()
        # Fall back to full multi-head attention when n_kv_heads is unset.
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        assert args.n_heads % self.n_kv_heads == 0
        self.n_local_heads = args.n_heads
        # Use the resolved value, not the (possibly None) config field.
        self.n_local_kv_heads = self.n_kv_heads
        self.n_rep = self.n_local_heads // self.n_local_kv_heads  # query heads per KV head
        self.head_dim = args.dim // args.n_heads
        self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
        self.wk = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wv = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)
        self.attn_dropout = nn.Dropout(args.dropout)
        self.resid_dropout = nn.Dropout(args.dropout)
        self.dropout = args.dropout
        # Use PyTorch's fused SDPA kernel when available and enabled in config.
        self.flash = hasattr(torch.nn.functional, "scaled_dot_product_attention") and args.flash_attn
        # Additive causal mask: upper triangle = -inf, sliced per sequence length.
        mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float('-inf'))
        mask = torch.triu(mask, diagonal=1)
        self.register_buffer("mask", mask, persistent=False)

    def forward(self,
                x: torch.Tensor,
                pos_cis: torch.Tensor,
                past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                use_cache: bool = False,
                ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
        """Attend over x (bsz, seq_len, dim); returns (output, new_kv_cache)."""
        bsz, seq_len, _ = x.shape
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
        # Queries use ALL heads; keys/values use the (possibly smaller) KV count.
        # (The original viewed xq with n_local_kv_heads, which breaks under GQA.)
        xq = xq.view(bsz, seq_len, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)

        # Rotate queries/keys by their positions (no stray self argument).
        xq, xk = apply_rotary_emd(xq, xk, pos_cis)
        # Prepend cached keys/values from earlier decoding steps.
        if past_key_value is not None:
            xk = torch.cat([past_key_value[0], xk], dim=1)
            xv = torch.cat([past_key_value[1], xv], dim=1)
        past_kv = (xk, xv) if use_cache else None
        xq, xk, xv = (
            xq.transpose(1, 2),
            repeat_kv(xk, self.n_rep).transpose(1, 2),
            repeat_kv(xv, self.n_rep).transpose(1, 2),
        )
        if self.flash and seq_len != 1:
            # is_causal=True is only valid when query and key lengths match,
            # so skip the fused kernel for single-token cached decode.
            # Dropout applies only in training (not gated on use_cache).
            dropout_p = self.dropout if self.training else 0.0
            output = F.scaled_dot_product_attention(
                xq, xk, xv, attn_mask=None, dropout_p=dropout_p, is_causal=True)
        else:
            scores = (xq @ xk.transpose(-2, -1)) / math.sqrt(self.head_dim)
            # For seq_len == 1 (cached decode) this adds a zero scalar, which is
            # correct: the single new token may attend to the whole prefix.
            scores += self.mask[:, :, :seq_len, :seq_len]
            scores = F.softmax(scores.float(), dim=-1).type_as(xq)
            scores = self.attn_dropout(scores)
            output = scores @ xv
        output = output.transpose(1, 2).reshape(bsz, seq_len, -1)
        output = self.resid_dropout(self.wo(output))
        return output, past_kv
class FeedForward(nn.Module):
    """SwiGLU feed-forward block: w2(silu(w1(x)) * w3(x)) with dropout.

    If config.hidden_dim is None it is derived from dim following the LLaMA
    convention: 2/3 of 4*dim, rounded UP to a multiple of config.multiple_of.
    """

    def __init__(self, config: "LMConfig"):
        super().__init__()
        if config.hidden_dim is None:
            hidden_dim = 4 * config.dim
            hidden_dim = int(2 * hidden_dim / 3)
            # Round UP to the nearest multiple of config.multiple_of.
            # (The original misspelled 'multiple_of' -> AttributeError, and its
            # formula m*(h + (m-1)//m) did not actually round to a multiple.)
            config.hidden_dim = config.multiple_of * (
                (hidden_dim + config.multiple_of - 1) // config.multiple_of)
        self.w1 = nn.Linear(config.dim, config.hidden_dim, bias=False)
        self.w2 = nn.Linear(config.hidden_dim, config.dim, bias=False)
        self.w3 = nn.Linear(config.dim, config.hidden_dim, bias=False)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        # SwiGLU gating, projected back to model dim, then dropout.
        return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
        
class GPT2_5Block(nn.Module):
    """One pre-norm transformer decoder layer: RMSNorm -> attention and
    RMSNorm -> SwiGLU FFN, each wrapped in a residual connection."""

    def __init__(self, layer_id: int, config: LMConfig):
        super().__init__()
        self.n_heads = config.n_heads
        self.dim = config.dim
        self.head_dim = config.dim // config.n_heads
        self.attn = Attention(config)

        self.layer_id = layer_id
        self.attn_norm = RMSNorm(config.dim, eps=config.rms_norm_eps)
        self.ffn_norm = RMSNorm(config.dim, eps=config.rms_norm_eps)
        self.ffn = FeedForward(config)

    def forward(self, x, pos_cis, past_key_value=None, use_cache: bool = False):
        """Returns (layer output, this layer's KV cache tuple or None)."""
        attn_out, present_kv = self.attn(
            self.attn_norm(x),
            pos_cis,
            past_key_value=past_key_value,
            use_cache=use_cache,
        )
        residual = x + attn_out                                # residual around attention
        out = residual + self.ffn(self.ffn_norm(residual))     # residual around FFN
        return out, present_kv
    
class GPT2_5LM(nn.Module):
    """Decoder-only causal language model: tied-embedding transformer with
    RoPE position encoding, RMSNorm, and KV-cached generation."""
    config_class = LMConfig

    def __init__(self, params: LMConfig):
        # super().__init__() MUST run before any attribute/module assignment
        # on an nn.Module (the original assigned self.params first -> crash).
        super().__init__()
        params = params or LMConfig()
        self.params = params
        self.config = params
        self.vocab_size = params.vocab_size
        self.n_layers = params.n_layers  # integer layer count
        self.tok_embd = nn.Embedding(params.vocab_size, params.dim)

        self.dropout = nn.Dropout(params.dropout)
        # Keep the ModuleList in self.layers (the original clobbered the
        # integer self.n_layers, while forward() iterated self.layers).
        self.layers = nn.ModuleList([GPT2_5Block(l, params) for l in range(params.n_layers)])

        self.norm = RMSNorm(params.dim, eps=params.rms_norm_eps)
        self.output = nn.Linear(params.dim, params.vocab_size, bias=False)

        # Weight tying between the input embedding and the output projection.
        self.tok_embd.weight = self.output.weight

        self.register_buffer(
            "pos_cis",
            precompute_freqs_cis(d=params.dim // params.n_heads, seq_len=params.max_seq_len),
            persistent=False)
        # Reused output container (NOTE: shared across forward calls).
        self.OUT = CausalLMOutputWithPast()

    def forward(self,
                input_ids: Optional[torch.Tensor] = None,
                past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
                use_cache: bool = True,
                logits_to_keep: Union[int, torch.Tensor] = 0,
                **args):
        """Run the model over input_ids (bsz, seq_len).

        Returns a CausalLMOutputWithPast carrying logits, last_hidden_state,
        aux_loss and per-layer past_key_values. `start_pos` (kwarg) offsets
        the RoPE table when decoding with a cache.
        """
        past_key_values = past_key_values or [None] * len(self.layers)
        start_pos = args.get("start_pos", 0)
        # self.tok_embd is the embedding module (the original called the
        # nonexistent self.embedding); .size(1) is a method, not a subscript.
        h = self.dropout(self.tok_embd(input_ids))
        pos_cis = self.pos_cis[start_pos:start_pos + input_ids.size(1)]
        new_past_kvs = []  # distinct name: the original loop clobbered its own list
        for l, layer in enumerate(self.layers):
            h, layer_past = layer(
                h, pos_cis,
                past_key_value=past_key_values[l],  # block kwarg is singular
                use_cache=use_cache,
            )
            new_past_kvs.append(layer_past)
        # logits_to_keep == 0 -> slice(0, None): keep logits for all positions.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.output(self.norm(h)[:, slice_indices, :])
        # FeedForward defines no auxiliary loss, so default to 0 per layer
        # (the original attribute access raised AttributeError).
        aux_loss = sum(getattr(l.ffn, "aux_loss", 0) for l in self.layers if isinstance(l.ffn, FeedForward))
        self.OUT.__setitem__("last_hidden_state", h)
        self.OUT.__setitem__("logits", logits)
        self.OUT.__setitem__("aux_loss", aux_loss)
        self.OUT.__setitem__("past_key_values", new_past_kvs)
        return self.OUT

    @torch.inference_mode()
    def generate(self, input_ids, eos_token_id=2, max_new_tokens=1024, temperature=0.75, top_p=0.90,
                 stream=False, rp=1., use_cache=True, pad_token_id=0, num_return_sequences=1, **args):
        """Sample continuations for each row of input_ids.

        stream=True returns the token generator directly; otherwise each
        prompt (padding stripped) is decoded num_return_sequences times and
        the results are right-padded to a common length.
        """
        # Streaming generation: hand back the generator itself.
        if stream:
            return self._stream(input_ids, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args)

        # Batch generation, one prompt at a time.
        generated = []
        for i in range(input_ids.size(0)):
            non_pad = input_ids[i][input_ids[i] != pad_token_id].unsqueeze(0)
            for _ in range(num_return_sequences):
                out = self._stream(non_pad, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args)
                # Each yield grows by one token; collect only the newest token.
                tokens_list = [tokens[:, -1:] for tokens in out]
                gen = torch.cat(tokens_list, dim=-1) if tokens_list else non_pad
                full_sequence = torch.cat([non_pad, gen], dim=-1)
                generated.append(full_sequence)

        # Right-pad every sequence to the longest one so they can be stacked.
        max_length = max(seq.size(1) for seq in generated)
        generated = [
            torch.cat(
                [seq, torch.full((1, max_length - seq.size(1)), pad_token_id, dtype=seq.dtype, device=seq.device)],
                dim=-1)
            for seq in generated
        ]
        output = torch.cat(generated, dim=0)
        res = output.view(input_ids.size(0) * num_return_sequences, -1)
        return res

    def _stream(self, input_ids, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args):
        """Yield the growing continuation (tokens after the prompt) one token
        at a time, stopping at eos_token_id or after max_new_tokens tokens."""
        start, first_seq, past_kvs = input_ids.shape[1], True, None
        # Bound the number of NEW tokens (the original compared the total
        # length against max_new_tokens, under-generating for long prompts).
        while input_ids.shape[1] - start < max_new_tokens:
            if first_seq or not use_cache:
                # First step (or cache disabled): run the full prefix.
                out, first_seq = self(input_ids, past_key_values=past_kvs, use_cache=use_cache, **args), False
            else:
                # Cached decode: feed only the newest token at its position.
                out = self(input_ids[:, -1:], past_key_values=past_kvs, use_cache=use_cache,
                           start_pos=input_ids.shape[1] - 1, **args)
            logits, past_kvs = out.logits[:, -1, :], out.past_key_values

            # Simple repetition penalty: dampen logits of already-seen tokens.
            logits[:, list(set(input_ids.tolist()[0]))] /= rp

            logits /= (temperature + 1e-9)

            # Nucleus (top-p) filtering: drop the tail beyond cumulative prob p.
            if top_p is not None and top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
                sorted_probs = F.softmax(sorted_logits, dim=-1)
                cumulative_probs = torch.cumsum(sorted_probs, dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                # Shift right so the first token above the threshold survives.
                sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
                sorted_indices_to_remove[:, 0] = False
                indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                logits[indices_to_remove] = -float('Inf')

            input_ids_next = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
            input_ids = torch.cat((input_ids, input_ids_next), dim=1)
            yield input_ids[:, start:]
            if input_ids_next.item() == eos_token_id:
                break
            
            
# Smoke check: build the model and report its trainable parameter count.
model = GPT2_5LM(params=LMConfig())
print(model)
total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'LLM总参数量：{total_params / 1e6:.3f} 百万')