import math
import struct
import inspect
import time

from torch.special import logit

from LMConfig import LMConfig
from typing import Any, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

class RMSNorm(nn.Module):
    """Root-mean-square layer normalization (no mean-centering, no bias).

    Normalizes the last dimension by its RMS, then applies a learned
    per-channel gain. Statistics are computed in float32 and cast back
    to the input dtype for numerical stability in mixed precision.
    """

    def __init__(self, dim: int, eps: float):
        super().__init__()
        self.eps = eps  # added inside the sqrt to avoid division by zero
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        # x / rms(x), with rms computed over the last dimension
        inv_rms = torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        return x * inv_rms

    def forward(self, x):
        normalized = self._norm(x.float()).type_as(x)
        return normalized * self.weight

def precompute_pos_cis(dim: int, end: int, theta: float = 10000.0):
    """Precompute rotary-embedding phases as unit complex numbers.

    Returns a complex64 tensor of shape (end, dim // 2) whose entry at
    (t, j) is exp(i * t * theta^(-2j/dim)) — one rotation per position
    and per frequency band, consumed by apply_rotary_emb.
    """
    # inverse frequencies for the even channel indices 0, 2, ..., dim-2
    exponents = torch.arange(0, dim, 2)[: (dim // 2)].float() / dim
    inv_freq = 1.0 / (theta ** exponents)
    positions = torch.arange(end, device=inv_freq.device)
    # angles[t, j] = t * inv_freq[j]
    angles = torch.outer(positions, inv_freq).float()
    # unit-magnitude complex numbers: cos(angle) + i*sin(angle)
    return torch.polar(torch.ones_like(angles), angles)


def apply_rotary_emb(xq, xk, pos_cis):
    """Rotate query/key vectors by the precomputed complex phases (RoPE).

    xq, xk: (batch, seq, heads, head_dim) real tensors; adjacent channel
    pairs are treated as complex numbers. pos_cis: (seq, head_dim // 2)
    complex phases. Returns rotated (xq, xk) in their original dtypes.
    """
    # pair up the last dimension and view as complex: (..., head_dim//2)
    q_complex = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    k_complex = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))

    assert 0 <= 1 < q_complex.ndim
    assert pos_cis.shape == (q_complex.shape[1], q_complex.shape[-1])
    # reshape phases to (1, seq, 1, head_dim//2) so they broadcast over
    # batch and head dimensions
    broadcast_shape = [1] * q_complex.ndim
    broadcast_shape[1] = q_complex.shape[1]
    broadcast_shape[-1] = q_complex.shape[-1]
    phases = pos_cis.view(*broadcast_shape)

    # complex multiply == 2-D rotation of each channel pair
    q_rotated = torch.view_as_real(q_complex * phases).flatten(3)
    k_rotated = torch.view_as_real(k_complex * phases).flatten(3)
    return q_rotated.type_as(xq), k_rotated.type_as(xk)

def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Duplicate each KV head n_rep times along the head axis.

    Input (bsz, seq_len, n_kv_heads, head_dim) becomes
    (bsz, seq_len, n_kv_heads * n_rep, head_dim), with the n_rep copies
    of each head stored consecutively — the grouped-query-attention
    expansion so KV heads line up with query heads.
    """
    if n_rep == 1:
        # nothing to do; return the input unchanged
        return x
    # repeat_interleave places the copies of each head adjacently,
    # matching the expand(...).reshape(...) layout
    return x.repeat_interleave(n_rep, dim=2)

class Attention(nn.Module):
    """Multi-head self-attention with RoPE, grouped-query KV heads, an
    optional fused-SDPA fast path, and a minimal eval-time KV cache.

    The cache holds only the most recent (k, v) tensors per layer and is
    valid for single-token incremental decoding (seq_len == 1 steps after
    an initial prompt pass).
    """

    def __init__(self, config: LMConfig):
        super().__init__()

        # resolve KV head count: None means full multi-head attention
        self.n_kv_heads = config.n_kv_heads if config.n_kv_heads is not None else config.n_heads
        assert config.n_heads % self.n_kv_heads == 0

        self.n_local_heads = config.n_heads
        # NOTE(fix): derive from the resolved self.n_kv_heads — reading
        # config.n_kv_heads directly crashed here when it was None.
        self.n_Local_Kv_heads = self.n_kv_heads
        self.n_rep = self.n_local_heads // self.n_Local_Kv_heads
        self.head_dim = config.dim // config.n_heads

        self.wq = nn.Linear(config.dim, config.n_heads * self.head_dim, bias=False)
        # NOTE(fix): size KV projections from the resolved head count too
        self.wk = nn.Linear(config.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wv = nn.Linear(config.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wo = nn.Linear(config.n_heads * self.head_dim, config.dim, bias=False)

        # per-layer KV cache, populated only when kv_cache=True at eval time
        self.k_cache, self.v_cache = None, None

        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.dropout = config.dropout
        # use the fused kernel when available (PyTorch >= 2.0)
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')

        # additive causal mask for the manual attention path
        mask = torch.full((1, 1, config.max_seq_len, config.max_seq_len), float('-inf'))
        mask = torch.triu(mask, diagonal=1)
        self.register_buffer('mask', mask, persistent=False)

    def forward(self, x: torch.Tensor, pos_cis: torch.Tensor, kv_cache=False):
        """Attend over x.

        x: (bsz, seq_len, dim); pos_cis: complex RoPE phases for these
        positions, shape (seq_len, head_dim // 2). Returns (bsz, seq_len, dim).
        """
        bsz, seq_len, _ = x.shape
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)

        xq = xq.view(bsz, seq_len, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seq_len, self.n_kv_heads, self.head_dim)
        xv = xv.view(bsz, seq_len, self.n_kv_heads, self.head_dim)

        xq, xk = apply_rotary_emb(xq, xk, pos_cis)

        # NOTE(fix): the original guard was `kv_cache and self.eval()`.
        # Module.eval() returns the module itself (always truthy) and flipped
        # the layer into eval mode as a side effect; check the training flag.
        if kv_cache and not self.training:
            # append the new step to the cache during incremental decoding
            if seq_len == 1 and all(cache is not None for cache in (self.k_cache, self.v_cache)):
                xk = torch.cat((self.k_cache, xk), dim=1)
                xv = torch.cat((self.v_cache, xv), dim=1)
            self.k_cache, self.v_cache = xk, xv

        # expand KV heads to match the query head count (GQA)
        xk = repeat_kv(xk, self.n_rep)
        xv = repeat_kv(xv, self.n_rep)

        # -> (bsz, n_heads, seq_len, head_dim)
        xq, xk, xv = xq.transpose(1, 2), xk.transpose(1, 2), xv.transpose(1, 2)

        if self.flash and seq_len != 1:
            # fused kernel handles causal masking internally
            output = torch.nn.functional.scaled_dot_product_attention(
                xq, xk, xv, attn_mask=None, dropout_p=self.dropout if self.training else 0.0, is_causal=True
            )
        else:
            # manual path; for seq_len == 1 the mask slice is a zero scalar
            # that broadcasts over however many cached keys are present
            scores = xq @ xk.mT / math.sqrt(self.head_dim)
            scores = scores + self.mask[:, :, :seq_len, :seq_len]
            scores = F.softmax(scores, dim=-1).type_as(xq)
            scores = self.attn_dropout(scores)
            output = scores @ xv

        output = output.transpose(1, 2).contiguous().view(bsz, seq_len, -1)
        output = self.wo(output)
        output = self.resid_dropout(output)
        return output

class FeedForward(nn.Module):
    """SwiGLU feed-forward block: w2(silu(w1(x)) * w3(x)) with dropout.

    Args:
        dim: model width (input and output size).
        hidden_dim: inner width; when None, uses the LLaMA sizing rule
            of 2/3 * 4 * dim rounded up to a multiple of ``multiple_of``.
            (Fix: annotation was ``int`` although None is explicitly handled.)
        multiple_of: rounding granularity for the derived hidden width.
        dropout: dropout probability applied to the output.
    """

    def __init__(self, dim: int, hidden_dim: Optional[int], multiple_of: int, dropout: float):
        super().__init__()

        if hidden_dim is None:
            # LLaMA-style sizing: shrink 4*dim by 2/3, then round up
            hidden_dim = 4 * dim
            hidden_dim = int(2 * hidden_dim / 3)
            hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)

        self.w1 = nn.Linear(dim, hidden_dim, bias=False)  # gate projection
        self.w2 = nn.Linear(hidden_dim, dim, bias=False)  # down projection
        self.w3 = nn.Linear(dim, hidden_dim, bias=False)  # up projection
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor):
        """Apply the gated MLP; preserves the input shape."""
        return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))


class MOEGate(nn.Module):
    """Top-k softmax router for the mixture-of-experts feed-forward layer.

    Scores every token against each routed expert with a learned linear
    gate, picks the top-k experts per token, and (during training) computes
    an auxiliary load-balancing loss weighted by ``aux_loss_alpha``.
    """

    def __init__(self, config: LMConfig):
        super(MOEGate, self).__init__()
        self.config = config

        self.top_k = config.num_experts_per_tok        # experts selected per token
        self.n_routed_experts = config.n_routed_experts
        self.scoring_func = config.scoring_func        # only 'softmax' supported below
        self.alpha = config.aux_loss_alpha             # aux-loss weight; 0 disables it
        self.seq_aux = config.seq_aux                  # per-sequence vs per-batch aux loss
        self.norm_topk_prob = config.norm_topk_prob    # renormalize top-k weights to sum to 1
        self.gating_dim = config.dim
        # gate matrix: one row of scores per expert
        self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, x: torch.Tensor):
        """Route tokens to experts.

        Args:
            x: (bsz, seq_len, dim) hidden states.

        Returns:
            topk_idx: (bsz * seq_len, top_k) selected expert indices.
            topk_weight: (bsz * seq_len, top_k) gate weights for those experts.
            aux_loss: scalar load-balancing loss during training when
                ``alpha > 0``, otherwise None.
        """
        bsz, seq_len, dim = x.shape
        # flatten tokens: every row is routed independently
        x = x.view(-1, dim)
        logits = F.linear(x, self.weight, None)

        if self.scoring_func == 'softmax':
            scores = F.softmax(logits, dim=-1)
        else:
            raise NotImplementedError(f'unsupported scoring function {self.scoring_func}')

        topk_weight, topk_idx = torch.topk(scores, self.top_k, dim=-1)

        if self.top_k > 1 and self.norm_topk_prob:
            # renormalize the selected weights; epsilon guards against /0
            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
            topk_weight = topk_weight / denominator

        if self.training and self.alpha > 0.0:
            scores_for_aux = scores
            aux_topk = self.top_k
            topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
            if self.seq_aux:
                # per-sequence balancing (DeepSeek-MoE style): expert usage
                # counts per sequence, normalized so a uniform router gives
                # an expected value of 1 per expert
                scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
                ce = torch.zeros(bsz, self.n_routed_experts, device=x.device)
                ce.scatter_add_(1, topk_idx_for_aux_loss, torch.ones(bsz, seq_len * aux_topk, device=x.device)).div_(seq_len * aux_topk / self.n_routed_experts)
                aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
            else:
                # batch-level balancing: fraction of tokens routed to each
                # expert (fi) times its mean gate probability (Pi)
                mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
                ce = mask_ce.float().mean(0)
                Pi = scores_for_aux.mean(0)
                fi = ce * self.n_routed_experts
                aux_loss = (Pi * fi).sum() * self.alpha
        else:
            aux_loss = None

        return topk_idx, topk_weight, aux_loss

class MOEFeedForward(nn.Module):
    """Mixture-of-experts feed-forward layer.

    Each token is routed by ``MOEGate`` to its top-k experts (independent
    ``FeedForward`` blocks); expert outputs are combined with the gate
    weights. When ``n_shared_experts`` is set, a shared expert is applied
    to every token and added to the routed result.
    """

    def __init__(self, config: LMConfig):
        super().__init__()
        self.config = config
        self.experts = nn.ModuleList(
            FeedForward(dim=config.dim, hidden_dim=config.hidden_dim, dropout=config.dropout, multiple_of=config.multiple_of) for _ in range(config.n_routed_experts)
        )

        self.gate = MOEGate(config)
        if config.n_shared_experts is not None:
            # always-on expert applied to every token
            self.shared_experts = FeedForward(dim=config.dim, hidden_dim=config.hidden_dim, dropout=config.dropout, multiple_of=config.multiple_of)

    def forward(self, x: torch.Tensor):
        """Route (bsz, seq_len, dim) hidden states through the experts."""
        identity = x
        orig_shape = x.shape
        bsz, seq_len, dim = x.shape

        # NOTE(review): the gate's aux_loss is discarded here — presumably it
        # should be surfaced to the training loop; confirm against callers.
        topk_idx, topk_weight, aux_loss = self.gate(x)
        x = x.view(-1, dim)
        flat_topk_idx = topk_idx.view(-1)

        if self.training:
            # duplicate each token once per selected expert, then run every
            # expert on its slice of tokens
            x = x.repeat_interleave(self.config.num_experts_per_tok, dim=0)
            # NOTE(fix): was torch.empty_like(x, dtype=torch.float16), which
            # silently downcast expert outputs to fp16 even in fp32 training;
            # keep the input dtype instead.
            y = torch.empty_like(x)
            for i, expert in enumerate(self.experts):
                selected = flat_topk_idx == i
                y[selected] = expert(x[selected])
            # weighted sum over the k experts chosen for each token
            y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
            y = y.view(*orig_shape)
        else:
            y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)

        if self.config.n_shared_experts is not None:
            y = y + self.shared_experts(identity)

        return y

    @torch.no_grad()
    def moe_infer(self, x, flat_expert_indices: torch.Tensor, flat_expert_weight):
        """Inference-time dispatch: group token copies by expert so each
        expert runs once on a contiguous batch, then scatter-add the
        weighted outputs back to their source tokens.

        x: (n_tokens, dim) flattened hidden states.
        flat_expert_indices / flat_expert_weight: (n_tokens * top_k,) routing
        choices and their gate weights.
        """
        expert_cache = torch.zeros_like(x)
        # sort the (token, expert) assignments by expert id
        idxs = flat_expert_indices.argsort()
        cnt = flat_expert_indices.bincount()
        # cumulative counts give each expert's [start, end) slice of `idxs`
        tokens_per_expert = cnt.cpu().numpy().cumsum(0)
        # map each assignment back to its source token row
        token_idxs = idxs // self.config.num_experts_per_tok

        for i, end_idx in enumerate(tokens_per_expert):
            start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
            if start_idx == end_idx:
                # this expert received no tokens
                continue

            expert = self.experts[i]
            exp_token_idx = token_idxs[start_idx:end_idx]
            expert_tokens = x[exp_token_idx]
            expert_out = expert(expert_tokens)
            # scale by the gate weight of each assignment
            expert_out.mul_(flat_expert_weight[idxs[start_idx:end_idx]])
            # accumulate into the owning token rows (a token may appear for
            # several experts, hence scatter_add rather than assignment)
            row_index = exp_token_idx.view(-1, 1).repeat(1, x.shape[-1])
            expert_cache.scatter_add_(0, row_index, expert_out)

        return expert_cache

class TransformerBlock(nn.Module):
    """One pre-norm transformer layer: RMSNorm -> attention -> residual,
    then RMSNorm -> feed-forward (dense or MoE) -> residual."""

    def __init__(self, layer_id: int, config: LMConfig):
        super().__init__()
        self.n_heads = config.n_heads
        self.dim = config.dim
        self.head_dim = config.dim // config.n_heads
        self.layer_id = layer_id

        self.attention = Attention(config)
        self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
        self.fnn_norm = RMSNorm(config.dim, eps=config.norm_eps)

        # MoE or dense feed-forward, selected once at construction
        if config.use_moe:
            self.feed_forward = MOEFeedForward(config)
        else:
            self.feed_forward = FeedForward(dim=config.dim, hidden_dim=config.hidden_dim, dropout=config.dropout, multiple_of=config.multiple_of)

    def forward(self, x: torch.Tensor, pos_cis, kv_cache=False):
        """Run the layer on (bsz, seq_len, dim) hidden states."""
        attn_out = self.attention(self.attention_norm(x), pos_cis, kv_cache=kv_cache)
        hidden = x + attn_out
        return hidden + self.feed_forward(self.fnn_norm(hidden))

class Transformer(nn.Module):
    """Decoder-only causal language model: tied embedding/output weights,
    rotary position embeddings, a stack of ``TransformerBlock`` layers,
    and simple streaming generation with an optional KV cache.
    """

    config_class = LMConfig
    # NOTE(fix): was `last_loss = Optional[torch.Tensor]`, which assigned the
    # typing object itself as a class attribute instead of declaring a type.
    last_loss: Optional[torch.Tensor]

    def __init__(self, config: LMConfig):
        super().__init__()
        config = config if config is not None else LMConfig()
        self.config = config
        self.vocab_size = config.vocab_size
        self.n_Layers = config.n_layers

        self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)
        self.dropout = nn.Dropout(config.dropout)
        self.layers = torch.nn.ModuleList(
            TransformerBlock(layer_id, config) for layer_id in range(config.n_layers)
        )

        self.norm = RMSNorm(config.dim, eps=config.norm_eps)
        self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
        # weight tying: embedding and LM head share one matrix
        self.tok_embeddings.weight = self.output.weight

        # rotary phases precomputed for the maximum context length
        pos_cis = precompute_pos_cis(config.dim // config.n_heads, config.max_seq_len)
        self.register_buffer("pos_cis", pos_cis, persistent=False)
        self.apply(self._init_weights)

        # GPT-2-style scaled init for residual output projections
        for pn, p in self.named_parameters():
            if pn.endswith("w3.weight") or pn.endswith("wo.weight"):
                torch.nn.init.normal_(p, mean=0, std=0.02 / math.sqrt(2 * config.n_layers))
        self.last_loss = None
        self.OUT = CausalLMOutputWithPast()
        self._no_split_modules = [name for name, _ in self.named_modules()]

    def forward(self, tokens: Optional[torch.Tensor] = None, targets: Optional[torch.Tensor] = None, kv_cache=False, **keyargs):
        """Compute logits (and, when targets are given, per-token loss).

        When training (targets present) the full logits and an unreduced
        cross-entropy loss (ignore_index=0) are produced; otherwise only the
        last position's logits are computed. ``current_idx`` selects the
        rotary phases for incremental decoding with a KV cache.
        """
        current_idx = 0
        if 'input_ids' in keyargs:
            tokens = keyargs['input_ids']
        if 'attention_mask' in keyargs:
            # NOTE(review): attention_mask is repurposed as the target ids
            # here — looks like a trainer-interface shortcut; confirm callers.
            targets = keyargs['attention_mask']
        if 'current_idx' in keyargs:
            current_idx = int(keyargs['current_idx'])

        bst, seq_len = tokens.shape
        h = self.tok_embeddings(tokens)
        h = self.dropout(h)
        # rotary phases for the positions actually being processed
        pos_cis = self.pos_cis[current_idx:current_idx + seq_len]

        for idx, layer in enumerate(self.layers):
            h = layer(h, pos_cis, kv_cache=kv_cache)

        h = self.norm(h)

        if targets is not None:
            logits = self.output(h)
            logits = logits.view(-1, self.vocab_size)
            # unreduced so the caller can mask/weight per token; 0 is padding
            self.last_loss = F.cross_entropy(logits, targets.view(-1), ignore_index=0, reduction='none')
        else:
            # inference: only the final position's logits are needed
            logits = self.output(h[:, [-1], :])
            self.last_loss = None

        self.OUT.__setitem__('logits', logits)
        self.OUT.__setitem__('last_loss', self.last_loss)
        return self.OUT

    @torch.inference_mode()
    def generate(self, idx, eos, max_new_tokens, temperature=0.7, top_k=8, stream=True, rp=1., kv_cache=True):
        """Autoregressively sample continuations of ``idx``.

        Yields the generated suffix after each step when ``stream`` is True,
        otherwise yields it once at the end. Stops at ``eos`` or when the
        TOTAL sequence length reaches max_new_tokens - 1 (note: despite the
        name, this caps total length, not newly generated tokens).
        """
        index = idx.shape[1]
        init_inference = True
        while idx.shape[1] < max_new_tokens - 1:
            if init_inference or not kv_cache:
                # full prompt pass (also primes the KV cache)
                inference_res, init_inference = self(idx, kv_cache=kv_cache), False
            else:
                # single-step decode using the cached keys/values
                inference_res = self(idx[:, -1:], kv_cache=kv_cache, current_idx=idx.shape[1] - 1)

            logits = inference_res.logits
            logits = logits[:, -1, :]

            # repetition penalty on already-seen tokens (batch size 1 only).
            # NOTE(review): dividing by rp > 1 makes NEGATIVE logits larger,
            # i.e. it can reward rare repeated tokens; the standard fix
            # multiplies negative logits by rp instead — kept as-is here.
            for token in set(idx.tolist()[0]):
                logits[:, token] /= rp

            if temperature == 0.0:
                # greedy decoding
                _, idx_next = torch.topk(logits, k=1, dim=-1)
            else:
                logits = logits / temperature
                if top_k is not None:
                    # zero out everything below the k-th best logit
                    v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                    logits[logits < v[:, [-1]]] = -float('inf')
                probs = F.softmax(logits, dim=-1)
                idx_next = torch.multinomial(probs, 1, generator=None)

            if idx_next == eos:
                break

            idx = torch.cat((idx, idx_next), dim=1)
            if stream:
                yield idx[:, index:]

        if not stream:
            yield idx[:, index:]

    @torch.inference_mode()
    def eval_answer(self, idx):
        """Return next-token logits for a prompt, truncated to the model's
        maximum context length."""
        # NOTE(fix): over-long prompts kept only the final token (idx[:, -1:]);
        # keep the last max_seq_len tokens instead.
        idx_cond = idx if idx.size(1) <= self.config.max_seq_len else idx[:, -self.config.max_seq_len:]
        inference_res = self(idx_cond)
        logits = inference_res.logits
        logits = logits[:, -1, :]
        return logits

    def _init_weights(self, module):
        """Standard GPT-style init: N(0, 0.02) weights, zero biases."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)


if __name__ == '__main__':
    # Smoke-test each module with a tiny configuration.
    batch, seq, dim = 3, 5, 8
    sample = torch.rand(batch, seq, dim)

    norm_layer = RMSNorm(dim=dim, eps=1e-6)
    assert norm_layer(sample).shape == sample.shape

    config = LMConfig(
        dim=dim,
        n_heads=dim // 2,
        n_kv_heads=dim // 4,
        n_routed_experts=7
    )

    attn = Attention(config)
    pos_cis = precompute_pos_cis(config.dim // config.n_heads, config.max_seq_len)
    pos_cis = pos_cis[0:seq]
    assert attn(sample, pos_cis=pos_cis).shape == sample.shape

    dense_ffn = FeedForward(dim=dim, hidden_dim=None, dropout=0.0, multiple_of=8)
    dense_ffn(sample)

    gate = MOEGate(config)
    topk_idx, topk_weight, aux_loss = gate(sample)

    moe_ffn = MOEFeedForward(config=config)
    moe_ffn(sample)

    block = TransformerBlock(config=config, layer_id=None)
    block(sample, pos_cis, kv_cache=False)

    model = Transformer(config=config)
    # `token_ids` instead of `input`: don't shadow the builtin
    token_ids = torch.randint(low=0, high=config.vocab_size, size=(batch, seq))
    target_ids = torch.randint(low=0, high=config.vocab_size, size=(batch, seq))
    model(token_ids, target_ids, kv_cache=False)