from abc import ABC, abstractmethod
from typing import Optional

from torch import nn
import torch
import torch.nn.functional as F
from dataclasses import dataclass


from model_test.imdb.UsherConfig import UsherConfig


class UsherModule(nn.Module, ABC):
    """Abstract base class for every Usher model component.

    Combines ``nn.Module`` with ``ABC`` so that concrete subclasses are
    forced to provide :meth:`init_weight` for parameter initialization.
    """

    def __init__(self):
        super().__init__()

    @abstractmethod
    def init_weight(self):
        """(Re)initialize this module's learnable parameters in place."""
        raise NotImplementedError()


class UsherEmbedding(UsherModule):
    """Token-embedding lookup table of shape (vocab_size, dim)."""

    def __init__(self, args: UsherConfig):
        super().__init__()
        # Allocated uninitialized; values are set by init_weight().
        self.weight = nn.Parameter(torch.empty(args.vocab_size, args.dim))

    def init_weight(self):
        """Fill the embedding table with Xavier-uniform values."""
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x):
        # x holds integer token ids; result appends a trailing `dim` axis.
        return F.embedding(x, self.weight)


class UsherDTNormal(UsherModule):
    """Dynamic-tanh (DyT) normalization: ``gamma * tanh(alpha * x) + beta``.

    A normalization-free LayerNorm substitute: a learnable scalar ``alpha``
    squashes activations through tanh, followed by a learnable per-channel
    affine transform (``gamma``, ``beta``).
    """

    def __init__(self, args: UsherConfig):
        super().__init__()
        # BUG FIX: torch.Tensor(scalar) raises a TypeError for a plain float
        # (it interprets its argument as a size or a sequence); torch.tensor
        # correctly wraps a scalar initial value.
        self.alpha = nn.Parameter(torch.tensor(float(args.normal_alpha_init)))
        self.gamma = nn.Parameter(torch.ones(args.dim))
        self.beta = nn.Parameter(torch.zeros(args.dim))

    def init_weight(self):
        # Parameters already start at their intended values
        # (alpha_init, ones, zeros), so there is nothing to do.
        pass

    def forward(self, x):
        # torch.tanh instead of the deprecated F.tanh.
        x = torch.tanh(self.alpha * x)
        return self.gamma * x + self.beta


class UsherMultiHeadAttention(UsherModule):
    """Multi-head self-attention.

    Queries/keys use ``qk_head_dim`` per head; values use ``dim`` per head
    (matching the existing ``self.v`` projection width), and an output
    projection folds the concatenated heads back to ``dim`` so the result
    can be added residually to the input in UsherBlock.
    """

    def init_weight(self):
        # Linear layers keep nn.Linear's default initialization.
        pass

    def __init__(self, args: UsherConfig):
        super().__init__()
        self.n_heads = args.n_heads
        self.qk_head_dim = args.qk_head_dim
        # BUG FIX: the original forward read self.dim, which was never set.
        self.v_head_dim = args.dim
        self.q = nn.Linear(args.dim, args.qk_head_dim * args.n_heads)
        self.k = nn.Linear(args.dim, args.qk_head_dim * args.n_heads)
        self.v = nn.Linear(args.dim, args.dim * args.n_heads)
        # Merges the concatenated heads back to the model dimension so the
        # output shape matches the input (required for the residual add).
        self.o = nn.Linear(args.dim * args.n_heads, args.dim)

    def forward(self, x, mask: Optional[torch.Tensor], **keys):
        batch_size, seq_len, _ = x.shape

        # BUG FIX: view as (batch, seq, heads, head_dim) and THEN transpose to
        # (batch, heads, seq, head_dim). Viewing directly as
        # (batch, heads, seq, head_dim) scrambles the head/position axes.
        x_q = self.q(x).view(batch_size, seq_len, self.n_heads, self.qk_head_dim).transpose(1, 2)
        x_k = self.k(x).view(batch_size, seq_len, self.n_heads, self.qk_head_dim).transpose(1, 2)
        x_v = self.v(x).view(batch_size, seq_len, self.n_heads, self.v_head_dim).transpose(1, 2)

        # Scaled dot-product attention scores: (batch, heads, seq, seq).
        scores = torch.matmul(x_q, x_k.transpose(-1, -2)) / (self.qk_head_dim ** 0.5)
        if mask is not None:
            scores = scores + mask
        probs = scores.softmax(dim=-1)

        # BUG FIX: the attention probabilities weight the VALUE projection of
        # x; the original applied self.v to the probabilities themselves.
        ctx = torch.matmul(probs, x_v)  # (batch, heads, seq, v_head_dim)
        ctx = ctx.transpose(1, 2).reshape(batch_size, seq_len, self.n_heads * self.v_head_dim)
        return self.o(ctx)


class UsherMultiLayerPerceptron(UsherModule):
    """SwiGLU feed-forward block: ``w3(silu(w1(x)) * w2(x))``."""

    def init_weight(self):
        # nn.Linear layers keep their default initialization.
        pass

    def __init__(self, args: UsherConfig):
        super().__init__()
        self.w1 = nn.Linear(args.dim, args.inter_dim)
        self.w2 = nn.Linear(args.dim, args.inter_dim)
        self.w3 = nn.Linear(args.inter_dim, args.dim)

    def forward(self, x):
        gate = F.silu(self.w1(x))
        value = self.w2(x)
        return self.w3(gate * value)


class UsherExperts(UsherModule):
    """Single MoE expert: a SwiGLU MLP with the expert inter dimension."""

    def init_weight(self):
        # nn.Linear layers keep their default initialization.
        pass

    def __init__(self, args: UsherConfig):
        super().__init__()
        self.w1 = nn.Linear(args.dim, args.moe_inter_dim)
        self.w2 = nn.Linear(args.dim, args.moe_inter_dim)
        self.w3 = nn.Linear(args.moe_inter_dim, args.dim)

    def forward(self, x):
        gate = F.silu(self.w1(x))
        value = self.w2(x)
        return self.w3(gate * value)


class UsherMOE(UsherModule):
    """Mixture-of-experts feed-forward layer.

    Experts ``[0, n_shared_experts)`` are always active; the gate routes
    each token to ``n_active_experts`` of the remaining (routed) experts.
    The outputs of all active experts are summed per token.
    """

    def __init__(self, args: UsherConfig):
        super().__init__()
        # Always-active (shared) experts occupy the front of the expert list.
        self.n_shared_experts = args.n_shared_experts
        # Number of routed experts selected per token.
        self.n_active_experts = args.n_active_experts
        # The gate scores only the routed (non-shared) experts.
        self.gate = nn.Linear(args.dim, args.n_routed_experts - self.n_shared_experts)
        self.experts = nn.ModuleList([UsherExperts(args) for _ in range(args.n_routed_experts)])

    def init_weight(self):
        for expert in self.experts:
            expert.init_weight()

    def forward(self, x):
        # 1. Per-token gate scores over the routed experts.
        gate_scores = self.gate(x)
        # 2. Per-token top-k routed expert indices: (..., n_active_experts).
        _, top_idx = torch.topk(gate_scores, k=self.n_active_experts, dim=-1)

        out = torch.zeros_like(x)
        # 3. Shared experts always contribute for every token.
        for i in range(self.n_shared_experts):
            out = out + self.experts[i](x)
        # 4. Routed experts contribute only for the tokens that selected them.
        #    BUG FIX: the original torch.concat(tensor, tensor) call raised a
        #    TypeError (concat takes a sequence of tensors) and iterated over
        #    a multi-dimensional index tensor, ignoring per-token routing.
        for e in range(self.n_shared_experts, len(self.experts)):
            # Boolean mask (..., 1): did any of this token's top-k picks hit e?
            selected = (top_idx == (e - self.n_shared_experts)).any(dim=-1, keepdim=True)
            if selected.any():
                out = out + selected * self.experts[e](x)
        # NOTE(review): contributions are unweighted, matching the original's
        # intent; consider softmax-weighting routed experts by gate score.
        return out


class UsherBlock(UsherModule):
    """One transformer layer: attention and feed-forward sublayers, each
    followed by a DyT normalization and a residual connection.

    Layers with ``layer_id >= args.n_dense_layers`` use a mixture-of-experts
    feed-forward; earlier layers use a dense SwiGLU MLP.
    """

    def init_weight(self):
        for sub in (self.attn, self.attn_norm, self.ffn, self.ffn_norm):
            sub.init_weight()

    def __init__(self, layer_id, args: UsherConfig):
        super().__init__()
        self.layer_id = layer_id
        self.attn = UsherMultiHeadAttention(args)
        self.attn_norm = UsherDTNormal(args)
        if layer_id < args.n_dense_layers:
            self.ffn = UsherMultiLayerPerceptron(args)
        else:
            self.ffn = UsherMOE(args)
        self.ffn_norm = UsherDTNormal(args)

    def forward(self, x, mask):
        # Post-norm residuals: each sublayer's output is normalized before
        # being added back to the stream.
        x = x + self.attn_norm(self.attn(x, mask))
        return x + self.ffn_norm(self.ffn(x))


@dataclass
class UsherGenerArgs:
    """Sampling options for UsherTransformer.generator."""

    # Softmax temperature applied to the next-token logits.
    temperature: float = 1.0
    # Hard cap on total sequence length (prompt + generated tokens).
    seq_max_len: int = 4096


class UsherTransformer(UsherModule):
    """Decoder-only transformer language model.

    Pipeline: token embedding -> n_layers UsherBlocks -> DyT normalization
    -> linear projection to vocabulary logits.
    """

    def __init__(self, args: UsherConfig):
        super().__init__()
        self.emb = UsherEmbedding(args)
        self.blocks = nn.ModuleList([UsherBlock(i, args) for i in range(args.n_layers)])
        self.normal = UsherDTNormal(args)
        self.final = nn.Linear(args.dim, args.vocab_size)

    def init_weight(self):
        """Initialize all sub-module parameters.

        ``self.final`` keeps nn.Linear's default initialization, consistent
        with the other Linear layers in the model.
        """
        self.emb.init_weight()
        for block in self.blocks:
            block.init_weight()
        self.normal.init_weight()

    def forward(self, inputs: torch.Tensor, **keys):
        """Map token ids (batch, seq) to logits (batch, seq, vocab_size)."""
        seq_len = inputs.size(1)
        # Embed token ids into vectors.
        x = self.emb(inputs)
        # Causal mask: -inf above the diagonal so position i cannot attend to
        # later positions. BUG FIX: the original built the mask only in
        # training mode, making inference attention bidirectional and
        # inconsistent with how the model was trained.
        mask = None
        if seq_len > 1:
            mask = torch.full((seq_len, seq_len), float("-inf"), device=inputs.device).triu_(1)
        # Transformer body.
        for layer in self.blocks:
            x = layer(x, mask)
        # Final normalization and vocabulary projection.
        x = self.normal(x)
        return self.final(x)

    def generator(self, query: str, arg: UsherGenerArgs, tokenizer, device, **keys):
        """Autoregressively sample a continuation of ``query``.

        Samples from the temperature-scaled softmax of the last position's
        logits; stops at the tokenizer's EOS token or when the sequence
        reaches ``arg.seq_max_len`` tokens.
        """
        # BUG FIX: do NOT pad the prompt. padding="max_length" filled the
        # sequence with pad tokens, so the generation budget was exhausted
        # and new tokens were appended after padding.
        prompt_ids = tokenizer(query, truncation=True, return_tensors="pt")["input_ids"].to(device)

        generated_ids = prompt_ids.clone().detach()

        # Generation loop: one new token per iteration.
        for _ in range(arg.seq_max_len - prompt_ids.shape[1]):
            with torch.no_grad():
                logits = self.forward(generated_ids)
                # Only the last position predicts the next token.
                next_token_logits = logits[:, -1, :] / arg.temperature
                next_token_probs = F.softmax(next_token_logits, dim=-1)
                next_token_id = torch.multinomial(next_token_probs, num_samples=1)

            # Append the sampled token to the running sequence.
            generated_ids = torch.cat([generated_ids, next_token_id], dim=-1)

            # Stop on end-of-sequence.
            if next_token_id.item() == tokenizer.eos_token_id:
                break

        # Decode the full (prompt + generated) sequence.
        return tokenizer.decode(generated_ids[0])
