"""
自回归包装器模块

这个模块实现了自回归生成模型的包装器，主要功能包括：
1. 序列生成和采样
2. 自动填充和截断处理
3. 训练时的损失计算
4. 支持top-k和top-p采样策略
"""

from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from routing_transformer.routing_transformer import RoutingTransformerLM
from routing_transformer.autopadder import Autopadder


def default(value, default):
    """
    Fall back to a default when a value is missing.

    Args:
        value: the candidate value.
        default: what to return when ``value`` is None.

    Returns:
        ``default`` if ``value`` is None, otherwise ``value`` unchanged.
    """
    if value is None:
        return default
    return value


def top_p(logits, thres=0.9):
    """
    Nucleus (top-p) filtering of logits.

    Sorts tokens by probability and masks out the tail whose cumulative
    probability exceeds ``1 - thres`` (the first token is always kept).

    Args:
        logits: raw model logits of shape (batch, vocab).
        thres: threshold controlling how much tail mass is dropped
            (default 0.9).

    Returns:
        Logits of the same shape, with filtered-out positions set to -inf.
    """
    desc_logits, desc_indices = torch.sort(logits, descending=True)
    cumulative = torch.cumsum(F.softmax(desc_logits, dim=-1), dim=-1)

    drop = cumulative > 1.0 - thres
    # shift the mask right by one so the token that crosses the
    # threshold is still kept; always keep the most likely token
    drop[:, 1:] = drop[:, :-1].clone()
    drop[:, 0] = 0

    filtered = desc_logits.masked_fill(drop, float("-inf"))
    # scatter the (sorted) filtered values back to their original
    # vocabulary positions; every position is written exactly once
    return filtered.scatter(1, desc_indices, filtered)


def top_k(logits, thres=0.9):
    """
    Top-k filtering of logits.

    Keeps the k highest-scoring tokens (k = (1 - thres) * vocab size) and
    sets every other position to -inf.

    Args:
        logits: raw model logits of shape (batch, vocab).
        thres: fraction of the vocabulary to drop (default 0.9, i.e.
            keep the top 10%).

    Returns:
        Logits of the same shape, with non-top-k positions set to -inf.
    """
    # BUGFIX: clamp k to at least 1 — for small vocabularies or high
    # thresholds int((1 - thres) * vocab) can be 0, which yields an
    # all -inf row, a NaN softmax, and a crash in torch.multinomial
    k = max(1, int((1 - thres) * logits.shape[-1]))
    val, ind = torch.topk(logits, k)
    filtered = torch.full_like(logits, float("-inf"))
    filtered.scatter_(1, ind, val)
    return filtered


def pad_sequence_right(seqs, value):
    """
    Right-pad a list of 1-D tensors to a common length and stack them.

    Args:
        seqs: list of 1-D tensors of possibly different lengths.
        value: fill value used for the padded positions.

    Returns:
        A stacked tensor of shape (len(seqs), max_len).
    """
    max_len = max(len(s) for s in seqs)
    # BUGFIX: the `value` argument was previously ignored and F.pad's
    # default fill of 0 was used regardless of what the caller passed
    return torch.stack([F.pad(s, (0, max_len - len(s)), value=value) for s in seqs])


def truncate_sequence(inputs, mask=None, pad_value=0):
    """
    Randomly truncate a batch of sequences (training-time augmentation).

    Picks a uniform random length in [2, t] and slices both the inputs
    and the mask to that length.

    Args:
        inputs: token tensor of shape (batch, t).
        mask: optional boolean mask of the same shape; defaults to all True.
        pad_value: unused; kept for interface compatibility with callers.

    Returns:
        Tuple of (truncated inputs, truncated mask).
    """
    # only the sequence length is needed; the old code unpacked
    # batch size, device and dtype without ever using them
    _, t = inputs.shape
    if mask is None:
        mask = torch.ones_like(inputs).bool()
    rand_length = random.randint(2, t)
    return inputs[:, :rand_length], mask[:, :rand_length]


class AutoregressiveWrapper(nn.Module):
    """
    Autoregressive wrapper around a Routing Transformer language model.

    Responsibilities:
    1. Autoregressive sequence generation with top-k / top-p sampling
    2. Training-time loss computation (cross entropy plus the model's
       auxiliary routing loss)
    3. Automatic padding and input-mask handling

    Args:
        net: RoutingTransformerLM instance to wrap.
        ignore_index: target index ignored by the loss (defaults to pad_value).
        pad_value: value used to pad variable-length sequences.
    """

    def __init__(self, net, ignore_index=None, pad_value=0):
        super().__init__()
        assert isinstance(
            net, RoutingTransformerLM
        ), "generative trainer wrapper can only accept RoutingTransformerLM class"
        self.pad_value = pad_value
        self.ignore_index = default(ignore_index, pad_value)

        # Autopadder transparently pads inputs to the length the model expects
        self.net = Autopadder(net)
        self.max_seq_len = net.max_seq_len
        # keep a reference to the unwrapped model for k-means updates
        self.base_net = net

    def update_kmeans(self):
        """Update the k-means centroids used by the routing attention."""
        self.base_net.update_kmeans()

    @torch.no_grad()
    def generate(
        self,
        start_tokens,
        seq_len,
        eos_token=None,
        temperature=1.0,
        filter_logits_fn=top_k,
        filter_thres=0.9,
        **kwargs
    ):
        """
        Autoregressively generate a continuation of `start_tokens`.

        Args:
            start_tokens: prompt tokens of shape (t,) or (b, t).
            seq_len: number of new tokens to generate.
            eos_token: stop early once every batch row samples this token.
            temperature: softmax temperature applied before sampling.
            filter_logits_fn: logits filter (top_k by default; top_p works too).
            filter_thres: threshold passed to the filter function.
            **kwargs: forwarded to the underlying network (may include
                "input_mask", which is popped and managed here).

        Returns:
            Only the newly generated tokens (prompt stripped), with the
            same number of dimensions as start_tokens.
        """
        was_training = self.net.training
        num_dims = len(start_tokens.shape)

        if num_dims == 1:
            start_tokens = start_tokens[None, :]

        b, t = start_tokens.shape

        self.net.eval()
        out = start_tokens
        input_mask = kwargs.pop("input_mask", None)

        if input_mask is None:
            input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)

        for _ in range(seq_len):
            # only feed the most recent max_seq_len tokens to the model
            x = out[:, -self.max_seq_len :]
            input_mask = input_mask[:, -self.max_seq_len :]
            logits, _ = self.net(x, input_mask=input_mask, **kwargs)
            logits = logits[:, -1, :]
            filtered_logits = filter_logits_fn(logits, thres=filter_thres)
            probs = F.softmax(filtered_logits / temperature, dim=-1)
            sample = torch.multinomial(probs, 1)

            out = torch.cat((out, sample), dim=-1)
            # BUGFIX: extend the mask on the RIGHT — it was (1, 0), which
            # padded on the left and misaligned any user-supplied mask with
            # `out`, since the sampled token is appended at the end
            input_mask = F.pad(input_mask, (0, 1), value=True)
            if eos_token is not None and (sample == eos_token).all():
                break

        out = out[:, t:]

        if num_dims == 1:
            out = out.squeeze(0)

        self.net.train(was_training)
        return out

    def forward(self, x, return_loss=False, randomly_truncate_sequence=False, **kwargs):
        """
        Run the model, optionally computing the autoregressive training loss.

        Args:
            x: token tensor of shape (b, t), or a list of 1-D tensors
                (lists are padded into a batch with pad_value).
            return_loss: when True, return cross entropy against the
                one-token-shifted targets plus the auxiliary routing loss.
            randomly_truncate_sequence: randomly truncate the batch to a
                shorter length before the loss (data augmentation).
            **kwargs: forwarded to the underlying network.

        Returns:
            The network output when return_loss is False, otherwise a
            scalar loss tensor.
        """
        pad = partial(pad_sequence, batch_first=True, padding_value=self.pad_value)

        if not return_loss:
            if not isinstance(x, torch.Tensor):
                x = pad(x)
            return self.net(x, **kwargs)

        m = kwargs.get("input_mask", None)

        if randomly_truncate_sequence:
            x, m = truncate_sequence(x, m, pad_value=self.pad_value)

        # teacher forcing: inputs are all tokens but the last,
        # targets are the same tokens shifted left by one
        if isinstance(x, torch.Tensor):
            xi, xo = x[:, :-1], x[:, 1:]
        else:
            xi = pad(list(map(lambda t: t[:-1], x)))
            xo = pad(list(map(lambda t: t[1:], x)))

        if m is not None:
            assert (
                m.shape == x.shape[0:2]
            ), "input mask must be the same shape as the input of the auto-regressive wrapper to automatically handle"
            kwargs["input_mask"] = m[:, :-1]

        out, aux_loss = self.net(xi, **kwargs)

        loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index=self.ignore_index)
        loss = loss + aux_loss
        return loss
