import torch
from typing import List
from pathlib import Path
from einops import repeat

from cs336_basics.transformer.model import TransformerLM, softmax
from cs336_basics.bpe_tokenizer.tokenizer import Tokenizer
from cs336_basics.transformer.utils import load_checkpoint

ROOT_DIR = Path(__file__).resolve().parent.parent.parent


def nucleus_sample(probs: torch.Tensor, top_p: float):
    """
    Nucleus (top-p) sampling filter.

    Keeps the smallest set of highest-probability tokens whose cumulative
    probability reaches ``top_p``, zeroes out everything else, and
    renormalizes the kept probabilities to sum to 1.

    Args:
        probs (Tensor): probability distribution(s) to filter,
            shape (batch_size, vocab).
        top_p (float): nucleus threshold in (0, 1].

    Returns:
        Tensor: filtered, renormalized distribution, same shape as ``probs``.
    """

    # Sort probabilities high-to-low; sorted_indices maps each sorted slot
    # back to its original vocabulary index.
    sorted_probs, sorted_indices = torch.sort(
        probs, descending=True, dim=-1
    )  # shape (batch_size, vocab)

    # Cumulative mass of the sorted distribution.
    cumsum_probs = torch.cumsum(sorted_probs, dim=-1)  # shape (batch_size, vocab)

    # Standard nucleus rule: remove tokens strictly AFTER the cumulative mass
    # first exceeds top_p. Shifting the removal mask right by one keeps the
    # token that crosses the threshold, so the kept set always has mass
    # >= top_p. (The previous `cumsum <= top_p` keep-mask dropped that
    # crossing token and could keep a set with total mass < top_p.)
    remove = cumsum_probs > top_p  # shape (batch_size, vocab)
    remove[..., 1:] = remove[..., :-1].clone()
    remove[..., 0] = False  # always keep the single most probable token

    # Zero out the removed tail.
    masked_probs = sorted_probs * (~remove)  # shape (batch_size, vocab)

    # Renormalize so the kept probabilities sum to 1. The kept set is never
    # empty (first token is always kept), so the denominator is positive.
    sum_masked_probs = torch.sum(
        masked_probs, dim=-1, keepdim=True
    )  # shape (batch_size, 1)
    masked_probs /= sum_masked_probs

    # Undo the sort: scatter values back to their original vocab positions.
    result_probs = torch.zeros_like(probs)
    result_probs.scatter_(-1, sorted_indices, masked_probs)

    return result_probs


def inference(
    prompts: List[str],
    max_output_tokens: int,
    model_name: str = "TransformerLM",
    top_p: float = 0.9,
    tempreature: float = 1.2,
):
    """
    Run batched autoregressive inference with the specified model.

    Args:
        prompts (List[str]):            batch of input prompts
        max_output_tokens (int):        maximum number of tokens to generate
        model_name (str):               model name (only "TransformerLM")
        top_p (float, optional):        top-p (nucleus) sampling threshold
        tempreature (float, optional):  temperature for logit scaling

    Returns:
        dict mapping each prompt to its decoded continuation.

    Raises:
        ValueError: if ``model_name`` is unsupported or a prompt does not
            fit in the model's context window.
    """

    if model_name != "TransformerLM":
        raise ValueError(
            f"模型 '{model_name}' 现在并不支持. 目前支持的模型有：'TransformerLM'"
        )

    # Generation constants; must match the trained checkpoint.
    end_token = "<|endoftext|>"
    context_length = 256

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Build the tokenizer from the serialized vocab/merges files.
    tokenizer = Tokenizer.from_files(
        vocab_filepath=ROOT_DIR.joinpath("output/tokenizer/TinyStoriesV2_vocab.pkl"),
        merges_filepath=ROOT_DIR.joinpath("output/tokenizer/TinyStoriesV2_merges.pkl"),
        special_tokens=[end_token],
    )

    # Tokenize every prompt; lengths vary, right-padding happens below.
    prompts_ids: List[torch.Tensor] = []  # token ids of each prompt
    len_prompts = []  # token count of each prompt
    for prompt in prompts:
        ids = tokenizer.encode(prompt)
        # Fail fast with a clear message instead of an opaque slice-assignment
        # error when the prompt cannot fit in the context window.
        if len(ids) >= context_length:
            raise ValueError(
                f"Prompt with {len(ids)} tokens does not fit in the "
                f"context window of {context_length} tokens."
            )
        prompts_ids.append(torch.tensor(ids, dtype=torch.int32, device=device))
        len_prompts.append(len(ids))

    # Current filled length of every row, shape (batch_size,).
    len_inputs = torch.tensor(len_prompts, dtype=torch.int64, device=device)

    # Right-pad all prompts with end_token up to context_length so the batch
    # can be fed to the model as a single (batch_size, context_length) matrix.
    end_token_id = tokenizer.encode(end_token)[0]
    inputs_ids = torch.full(
        size=(len(prompts), context_length),
        fill_value=end_token_id,
        dtype=torch.int32,
        device=device,
    )  # all <end_token>, shape (batch_size, context_length)
    for i, ids in enumerate(prompts_ids):
        inputs_ids[i, : len(ids)] = ids  # overwrite padding with real tokens

    # Instantiate the model; hyper-parameters must match the checkpoint.
    model = TransformerLM(
        vocab_size=10000,
        context_length=256,
        num_layers=4,
        d_model=512,
        num_heads=16,
        theta=10000,
        d_ff=1344,
        device=device,
    )

    # Load the trained weights.
    load_checkpoint(
        src=ROOT_DIR.joinpath("output/model/model.pt"),
        model=model,
        optimizer=None,
    )
    # Inference only: switch off dropout/other training-mode behavior,
    # if the model defines any.
    model.eval()

    # Generation loop.
    with torch.no_grad():
        # Per-sequence flag: has this sequence already emitted end_token?
        ended = torch.zeros(
            len(prompts), dtype=torch.bool, device=device
        )  # shape (batch_size,)

        for _ in range(max_output_tokens):
            # Guard: once any row is full, the scatter_ below would write at
            # index context_length, out of bounds — stop generating.
            if int(len_inputs.max().item()) >= context_length:
                break

            whole_logits = model(
                inputs_ids
            )  # shape (batch_size, context_length, vocab_size)

            # Take the logits at each sequence's last filled position,
            # i.e. the next-token prediction.
            last_index = len_inputs - 1  # shape (batch_size,)
            indices = repeat(
                last_index, "b -> b 1 v", v=whole_logits.shape[-1]
            )  # gather index, shape (batch_size, 1, vocab_size)
            predict_logits = torch.gather(
                input=whole_logits, dim=1, index=indices
            )  # shape (batch_size, 1, vocab_size)
            predict_logits = predict_logits.squeeze(1)  # (batch_size, vocab_size)

            # Temperature-scaled softmax.
            predict_logits /= tempreature
            predict_probs = softmax(predict_logits)  # (batch_size, vocab_size)

            # Drop the low-probability tail (nucleus sampling).
            top_p_probs = nucleus_sample(
                predict_probs, top_p
            )  # shape (batch_size, vocab_size)

            # Sample one token per sequence.
            new_token_id = torch.multinomial(top_p_probs, num_samples=1).to(
                dtype=torch.int32
            )  # shape (batch_size, 1)

            # Append the sampled tokens so they feed the next forward pass.
            new_token_indices = len_inputs.unsqueeze(1)  # shape (batch_size, 1)
            inputs_ids.scatter_(dim=1, index=new_token_indices, src=new_token_id)
            len_inputs += 1  # keep the fill counters in sync with the scatter

            # BUGFIX: squeeze to (batch_size,) before OR-ing. The original
            # `(batch,) | (batch, 1)` broadcast produced a (batch, batch)
            # matrix and corrupted the early-stop condition.
            ended = ended | (new_token_id.squeeze(1) == end_token_id)
            if ended.all():
                break

    # Decode only the generated continuation of each sequence.
    outputs = {}
    for i, prompt in enumerate(prompts):
        output_ids = inputs_ids[i, len_prompts[i] :].cpu().numpy()
        output_text = tokenizer.decode(output_ids, end_token_id)  # type: ignore
        outputs[prompt] = output_text

    return outputs


if __name__ == "__main__":
    # Demo: generate continuations for two sample prompts.
    prompts = [
        "Once upon a time, there was a girl named Sue.",
        "In case I don't see you, good morning, good afternoon and ",
    ]

    results = inference(prompts=prompts, max_output_tokens=128)
    for prompt, completion in results.items():
        print(f"'{prompt}': '{completion}'\n")
