
import os
import torch

from collections import OrderedDict

from typing import Any
from learn_conf.tokenizer import TokenizerTool
from model.model import TransformerModel
from model.part import subsequentMask


def greedy_decode(
        model: TransformerModel,   # trained model to run inference with
        src: torch.Tensor,         # source token ids, shape (batch_size, src_seq_len)
        in_mask: torch.Tensor,     # source padding mask, shape (batch_size, src_seq_len)
        max_len: int,              # maximum number of tokens to generate
        start_symbol: int,         # <bos> token id used to seed the decoder
        end_symbol: int,           # <eos> token id that stops generation
        device: str,
):
    """Greedily decode a target sequence one token at a time.

    Encodes ``src`` once, then repeatedly feeds the growing target sequence
    through the decoder, always appending the highest-probability next token.
    Generation stops early when ``end_symbol`` is produced.

    NOTE(review): the output buffer is created with batch size 1 and
    ``next_word.item()`` assumes a single sequence, so this function only
    supports batch_size == 1 — confirm callers respect that.

    Returns the decoded token-id tensor of shape (1, decoded_len),
    starting with ``start_symbol``.
    """
    # Expand padding mask from (batch, src_len) to (batch, src_len, src_len)
    # so every query position shares the same key-padding mask.
    src_mask = in_mask.bool().unsqueeze(1).expand(-1, src.size(-1), -1).to(device)

    with torch.no_grad():  # pure inference: skip autograd bookkeeping
        # Encode the source sentence once; the memory is reused every step.
        memory = model.encoder(x=model.pos_encoder(model.embed(src)), mask=src_mask)

        # Output buffer seeded with <bos>; shape (1, 1).
        ys = torch.full((1, 1), start_symbol).type_as(src.data).to(device)

        for _ in range(max_len - 1):
            # Decoder output, shape (batch_size, tgt_seq_len, d_model).
            out = model.decoder(
                x=model.pos_encoder(model.embed(ys)),
                m=memory,
                # Cross-attention mask, shape (batch, tgt_len, src_len).
                cross_mask=in_mask.bool().unsqueeze(1).expand(-1, ys.size(1), -1).to(device),
                # Causal mask so position i only attends to positions <= i;
                # type_as also moves the mask onto src's device.
                tgt_mask=subsequentMask(ys.size(1), ys.size(1)).type_as(src.data)
            )
            # Project only the newest position to vocabulary logits.
            prob = model.generate(out[:, -1, :])
            # Greedy choice: index of the max logit is the next token id.
            _, next_word = torch.max(prob, dim=1)
            if next_word.item() == end_symbol:
                break
            # Append the new token: (batch,) -> (batch, 1), concat on time axis.
            ys = torch.cat([ys, next_word.unsqueeze(1)], dim=1)
    return ys


# *****************************************************************************
# brief: 加载训练好的模型
# =============================================================================
def loadModel(
        pt: str,                 # path to the checkpoint (.pt) file
        config: dict[str, Any],  # hyperparameters used to rebuild the model
        device: str,             # device to load weights onto, e.g. "cpu"/"cuda"
):
    """Rebuild a TransformerModel and load trained weights from ``pt``.

    Returns the model placed on ``device``, or ``None`` when the checkpoint
    file does not exist — callers must check for ``None`` before use.
    """
    if not os.path.exists(pt):
        print(f"[!] No model found at {pt}")
        return None

    model = TransformerModel(
        vocab_size= config["vocab_size"],  # vocabulary size
        N=          config["N"],           # number of single layers in encoder/decoder
        d_model=    config["d_model"],     # embedding dimension per token
        headsNum=   config["heads"],       # number of attention heads
        d_ff=       config["d_ff"],        # feed-forward hidden layer size
        dropout=    config["dropout"],     # dropout probability for all dropout layers
    ).to(device)

    checkpoint = torch.load(pt, map_location=device)
    state_dict = checkpoint['model_state_dict']

    # Strip the 'module.' prefix that DataParallel/DDP adds when a model is
    # saved from a multi-GPU run, so keys match the plain (unwrapped) model.
    new_state_dict = OrderedDict(
        (k.removeprefix("module."), v) for k, v in state_dict.items()
    )
    model.load_state_dict(new_state_dict)

    return model


def interactive_translate(model, tokenizer, device, max_len=150):
    """Read English sentences from stdin and print greedy-decoded Chinese
    translations until the user types 'quit' or 'exit'."""
    print("Enter English sentences (type 'quit' to exit):")
    while True:
        line = input(">> ").strip()
        if line.lower() in ["quit", "exit"]:
            print("Bye!")
            break

        # Tokenize the English input into token ids plus a padding mask.
        src_ids, mask = tokenizer([line])
        src_ids, mask = src_ids.to(device), mask.to(device)

        # Greedy-decode the target (Chinese) token-id sequence.
        decoded = greedy_decode(
            model=model,
            src=src_ids,
            in_mask=mask,
            max_len=max_len,
            start_symbol=tokenizer.bos_id(),
            end_symbol=tokenizer.eos_id(),
            device=device,
        )

        # Convert the token ids back into a Chinese string and show it.
        print("Chinese translation:", tokenizer.id2str(decoded))


if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = TokenizerTool()

    model = loadModel(
        pt="/home/ubuntu/wanghanProject/transformerLearn/transformer_code/tmp_22000.pt",
        config={
            "vocab_size": tokenizer.vocab_size(),
            "N": 6,           # number of encoder/decoder layers
            "heads": 8,       # number of attention heads
            "d_model": 512,   # embedding dimension per token
            "d_ff": 1024,     # feed-forward hidden layer size
            "dropout": 0.1,   # dropout probability
        },
        device=device
    )

    # loadModel returns None when the checkpoint file is missing; exit
    # cleanly instead of crashing on model.eval() below.
    if model is None:
        raise SystemExit(1)

    model.eval()  # switch to inference mode

    interactive_translate(model, tokenizer, device, max_len=150)

    # Words currently translated correctly: code, device, afternoon, hear,
    # model, see, name, question, ask, equal, sea, new, your, you, water,
    # milk, Taiwan.
    # Example sentences that work:
    # Taiwan is part of China.
    # what is your name?
    # How to deal with the question?
    # Can you answer the question?