import os
import random

import torch
from transformers import AutoConfig, AutoTokenizer

from base_model.nlp.base_model.first_model_mla import forward, UsherConfig, Transformer

# Buffer for the randomly generated (digit string, Chinese reading) samples.
orign_data = []

# Map each ASCII digit to its Chinese numeral character.
# (To test pass-through behavior, an identity mapping "0"->"0" ... "9"->"9"
# can be substituted here instead.)
trans_map = dict(zip("0123456789", "零一二三四五六七八九"))


# ---- One-time model setup ----
# BUGFIX: in the original code everything below was indented inside the
# 10-iteration data-generation loop, so device selection, config/tokenizer
# loading, model construction and weight loading all ran 10 times — and
# AutoConfig.register raises on a duplicate registration, crashing the
# script on the second iteration. The setup is hoisted out of the loop.

# Select the compute device (GPU when available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Register the custom config class with transformers' auto-class machinery.
AutoConfig.register("usherTransformer", UsherConfig)

# A default UsherConfig is built only to discover the pretrained path;
# the effective config is then loaded from that path.
config = AutoConfig.from_pretrained(UsherConfig().path)

# Load the tokenizer and pin an explicit pad token id.
tokenizer = AutoTokenizer.from_pretrained(config.path)
tokenizer.pad_token_id = 0

# Initialize the model and move it to the selected device.
model = Transformer(config).to(device)

# Load trained weights. os.path.join replaces the original hard-coded
# Windows-only separator (config.path + r"\\model.pth").
model_path = os.path.join(config.path, "model.pth")
state_dict = torch.load(model_path, map_location=device)
model.load_state_dict(state_dict)

# ---- Generate 10 random digit strings with their expected Chinese reading ----
for _ in range(10):
    current_string = ""
    out_string = ""
    # Each sample is 1-9 random digits long.
    num_length = random.randint(1, 9)
    for _ in range(num_length):
        num = random.randint(0, 9)
        current_string += str(num)
        out_string += trans_map[str(num)]
    orign_data.append({"input": current_string, "output": out_string})

# Run inference on every generated sample and print the model's answer next
# to the expected transliteration.
for sample in orign_data:
    digits = sample["input"]
    expected = sample["output"]
    prediction = forward(model, tokenizer, config, device, digits)
    print("请求:\t" + digits)
    print("返回:\t" + prediction)
    print(f"正确的:\t {expected}")
    print()
