# Required libraries
import os
import re

import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoConfig

from base_model.nlp.base_model.second_model_mla import Transformer
from base_model.nlp.base_model.second_model_mla import UsherConfig

# Full-dialogue layout emitted by the model: special tokens delimit the user
# turn and the assistant turn; named groups expose each part for extraction.
pattern = (
    r'^<｜begin_of_sentence｜><｜User｜>(?P<user>.+?)'
    r'<｜Assistant｜>(?P<assistant>.+?)<｜end｜>.*$'
)
PATTERN = re.compile(pattern)


def forward(model, tokenizer, config, device, prompt=r"423877184", temperature=1.0):
    """Generate an assistant reply for a single-turn chat prompt.

    Builds the model's chat template around ``prompt``, samples tokens
    autoregressively until EOS or ``config.max_seq_len``, then extracts the
    assistant turn with the module-level ``PATTERN`` regex.

    Args:
        model: Autoregressive language model; ``model(ids)`` must return
            logits of shape (batch, seq, vocab).
        tokenizer: HuggingFace-style tokenizer (callable, with
            ``eos_token_id`` and ``decode``).
        config: Model config providing ``max_seq_len``.
        device: Torch device the input ids are moved to.
        prompt: User message to answer.
        temperature: Softmax temperature for sampling (1.0 = untouched
            logits, matching the previous hard-coded behavior).

    Returns:
        The assistant portion of the decoded dialogue, or the full decoded
        text when it does not match the expected dialogue pattern
        (e.g. generation was cut off before the end token).
    """
    # Inference mode: disables dropout etc.; idempotent if already set.
    model.eval()

    def generate_dialogue(user_input, max_length=config.max_seq_len,
                          eos_token_id=tokenizer.eos_token_id):
        # 1. Wrap the user message in the model's special chat tokens.
        chat_prompt = f"<｜begin_of_sentence｜><｜User｜>{user_input}<｜Assistant｜>"
        inputs = tokenizer(
            chat_prompt,
            max_length=config.max_seq_len,
            truncation=True,
            padding=False,
            return_tensors="pt",
        ).to(device)
        generated_ids = inputs["input_ids"].clone().detach()
        prompt_len = generated_ids.shape[1]

        # 2. Autoregressive sampling loop. no_grad is entered once around
        #    the whole loop rather than per step.
        with torch.no_grad():
            for _ in range(max_length - prompt_len):
                # Full forward pass; only the last position's logits are used.
                outputs = model(generated_ids)
                next_token_logits = outputs[:, -1, :] / temperature
                next_token_probs = F.softmax(next_token_logits, dim=-1)
                next_token_id = torch.multinomial(next_token_probs, num_samples=1)

                generated_ids = torch.cat([generated_ids, next_token_id], dim=-1)

                # Stop as soon as the model emits the end-of-sequence token.
                if next_token_id.item() == eos_token_id:
                    break

        # 3. Decode prompt + completion back to text.
        return tokenizer.decode(generated_ids[0]).strip()

    response = generate_dialogue(prompt)
    match = PATTERN.match(response)
    if match is None:
        # Dialogue markers absent/incomplete — fall back to the raw text.
        return response
    return match.group('assistant')


if __name__ == '__main__':
    # Prefer GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')

    # Register the custom config class so AutoConfig can resolve its model_type.
    AutoConfig.register("usherTransformer", UsherConfig)

    # A default UsherConfig only supplies the checkpoint directory; the
    # effective config is then loaded from that directory.
    config = UsherConfig()
    config = AutoConfig.from_pretrained(config.path)

    # Tokenizer from the same checkpoint directory.
    tokenizer = AutoTokenizer.from_pretrained(config.path)
    tokenizer.pad_token_id = 0

    # Build the model and move it to the selected device.
    model = Transformer(config).to(device)

    # Load weights; map_location keeps this working on CPU-only hosts.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted checkpoints (consider weights_only=True where supported).
    model_path = os.path.join(config.path, "model.pth")
    state_dict = torch.load(model_path, map_location=device)
    model.load_state_dict(state_dict)
    model.eval()  # inference mode: disable dropout before generating

    print(forward(model, tokenizer, config, device, prompt="423877184"))
