import torch
import random
import numpy as np
from transformers import AutoTokenizer
from model.model import Transformer
from model.LMConfig import LMConfig

# Select GPU if available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def setup_seed(seed):
    """Seed every RNG source (Python, NumPy, PyTorch CPU and CUDA) for reproducibility."""
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
    # Force deterministic cuDNN kernel selection (disables the autotuner).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

setup_seed(1337)

# 2. Initialize the model and tokenizer
lm_config = LMConfig()

max_seq_len = 1024  # adjust as needed
lm_config.max_seq_len = max_seq_len

# Build the model and load fine-tuned weights onto the selected device.
model = Transformer(lm_config).to(device)
model_path = './out/full_sft_512.pth'  # replace with your model checkpoint path
state_dict = torch.load(model_path, map_location=device)
model.load_state_dict(state_dict)
model.eval()

tokenizer = AutoTokenizer.from_pretrained('./model/mateconv_tokenizer')


# 3. 对话函数：生成完整回复
# 3. Chat function: generate a complete reply
def generate_reply(prompt, temperature=0.5, top_k=16, stream=True):
    """Generate a full assistant reply for a single user prompt.

    Args:
        prompt: the user's message text.
        temperature: sampling temperature passed to ``model.generate``.
        top_k: top-k sampling cutoff passed to ``model.generate``.
        stream: whether ``model.generate`` yields incremental chunks.

    Returns:
        The decoded reply as a string ("" if the model produced nothing).
    """
    messages = [{"role": "user", "content": prompt}]

    # Render the chat template, keeping only the trailing max_seq_len-1
    # characters so the prompt fits the model's context window.
    new_prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )[-(max_seq_len - 1):]

    input_ids = tokenizer(new_prompt).data['input_ids']
    input_ids = torch.tensor(input_ids, dtype=torch.long, device=device).unsqueeze(0)

    generated_text = ""
    with torch.no_grad():
        # model.generate returns a generator of token-id tensors.
        res_y = model.generate(input_ids,
                               tokenizer.eos_token_id,
                               max_new_tokens=max_seq_len,
                               temperature=temperature,
                               top_k=top_k,
                               stream=stream)

        # next(it, None) replaces the repeated try/except StopIteration
        # blocks; None marks generator exhaustion and ends the loop below.
        y = next(res_y, None)
        if y is None:
            print("No answer")
            return ""

        history_idx = 0
        while y is not None:
            answer = tokenizer.decode(y[0].tolist())
            # A trailing U+FFFD means the chunk ends mid UTF-8 sequence;
            # wait for the next chunk before emitting anything.
            if answer and answer[-1] == '�':
                y = next(res_y, None)
                continue

            if len(answer):
                # Append only the part not already emitted.
                generated_text += answer[history_idx:]

            y = next(res_y, None)
            history_idx = len(answer)

    return generated_text

# Simple REPL: read user input until 'exit' is typed.
while True:
    user_text = input("User: ")
    if user_text == 'exit':
        break
    print(generate_reply(user_text))