import os
import torch
from datetime import datetime
from transformers import GenerationConfig
from qwen.modeling_qwen import QWenLMHeadModel
from qwen.tokenization_qwen import QWenTokenizer
from arguments import GenegrateArguments

# Run on the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Script configuration (checkpoint paths and sampling hyper-parameters).
genegrate_args = GenegrateArguments()

# Tokenizer and model are loaded once at module import and shared by generate().
tokenizer = QWenTokenizer.from_pretrained(genegrate_args.tokenizer_dir)

model = QWenLMHeadModel.from_pretrained(genegrate_args.model_dir).to(device)

# Decoding configuration handed to model.generate() on every call.
gen_config = GenerationConfig(
    temperature=genegrate_args.temperature,
    top_k=genegrate_args.top_k,
    top_p=genegrate_args.top_p,
    do_sample=genegrate_args.do_sample,
    num_beams=genegrate_args.num_beams,
    repetition_penalty=genegrate_args.repetition_penalty,
    max_new_tokens=genegrate_args.max_new_tokens,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
)


def generate(text):
    """Generate an assistant reply for *text* with the module-level model.

    Parameters
    ----------
    text : str
        Raw user utterance; it is wrapped into the fixed chat prompt template.

    Returns
    -------
    str
        The model's answer with the echoed prompt and the ``<|im_end|>``
        end-of-message marker stripped off.
    """
    model.eval()
    # Fixed prompt template: system preamble + user turn + answer cue.
    text = "你是一个助手 用户: {text} 回答: ".format(text=text)
    # NOTE(review): `add_special` is assumed to be a valid kwarg of this
    # project's QWenTokenizer (it is not the standard HF `add_special_tokens`)
    # — confirm against qwen/tokenization_qwen.py.
    tokend = tokenizer(text, add_special=False)
    input_ids = torch.LongTensor([tokend.input_ids]).to(device)
    attention_mask = torch.LongTensor([tokend.attention_mask]).to(device)
    # Inference only: no_grad avoids building an autograd graph and the
    # associated activation memory.
    with torch.no_grad():
        outputs = model.generate(
            inputs=input_ids,
            attention_mask=attention_mask,
            generation_config=gen_config,
        )
    outs = tokenizer.decode(outputs[0].cpu().numpy())
    # Keep only the text before the end-of-message marker.
    answer = outs.split("<|im_end|>")[0]
    # The decoded sequence echoes the prompt; the reply follows the answer
    # cue.  Guard the split so a malformed decode returns the raw text
    # instead of raising IndexError (the original crashed here).
    marker = "回答: "
    if marker in answer:
        answer = answer.split(marker, 1)[1]
    return answer


# Ensure the sample-log directory exists; exist_ok avoids the
# check-then-create race of the original exists()/makedirs() pair.
os.makedirs(genegrate_args.save_samples_path, exist_ok=True)

print('开始和chatbot聊天，输入再见或者退出，结束对话')

# Append this session's transcript.  The context manager guarantees the log
# is flushed and closed even if generate() raises mid-conversation (the
# original only closed the file on the clean-exit path).
with open(os.path.join(genegrate_args.save_samples_path, 'samples.txt'),
          'a', encoding='utf8') as samples_file:
    samples_file.write("聊天记录{}:\n".format(datetime.now()))
    while True:
        text = input('user:> ')
        # Either farewell token ends the session.
        if text in ('再见', '退出'):
            print('再见')
            break
        dialog = "AI: >" + generate(text)
        print(dialog)
        samples_file.write('user:> ' + text + '\n')
        # Trailing newline so the next "user:>" entry is not glued onto the
        # AI reply (the original omitted it).
        samples_file.write(dialog + '\n')