# encoding: utf-8

import traceback
from loguru import logger
import torch

from transformers import AutoModelForCausalLM, AutoTokenizer

# Use the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Local checkpoint of a DPO-finetuned MiniMind model. The model ships its own
# modeling code, hence trust_remote_code=True below.
pretrained_model = "/root/train_about/llm_from_zero/my_minimind/minimind_dpo/checkpoint-4000"

tokenizer = AutoTokenizer.from_pretrained(pretrained_model, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(pretrained_model, trust_remote_code=True)
model.to(device)
# Inference only: switch off dropout etc. (no gradient work in this script).
model.eval()

# if __name__ == '__main__':
#     print("开始于模型聊天吧，输入exit退出")
#     while True:
#         text = input("用户：")
#         text = text.strip()
#         if "exit" in text:
#             break
#         input_text = text.strip()
#         if not input_text:
#             continue
#         inputs = tokenizer.apply_chat_template([{"role": "user", "content": input_text}],
#                                                add_generation_prompt=True,
#                                                tokenize=True,
#                                                return_tensors="pt",
#                                                return_dict=True
#                                                )
#         inputs = inputs.to(device)
#         idx = inputs["input_ids"]
#         with torch.no_grad():
#
#             res_y = model.generate(idx, tokenizer.eos_token_id, max_new_tokens=128, temperature=0.7,
#                                    top_k=8, stream=True)
#             print('回答：', end='')
#             try:
#                 y = next(res_y)
#             except StopIteration:
#                 print("No answer")
#                 continue
#
#             history_idx = 0
#             while y != None:
#                 answer = tokenizer.decode(y[0].tolist())
#                 if answer and answer[-1] == '�':
#                     try:
#                         y = next(res_y)
#                     except:
#                         break
#                     continue
#                 # print(answer)
#                 if not len(answer):
#                     try:
#                         y = next(res_y)
#                     except:
#                         break
#                     continue
#
#                 print(answer[history_idx:], end='', flush=True)
#                 try:
#                     y = next(res_y)
#                 except:
#                     break
#                 history_idx = len(answer)
#
#             print('\n')
if __name__ == '__main__':
    # Interactive chat REPL. Fixes vs. the previous version:
    #   * the old loop mixed `for y in res_y` with manual `next(res_y)` calls,
    #     which advanced the generator twice per iteration and silently dropped
    #     every other streamed chunk;
    #   * an incomplete-UTF-8 answer (ending in '\ufffd') could still be
    #     printed because the fallthrough after the refetch lacked `continue`.
    print("开始与模型聊天吧，输入exit退出")
    while True:
        text = input("用户：").strip()
        # Any input containing "exit" quits the chat loop.
        if "exit" in text:
            break
        if not text:
            continue
        # Build the chat-formatted prompt tensor for a single user turn.
        inputs = tokenizer.apply_chat_template(
            [{"role": "user", "content": text}],
            add_generation_prompt=True,
            tokenize=True,
            return_tensors="pt",
            return_dict=True,
        )
        inputs = inputs.to(device)
        idx = inputs["input_ids"]
        with torch.no_grad():
            # stream=True: MiniMind's custom generate() yields the token
            # sequence generated so far on each step (not single new tokens).
            res_y = model.generate(idx, tokenizer.eos_token_id, max_new_tokens=128,
                                   temperature=0.7, top_k=8, stream=True)
            print('回答：', end='')
            history_idx = 0  # length of answer text already printed
            for y in res_y:
                answer = tokenizer.decode(y[0].tolist())
                # Skip empty decodes and answers ending in a replacement char
                # ('\ufffd' = partial multi-byte sequence): wait for the next
                # chunk to complete the character before printing.
                if not answer or answer[-1] == '�':
                    continue
                # Print only the not-yet-shown suffix of the running answer.
                print(answer[history_idx:], end='', flush=True)
                history_idx = len(answer)
            print()
