from gxl_ai_utils.utils import utils_file
from model_agent import do_chat_text2text, init_model_my

# Parse the five required command-line parameters (all arrive as strings).
gpu_id, checkpoint_path, config_path, input_jsonl_path, output_jsonl_path = utils_file.do_get_commandline_param(5, ["gpu_id", "checkpoint_path", "config_path", "input_jsonl_path", "output_jsonl_path"])
# CLI params are strings; the GPU index must be an int for device selection.
gpu_id = int(gpu_id)
utils_file.logging_info(f"gpu_id: {gpu_id},\n checkpoint_path: {checkpoint_path},\n config_path: {config_path},\n input_jsonl_path: {input_jsonl_path}, \n output_jsonl_path: {output_jsonl_path}")

# Load the model/tokenizer onto the requested GPU; `device` is reused for every
# inference call below. NOTE(review): `tokenizer` is never used in this script —
# presumably do_chat_text2text handles tokenization internally; confirm.
model, tokenizer, device = init_model_my(gpu_id=gpu_id, checkpoint_path=checkpoint_path, config_path=config_path)

# Run text2text inference over every prompt in the input JSONL and write the
# results (input record + 'response' field) to the output JSONL.
# Each input record must contain a 'prompt' key; all other keys pass through.
utils_file.makedir_for_file(output_jsonl_path)
dict_list = utils_file.load_dict_list_from_jsonl(input_jsonl_path)
output_dict_list = []
total_num = len(dict_list)
for i, dict_item in enumerate(dict_list):
    question = dict_item['prompt']
    now = utils_file.do_get_now_time()
    response = do_chat_text2text(device=device, model=model, text_for_chat=question)
    time_used = utils_file.do_get_elapsed_time(now)
    utils_file.logging_info(f"processing {i+1}/{total_num}, time used: {time_used}s, question: {question}, response: {response}")
    dict_item['response'] = response
    output_dict_list.append(dict_item)
    # Checkpoint every 100 completed items so a crash loses at most 100
    # results. (Was `i % 100 == 0`, which wrote a useless one-entry file
    # right after the first item and then at items 101, 201, ...)
    if (i + 1) % 100 == 0:
        utils_file.write_dict_list_to_jsonl(output_dict_list, output_jsonl_path)
# Final write covers the tail that didn't land on a checkpoint boundary.
utils_file.write_dict_list_to_jsonl(output_dict_list, output_jsonl_path)
