from gxl_ai_utils.utils import utils_file
from model_agent import do_chat_text2text, init_model_my, do_chat_s2t
import os
import json
import argparse

def infer_t2t(text, model, device):
    """Run one text-to-text inference turn and echo the model reply.

    Args:
        text: user prompt string passed to the chat model.
        model: loaded model object (from ``init_model_my``).
        device: device the model lives on.

    Returns:
        The model's reply string.
    """
    reply = do_chat_text2text(device=device, model=model, text_for_chat=text)
    print("Bot:", reply)
    return reply

def infer_s2t(input_wav_path, model, device):
    """Run one speech-to-text inference turn and echo the model reply.

    Args:
        input_wav_path: path to the input WAV audio file.
        model: loaded model object (from ``init_model_my``).
        device: device the model lives on.

    Returns:
        The model's reply string.
    """
    reply = do_chat_s2t(device=device, model=model, input_wav_path=input_wav_path)
    print("Bot:", reply)
    return reply

def process_jsonl_file(input_filepath, output_filepath, model, tokenizer, device, choice):
    """Run inference over every record of a JSONL file and write results.

    Each input line is a JSON object; a ``response`` field is added by
    running either text-to-text (reads ``prompt``) or speech-to-text
    (reads ``audio_path``) inference, and the record is written to the
    output file, one JSON object per line.

    Args:
        input_filepath: path to the input ``.jsonl`` file.
        output_filepath: path where the augmented ``.jsonl`` is written.
        model: loaded model object (from ``init_model_my``).
        tokenizer: kept for interface compatibility; not used here.
        device: device the model lives on.
        choice: inference mode, ``"t2t"`` or ``"s2t"``.

    Raises:
        ValueError: if ``choice`` is neither ``"t2t"`` nor ``"s2t"``.
            (Previously an unknown choice silently copied records through
            without any ``response`` field.)
    """
    # Validate up front so a typo fails fast instead of producing a
    # response-less output file.
    if choice not in ("t2t", "s2t"):
        raise ValueError(f"unknown choice: {choice!r} (expected 't2t' or 's2t')")
    # Stream record-by-record instead of buffering the whole output in
    # memory: results processed before a mid-file crash are preserved.
    with open(input_filepath, 'r', encoding='utf-8') as fin, \
         open(output_filepath, 'w', encoding='utf-8') as fout:
        for line in fin:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines (json.loads would crash on them)
            data = json.loads(line)
            if choice == "t2t":
                data['response'] = infer_t2t(data['prompt'], model, device)
            else:  # choice == "s2t", guaranteed by the check above
                data['response'] = infer_s2t(data['audio_path'], model, device)
            fout.write(json.dumps(data, ensure_ascii=False) + '\n')

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="单文件推理")
    parser.add_argument('--input', type=str, required=True, help='输入jsonl文件路径')
    parser.add_argument('--output', type=str, required=True, help='输出jsonl文件路径')
    parser.add_argument('--choice', type=str, required=True, help='推理方式，t2t或s2t')
    # Previously hard-coded; now overridable from the CLI with the old
    # values as defaults, so existing invocations behave the same.
    parser.add_argument('--gpu_id', type=int, default=6,
                        help='GPU id to load the model on')
    parser.add_argument(
        '--checkpoint_path', type=str,
        default="/mnt/sfs/asr/code/osum_xlgeng_3B/examples/wenetspeech/whisper/exp/epoch_3_7_LLM3Binstruct_5Ws2t/step_9999.pt",
        help='model checkpoint (.pt) path')
    parser.add_argument(
        '--config_path', type=str,
        default="/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper/conf/config_llm_huawei_instruct_3B_cosyvoice1-token.yaml",
        help='model config (.yaml) path')
    args = parser.parse_args()

    model, tokenizer, device = init_model_my(
        gpu_id=args.gpu_id,
        checkpoint_path=args.checkpoint_path,
        config_path=args.config_path,
    )
    process_jsonl_file(args.input, args.output, model, tokenizer, device, args.choice)