import os
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
from datasets import load_dataset, Audio
from argparse import ArgumentParser
from src.models import model_cls_mapping
import json
import torchaudio
from tqdm import tqdm
from loguru import logger


def main():
    """Run inference for one model over one dataset split and save responses as JSONL.

    Command-line arguments:
        --model:    key into ``model_cls_mapping`` selecting the model class.
        --data:     dataset config name forwarded to ``load_dataset``.
        --split:    dataset split to evaluate (default: 'test').
        --modality: 'audio' (speech input), 'text' (prompt input), or
                    'ttft' (time-to-first-token timing).
        --id:       opaque id forwarded to the model's generate_* methods.
                    # NOTE(review): semantics depend on the model class — confirm.

    Writes one JSON object per input record to
    ``{model}-{data}-{split}-{modality}.jsonl`` in the working directory.
    """
    parser = ArgumentParser()
    parser.add_argument('--model', type=str, default='qwen2', choices=list(model_cls_mapping.keys()))
    parser.add_argument('--data', type=str, default='alpacaeval')
    parser.add_argument('--split', type=str, default='test')
    parser.add_argument('--modality', type=str, default='audio', choices=['audio', 'text', 'ttft'])
    # Fix: the default was the int 4 while type=str, so args.id was an int when
    # defaulted but a str when supplied on the CLI. Keep the CLI type (str) and
    # make the default a str for a consistent type either way.
    parser.add_argument('--id', type=str, default='4')
    args = parser.parse_args()

    # Load the evaluation data; for audio, decode the column to 16 kHz waveforms.
    data = load_dataset('/mnt/sfs/asr/wsy_data/hlt-lab', args.data, split=args.split)
    if args.modality == 'audio':
        data = data.cast_column("audio", Audio(sampling_rate=16_000))

    model = model_cls_mapping[args.model]()

    if args.modality == 'ttft':
        # Warm up once so the first timed sample doesn't pay the cold-start cost.
        _ = model.generate_ttft(data[0]['audio'])

    # Inference loop: keep every non-audio field of the record and attach the response.
    results = []
    for item in tqdm(data, total=len(data)):
        tmp = {k: v for k, v in item.items() if k != 'audio'}
        if args.modality == 'text':
            response = model.generate_text(item['prompt'], args.id)
            print("text_response: ", response)
        elif args.modality == 'audio':
            response = model.generate_audio(item['audio'], args.id)
            print("audio_response: ", response)
        elif args.modality == 'ttft':
            response = model.generate_ttft(item['audio'])
        else:
            # Unreachable: argparse `choices` already restricts --modality.
            raise NotImplementedError
        tmp['response'] = response
        results.append(tmp)

    # Save results as JSONL: one UTF-8 record per line, non-ASCII preserved.
    output_file = f'{args.model}-{args.data}-{args.split}-{args.modality}.jsonl'
    with open(output_file, 'w', encoding='utf-8') as f:
        for record in results:
            f.write(json.dumps(record, ensure_ascii=False) + '\n')


# Script entry point: run inference only when executed directly, not on import.
if __name__ == '__main__':
    main()