from gxl_ai_utils.utils import utils_file
from data_hanle import _main_make_shards

def convert_dict_list_to_format_dict_list_for_t2t(input_data_jsonl_path, output_data_jsonl_path):
    """Convert a jsonl of raw Q/A items into the standard TEXT2TEXT format.

    Each input item is expected to carry the answer in ``item['txt']`` and the
    question in ``item['extra']['q_txt']``. Items missing either field, or with
    an empty question/answer, are skipped with a warning instead of raising.

    Args:
        input_data_jsonl_path: path of the source jsonl file.
        output_data_jsonl_path: path the formatted jsonl is written to.
    """
    dict_list = utils_file.load_dict_list_from_jsonl(input_data_jsonl_path)
    # Fixed metadata tags shared by every TEXT2TEXT output item.
    task = "<TEXT2TEXT>"
    lang = "<CN>"
    speaker = "<NONE>"
    emotion = "NEUTRAL"
    style = "<NONE>"
    gender = "<NONE>"
    age = "<NONE>"
    caption = "<NONE>"
    output_dict_list = []
    for dict_item in utils_file.tqdm(dict_list, desc='Converting dict list to format dict list for t2t', total=len(dict_list)):
        # Optionally append a timestamp and a random number so keys stay unique:
        # key = dict_item['key'] + "_" + str(int(time.time())) + "_" + str(random.randint(1, 999999))
        key = dict_item['key']
        try:
            q = dict_item['extra']['q_txt']
            a = dict_item['txt']
        except Exception as e:
            utils_file.logging_warning(f"Skip item with no question or answer: {dict_item}， error: {e}")
            continue
        if len(a) < 1 or len(q) < 1:
            utils_file.logging_warning(f"Skip item with empty text or question: {dict_item}")
            continue
        output_dict_list.append({
            'task': task,
            'key': key,
            'lang': lang,
            'speaker': speaker,
            'emotion': emotion,
            'style': style,
            'gender': gender,
            'age': age,
            'caption': caption,
            'txt': a,
            # The question travels in 'extra'; duration is -1 for pure text data.
            'extra': {'duration': -1, 'question': q},
        })
    utils_file.write_dict_list_to_jsonl(output_dict_list, output_data_jsonl_path)


output_root_dir = "/home/work_nfs11/asr_data/data/text2text_data_xlgeng/shards/benchdata"


def _make_bench_shards(data_path, output_dir):
    """Format one benchmark jsonl into <output_dir>/data.list and pack it into shards."""
    utils_file.makedir_sil(output_dir)
    data_list_path = f"{output_dir}/data.list"
    convert_dict_list_to_format_dict_list_for_t2t(data_path, data_list_path)
    shard_dir = f"{output_dir}/shards"
    _main_make_shards(data_list_path, shard_dir)


# The "three musketeers" (三剑客) benchmark set
_make_bench_shards(
    "/home/work_nfs16/cywang/workspace/OSUM/UltraEval/ALL_en_Expand-Answered_last.jsonl",
    f"{output_root_dir}/three_kingdoms",
)

# qwen-omni bench data
_make_bench_shards(
    "/home/work_nfs11/znlin/znlin_nfs11/osum-math/code/merge/omni/20250624_qwenomni_bench_data.list.re",
    f"{output_root_dir}/qwenomni_bench_data",
)

# voicebench data
_make_bench_shards(
    "/home/work_nfs11/znlin/znlin_nfs11/osum-math/code/merge/all.jsonl.merge/all.jsonl.merge_data.list.re",
    f"{output_root_dir}/voicebench_data",
)



