import pyarrow.parquet as pq
from gxl_ai_utils.utils import utils_file
# url = "/apdcephfs_zwfy/share_303841515/Tealab/data/translation/speech-to-text/03/parquet_000000054.tar"
# pf = pq.ParquetFile(url)
# cols = pf.schema.names
# print(cols)                # ['utt', 'user_text', 'user_wav', 'user_audio_data', 'user_audio_data_lang', 'assist_text', 'assist_audio_data_lang']
# print(pf.metadata.num_rows, pf.metadata.num_row_groups)

# map_dict = utils_file.load_dict_from_scp("/apdcephfs_cq8/share_2906397/users/adinehuang/shaoguan/bag_translate_data/speech-to-text/juanjuan/output.mapping")
#
# true_text_20W_cn = utils_file.load_dict_from_scp("/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/zh_punc_true_text", silence=True)
# true_text_20W_en = utils_file.load_dict_from_scp("/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/en_punc_true_text", silence=True)
#
# new_cn_en_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/zh_en_punc_true_text_new_key"
# # new_en_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/en_punc_true_text_new_key"
# new_cn_en_dict = {}
# false_num = 0
# for key, value in utils_file.tqdm(map_dict.items(), total=len(map_dict), desc="tmp"):
#     if value in true_text_20W_cn:
#         new_cn_en_dict[key] = true_text_20W_cn[value]
#     elif value in true_text_20W_en:
#         new_cn_en_dict[key] = true_text_20W_en[value]
#     else:
#         false_num += 1
# print(f"false_num: {false_num}")
# utils_file.write_dict_to_scp(new_cn_en_dict, new_cn_en_path)


# new_txt_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/zh_en_punc_true_text_new_key"
# new_txt_dict = utils_file.load_dict_from_scp(new_txt_path)
#
# for df in pq.ParquetFile(url).iter_batches(batch_size=32):
#     df = df.to_pandas()
#     sample = {}
#     for i in range(len(df)):
#         sample.update(dict(df.loc[i]))
#         # NOTE do not return sample directly, must initialize a new dict
#         res_dict = {
#             **sample
#         }
#         # print(f'parquet opener: res_dict: {res_dict}')
#         # assert 'text' in res_dict, f'text not in res_dict'
#         assert "audio_data" in res_dict or "user_audio_data" in res_dict, f'audio_data not in res_dict'
#         print(f"res_dict keys: {res_dict.keys()}")
#
#         if 'utt' in res_dict and res_dict['utt'] in new_txt_dict:
#             print(f"found utt in true_text_20W_cn: {res_dict['utt']}, text: {new_txt_dict[res_dict['utt']]}")
#         elif 'utt' in res_dict and res_dict['utt']:
#             print(f"utt not found in true_text_20W_cn/en: {res_dict['utt']}")
#
# valid_new_txt_dict = {}
# valid_new_txt_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/zh_en_punc_true_text_new_key_valid"
# tar_list_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/data.list.train"
# tar_list = utils_file.load_list_file_clean(tar_list_path)
# false_num = 0
#
# def little_func(input_tar_list_little, new_txt_dict):
#     valid_new_txt_dict_little = {}
#     false_num = 0
#     true_num = 0
#     for url in utils_file.tqdm(input_tar_list_little):
#         for df in pq.ParquetFile(url).iter_batches(batch_size=32):
#             df = df.to_pandas()
#             for i in range(len(df)):
#                 res_dict = dict(df.loc[i])
#                 utt_key = res_dict['utt']
#                 if utt_key in new_txt_dict:
#                     valid_new_txt_dict_little[utt_key] = new_txt_dict[utt_key]
#                     true_num += 1
#                 else:
#                     false_num += 1
#             print(f"true_num: {true_num}, false_num: {false_num}")
#
#     return [valid_new_txt_dict_little, false_num]
#
#
# runner = utils_file.GXLMultiprocessingWithReturn(num_processes=150)
# result_list = runner.run(little_func, tar_list, new_txt_dict=new_txt_dict)
# for r in result_list:
#     little_valid_dict = r[0]
#     little_false_num = r[1]
#     valid_new_txt_dict.update(little_valid_dict)
#     false_num += little_false_num
#
# print(f"false_num: {false_num}")
# utils_file.write_dict_to_scp(valid_new_txt_dict, valid_new_txt_path)

from wenet.utils.common import remove_punctuation_keep_quote

# root_dir = "/apdcephfs_qy3/share_976139/data/asr/train/zh/train_20240825_ver12_80kh_org_fbankhires"
# wav_scp_path = f"{root_dir}/wav.scp"
# text_scp_path = f"{root_dir}/text"

# Post-process the 8W high-quality ASR transcripts: strip punctuation
# (quote marks are kept by the helper), then rebuild the jsonl manifest.
output_dir = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq"

# utt-key -> transcript text, loaded from a kaldi-style scp file.
text_dict = utils_file.load_dict_from_scp(f'{output_dir}/text.scp')

# Clean every transcript; dict comprehension replaces the manual
# loop-and-assign for the same key -> cleaned-text mapping.
new_text_dict = {key: remove_punctuation_keep_quote(text) for key, text in text_dict.items()}

# NOTE(review): this overwrites the original text.scp in place, so the
# pre-cleaning transcripts are lost — confirm that is intended, or write
# to a new filename if the original must be kept.
utils_file.write_dict_to_scp(new_text_dict, f"{output_dir}/text.scp")

# Pair wav.scp with the cleaned text and emit the training jsonl manifest.
utils_file.do_convert_wav_text_scp_to_jsonl(f"{output_dir}/wav.scp", f"{output_dir}/text.scp", f"{output_dir}/data.jsonl")

