import os

from gxl_ai_utils.utils import utils_file

def do_work1():
    """Validate that every entry in the context-ASR jsonl has a usable wav file.

    Copies the source jsonl into the data root, then checks each entry's
    expected wav path under wav_random_yinse/. Entries whose wav is missing
    or (near-)empty are collected and written to error_file.jsonl so a later
    step can filter them out.
    """
    root_dir = "/home/A02_tmpdata2/data/context_asr_sentence_full"
    utils_file.makedir(root_dir)

    json_file = "/home/A02_tmpdata3/code/dragon_ai/context_asr_sentence_full.jsonl"
    utils_file.copy_file2(json_file, root_dir)

    error_file = os.path.join(root_dir, "error_file.jsonl")

    dict_list = utils_file.load_dict_list_from_jsonl(json_file)

    error_list = []
    for dict_i in utils_file.tqdm(dict_list):
        key = dict_i['key']
        wav_path = f"/home/A02_tmpdata2/data/context_asr_sentence_full/wav_random_yinse/{key}.wav"
        if not os.path.exists(wav_path):
            print(f"wav file {wav_path} does not exist")
            error_list.append(dict_i)
            # Skip the size check: the file is absent, so checking its size
            # would fail (or append the same entry a second time).
            continue
        if utils_file.get_file_size(wav_path) < 0.0000001:
            # A (near-)zero-sized wav is treated as an error as well.
            error_list.append(dict_i)

    utils_file.write_dict_list_to_jsonl(error_list, error_file)


def do_work2():
    """Filter the full jsonl down to entries whose key is not in the error list.

    Reads the full dataset and the error list produced by do_work1, drops
    every entry whose key appears in the error list, and writes the
    remainder to vaild_file.jsonl.
    """
    root_dir = "/home/A02_tmpdata2/data/context_asr_sentence_full"
    all_file = os.path.join(root_dir, "context_asr_sentence_full.jsonl")
    vaild_file = os.path.join(root_dir, "vaild_file.jsonl")
    error_file = os.path.join(root_dir, "error_file.jsonl")

    all_entries = utils_file.load_dict_list_from_jsonl(all_file)
    # Set of bad keys for O(1) membership tests in the filter below.
    bad_keys = {entry['key'] for entry in utils_file.load_dict_list_from_jsonl(error_file)}

    kept_entries = [
        entry
        for entry in utils_file.tqdm(all_entries)
        if entry['key'] not in bad_keys
    ]
    utils_file.write_dict_list_to_jsonl(kept_entries, vaild_file)

def do_work3():
    """Truncate each entry's think_str to its first sentence.

    Keeps only the text up to (and including) the first '。' of
    extra['think_str'] and writes the result to a new jsonl file.
    """
    valid_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/vaild_file.jsonl"
    new_valid_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/valid_file_short_think.jsonl"

    entries = utils_file.load_dict_list_from_jsonl(valid_file)
    shortened = []
    for entry in entries:
        # First sentence only; re-append the '。' dropped by split().
        first_sentence = entry['extra']['think_str'].split("。")[0] + "。"
        entry['extra']['think_str'] = first_sentence
        shortened.append(entry)
    utils_file.write_dict_list_to_jsonl(shortened, new_valid_file)

import string

# Common full-width/CJK punctuation that string.punctuation (ASCII-only)
# does not cover; the texts processed here are Chinese (see the '。'/'、'
# usage elsewhere in this file), so these must be stripped as well.
_CJK_PUNCTUATION = "。，、；：？！「」『』（）《》【】“”‘’…—·～"

def remove_punctuation(input_str):
    """Remove all punctuation (ASCII and common CJK) from the input string.

    Args:
        input_str (str): Input string that may contain punctuation.

    Returns:
        str: The string with all ASCII and common full-width/CJK
        punctuation characters removed.
    """
    # One C-level pass via a translation table that deletes every mapped char.
    translator = str.maketrans('', '', string.punctuation + _CJK_PUNCTUATION)
    return input_str.translate(translator)

def do_work4():
    """Build the medium-think variant and punctuation-free copies of all variants.

    Steps:
      1. From the short-think file, append a sentence listing the
         easily-confused words to each think_str -> medium-think file.
      2. For the full, medium, and short variants, strip punctuation from
         each entry's 'txt' and write *_no_punctuation jsonl files.
    """
    short_think_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/valid_file_short_think.jsonl"
    medium_think_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/valid_file_medium_think.jsonl"
    full_think_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/valid_file.jsonl"

    # Build the medium-think content from the short variant.
    short_dict_list = utils_file.load_dict_list_from_jsonl(short_think_file)
    medium_dict_list = []
    for dict_i in utils_file.tqdm(short_dict_list):
        think_str = dict_i['extra']['think_str']
        words_list = dict_i['extra']['words']
        words_str = "、".join(words_list)
        # Bug fix: embed the '、'-joined string, not the Python repr of the
        # list (the original interpolated words_list and left words_str unused).
        new_think_str = f'{think_str}句子中含有的一些易错词，经过语音场景分析，应当采用如下词汇：{words_str}。'
        dict_i['extra']['think_str'] = new_think_str
        medium_dict_list.append(dict_i)
    utils_file.write_dict_list_to_jsonl(medium_dict_list, medium_think_file)

    # Strip punctuation from 'txt'.
    # full
    full_dict_list = utils_file.load_dict_list_from_jsonl(full_think_file)
    output_full_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/valid_file_no_punctuation.jsonl"
    new_full_dict_list = []
    for dict_i in utils_file.tqdm(full_dict_list):
        dict_i['txt'] = remove_punctuation(dict_i['txt'])
        new_full_dict_list.append(dict_i)
    utils_file.write_dict_list_to_jsonl(new_full_dict_list, output_full_file)

    # medium
    output_medium_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/valid_file_medium_no_punctuation.jsonl"
    new_medium_dict_list = []
    for dict_i in utils_file.tqdm(medium_dict_list):
        dict_i['txt'] = remove_punctuation(dict_i['txt'])
        new_medium_dict_list.append(dict_i)
    utils_file.write_dict_list_to_jsonl(new_medium_dict_list, output_medium_file)

    # short
    # Bug fix: the medium loop above mutated the dicts of short_dict_list in
    # place (medium_dict_list holds the same objects), so their think_str is
    # now the medium one. Reload from disk to recover the short data.
    short_dict_list = utils_file.load_dict_list_from_jsonl(short_think_file)
    output_short_file = "/home/A02_tmpdata2/data/context_asr_sentence_full/valid_file_short_no_punctuation.jsonl"
    new_short_dict_list = []
    for dict_i in utils_file.tqdm(short_dict_list):
        dict_i['txt'] = remove_punctuation(dict_i['txt'])
        new_short_dict_list.append(dict_i)
    utils_file.write_dict_list_to_jsonl(new_short_dict_list, output_short_file)





if __name__ == "__main__":
    # Guard the entry point so importing this module does not run the pipeline.
    do_work4()
