import codecs
import json
import os
import re

import tqdm
from gxl_ai_utils.utils import utils_file


def data_prepare():
    """Dump the transcript values of the ASR training scp to ./data/asr_train_data.list."""
    asr_train_data = "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_salmonn4ft_LLM/examples/aishell/ft_LLM/data_list/all.text"
    # load_dict_from_scp maps utterance-id -> text; only the text side is kept.
    text_by_utt = utils_file.load_dict_from_scp(asr_train_data)
    utils_file.write_list_to_file(list(text_by_utt.values()), "./data/asr_train_data.list")


def data_prepare2():
    """Collect transcripts from every crawled dataset under `pachong_dir`.

    For each dataset directory, reads each scp file named in `file_names`
    (if present) and appends all of its transcript values to one combined
    list written to ./data/asr_pachong_data.list.
    """
    pachong_dir = "/home/node36_data/xlgeng/asr_data_from_pachong/gxl_output/"
    file_names = ['text', 'all_2.text']
    res_list = []
    for dataname in os.listdir(pachong_dir):
        temp_dir = os.path.join(pachong_dir, dataname)
        for file_name in file_names:
            # Compute the path once (the original joined it twice and ended
            # the loop body with a dead `continue`).
            scp_path = os.path.join(temp_dir, file_name)
            if not os.path.exists(scp_path):
                continue
            dict_data = utils_file.load_dict_from_scp(scp_path)
            res_list.extend(dict_data.values())
    utils_file.write_list_to_file(res_list, "./data/asr_pachong_data.list")


def do_filter(text, only_zn=False, only_en=False, only_num=False):
    """Strip punctuation with a regex, keeping only Chinese chars, letters and digits.

    The keep-flags are checked in priority order only_zn -> only_en -> only_num;
    with no flag set, all three character classes are kept.
    """
    if only_zn:
        keep = r'\u4e00-\u9fa5'
    elif only_en:
        keep = r'a-zA-Z'
    elif only_num:
        keep = r'0-9'
    else:
        keep = r'\u4e00-\u9fa5a-zA-Z0-9'
    # Delete every character outside the selected keep-class.
    return re.sub('[^' + keep + ']', '', text)


def get_all_files_in_directory(directory):
    """Return the paths of every file found recursively under *directory*."""
    # os.walk visits each sub-directory; collect every file it reports.
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(directory)
        for name in names
    ]


def handle_nlp_data():
    """Clean the Weibo corpus: keep Chinese characters only and drop boilerplate words.

    Reads ./data/weibo.txt line by line, strips every non-CJK character,
    removes platform boilerplate ('回复', '转发', '微博', '客户端') and writes the
    non-empty results to ./data/weibo_1.list.
    """
    input_data_path = "./data/weibo.txt"
    res_list = []
    target_list = utils_file.load_list_file_clean(input_data_path)
    for target in tqdm.tqdm(target_list, total=len(target_list)):
        temp_target = do_filter(target, only_zn=True)
        # do_filter(only_zn=True) already removed every non-CJK character,
        # so the original strip()/whitespace replaces were dead code; only
        # the boilerplate words (themselves CJK) still need removing.
        for word in ('回复', '转发', '微博', '客户端'):
            temp_target = temp_target.replace(word, '')
        # Check emptiness AFTER boilerplate removal: the original checked
        # before, so lines consisting only of boilerplate ended up as
        # empty strings in the output file.
        if temp_target:
            res_list.append(temp_target)
    utils_file.write_list_to_file(res_list, "./data/weibo_1.list")


def handle_nlp_data_2():
    """Clean the Chinese Wikipedia dump: keep only the Chinese characters of each article."""
    input_data_dir = "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/wiki_zh"
    paths = get_all_files_in_directory(input_data_dir)
    res_list = []
    for path in tqdm.tqdm(paths, total=len(paths)):
        # Best-effort per file: a malformed jsonl or a record without 'text'
        # is reported and the rest of the files are still processed.
        try:
            for record in utils_file.load_dict_list_from_jsonl(path):
                cleaned = do_filter(record['text'], only_zn=True)
                if cleaned:
                    res_list.append(cleaned)
        except Exception as e:
            print(e)
            continue
    utils_file.write_list_to_file(res_list, "./data/wiki_zh_1.list")

def handle_nlp_data_3():
    """Clean the Baidu QA corpus: keep the Chinese characters of each title and answer.

    Parses the jsonl-style dump line by line, filters 'title' and 'answer'
    down to CJK characters, and writes the non-empty results to
    ./data/baike_wenda_1.list.
    """
    input_data_path = "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/baike_qa_train.json"
    records = []
    with codecs.open(input_data_path, 'r', encoding='utf-8') as f:
        # Stream the file instead of readlines(): the original held the whole
        # raw text AND the parsed dicts in memory at the same time.
        for line in f:
            try:
                records.append(json.loads(line))
            except Exception as e:
                # Best-effort: report the bad line and keep going.
                print(e)
    res_list = []
    for dict_data in tqdm.tqdm(records, total=len(records)):
        # Title and answer get identical treatment; fold the duplicate code.
        for field in ('title', 'answer'):
            temp_txt = do_filter(dict_data[field], only_zn=True)
            if temp_txt:  # idiomatic truthiness, was `not len(x) == 0`
                res_list.append(temp_txt)
    utils_file.write_list_to_file(res_list, "./data/baike_wenda_1.list")

def handle_nlp_data_4():
    """Clean the Mobvoi (chumenwenwen) jsonl corpus, one cleaned text per output line.

    Counts the input lines first (for the progress bar), then appends each
    record's CJK-filtered 'text' field to ./data/chumenwenwen_1.list.
    """
    output_path = "./data/chumenwenwen_1.list"
    input_data_path = "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/mobvoi_seq_monkey_general_open_corpus.jsonl"
    utils_file.logging_print('开始统计行数')
    # First pass: count lines so tqdm can show a meaningful total.
    with codecs.open(input_data_path, 'r', encoding='utf-8') as f:
        total_num = sum(1 for _ in f)
    utils_file.logging_print("total_num: {}".format(total_num))
    with codecs.open(output_path, 'a', encoding='utf-8') as sink:
        with codecs.open(input_data_path, 'r', encoding='utf-8') as src:
            for raw_line in tqdm.tqdm(src, total=total_num):
                # Best-effort per record: bad JSON / missing key is printed
                # and skipped, the rest of the corpus is still processed.
                try:
                    cleaned = do_filter(json.loads(raw_line)['text'], only_zn=True)
                    if len(cleaned) != 0:
                        sink.write(cleaned + '\n')
                except Exception as e:
                    print(e)
                    continue



def cat_all():
    """Concatenate every prepared corpus list file into ./data/all_data.list."""
    path_list = [
        "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/asr_pachong_data.list",
        "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/asr_train_data.list",
        "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/baike_wenda_1.list",
        "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/weibo_1.list",
        "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/wiki_zh_1.list",
        "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_aishell/examples/aishell/s0/LM_training/data/chumenwenwen_1.list",
    ]
    output_path = "./data/all_data.list"
    # Append mode: matches the rest of this script's convention of
    # accumulating into the output file across runs.
    with codecs.open(output_path, 'a', encoding='utf-8') as sink:
        for path in path_list:
            utils_file.logging_print(f'cat {path} to {output_path}')
            # First pass counts lines so tqdm can show a meaningful total.
            with codecs.open(path, 'r', encoding='utf-8') as src:
                line_count = sum(1 for _ in src)
            with codecs.open(path, 'r', encoding='utf-8') as src:
                for line in tqdm.tqdm(src, total=line_count):
                    sink.write(line)



if __name__ == "__main__":
    # NOTE(review): only the final concatenation step runs here; the other
    # prepare/handle_* steps were presumably invoked manually in earlier
    # runs — confirm before relying on this as a full pipeline entry point.
    cat_all()