import json
import uuid

import tqdm

from gxl_ai_utils.utils import utils_file
import re
import html

def clean_text(text: str) -> str:
    """Deep-clean raw web text so that only readable content remains.

    Pipeline:
      1. HTML entity decoding and tag stripping (the ``<user>`` /
         ``<assistant>`` role markers are first turned into plain words
         so they survive tag removal)
      2. Removal of URLs, e-mail addresses and dotted-quad IP addresses
      3. Removal of emoji / pictographic symbols
      4. Removal of control characters and assorted special symbols
      5. Whitespace normalization

    :param text: raw web text
    :return: cleaned plain text
    """
    # 1. Preserve role markers as words, decode entities (&amp;, &lt;, ...),
    #    then blank out any remaining markup tags.
    for marker, plain in (('<user>', 'user'), ("<assistant>", "assistant")):
        text = text.replace(marker, plain)
    text = re.sub(r'<[^>]+>', ' ', html.unescape(text))

    # 2. Strip URLs (http(s)://... or www....), e-mail addresses and
    #    IPv4 addresses in a single pass.
    network_pattern = (
        r'(https?://|www\.)\S+|'
        r'\b[\w\.-]+@[\w\.-]+\.\w{2,}\b|'
        r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
    )
    text = re.sub(network_pattern, ' ', text)

    # 3. Strip emoji and pictographs from the common Unicode blocks.
    emoji_re = re.compile(
        "["
        "\U0001F300-\U0001F5FF"  # symbols & pictographs
        "\U0001F600-\U0001F64F"  # emoticons
        "\U0001F680-\U0001F6FF"  # transport & map symbols
        "\U0001F700-\U0001F77F"  # alchemical symbols
        "\U0001F780-\U0001F7FF"  # geometric shapes extended
        "\U0001F800-\U0001F8FF"  # supplemental arrows-C
        "\U0001F900-\U0001F9FF"  # supplemental symbols & pictographs
        "\U0001FA00-\U0001FA6F"  # chess symbols / pictographs extended-A
        "\U00002702-\U000027B0"  # dingbats
        # "\U000024C2-\U0001F251"  # (disabled: would also hit CJK-adjacent ranges)
        "]+",
        flags=re.UNICODE,
    )
    text = emoji_re.sub(' ', text)

    # 4. Strip control characters and special symbols while keeping CJK,
    #    Latin letters, digits and some punctuation (adjust as needed).
    text = re.sub(r'[\r\n\t]+', ' ', text)
    text = re.sub(r'[#\$%\^&\*\(\)\[\]\{\}<>\|\\/~`"“”‘’]+', ' ', text)
    text = re.sub(r'—|–|…|·|•', ' ', text)
    # Optional: uncomment to drop ALL punctuation as well.
    # text = re.sub(r'[^\w\s\u4e00-\u9fa5]', ' ', text)

    # 5. Collapse whitespace runs into single spaces and trim the ends.
    return re.sub(r'\s+', ' ', text).strip()

def convert_str_to_list(text: str) -> list:
    """Split a cleaned dialogue string into a list of Q/A dicts.

    The input is expected to be a dialogue cleaned by ``clean_text``,
    where each round starts with the literal word ``user`` followed by
    the literal word ``assistant``.

    Each returned dict has the keys:
      - ``key``: ``"<uuid-int>_<round>"`` — the uuid part is shared by
        all rounds of one dialogue, the suffix is the 1-based round number
      - ``q``: the user's utterance
      - ``a``: the assistant's reply

    NOTE: the split is purely textual; if the word "user" occurs inside
    an assistant reply it will start a new (possibly bogus) round — a
    limitation inherited from the marker format. A trailing "user"
    question with no matching "assistant" reply is dropped.

    :param text: cleaned dialogue text
    :return: list of ``{"key", "q", "a"}`` dicts (empty if no full pair)
    """
    user_marker, assistant_marker = "user", "assistant"
    qa_pairs = []
    dialogue_key = str(uuid.uuid4().int)  # shared prefix for all rounds
    round_id = 1
    i = 0

    while i < len(text):
        # Locate the next complete "user ... assistant ..." round.
        user_start = text.find(user_marker, i)
        if user_start == -1:
            break
        assistant_start = text.find(assistant_marker, user_start)
        if assistant_start == -1:
            break  # unanswered question: drop the trailing fragment
        # Search for the next round only after the "assistant" keyword
        # itself ("user" cannot overlap the literal "assistant").
        next_user_start = text.find(user_marker,
                                    assistant_start + len(assistant_marker))

        user_text = text[user_start + len(user_marker):assistant_start].strip()
        if next_user_start == -1:
            # Last round: the reply runs to the end of the text.
            assistant_text = text[assistant_start + len(assistant_marker):].strip()
            i = len(text)
        else:
            assistant_text = text[assistant_start + len(assistant_marker):next_user_start].strip()
            i = next_user_start  # continue with the next round

        qa_pairs.append({
            "key": f"{dialogue_key}_{round_id}",
            "q": user_text,
            "a": assistant_text,
        })
        round_id += 1
    return qa_pairs


# Example / script entry point: clean the RefGPT-Fact zh dialogues and
# split them into per-round QA pairs.
if __name__ == "__main__":
    # Quick smoke test of clean_text on a string containing an emoji,
    # a URL, an e-mail address and assorted special symbols.
    raw = 'Hello 😊! Visit https://example.com or mail me at foo@bar.com.\n这是一段测试文本，含有#特殊$符号…'
    print(clean_text(raw))
    # Expected output (roughly): "Hello Visit or mail me at 这是一段测试文本 含有 特殊 符号"
    # Load the raw dialogues (one JSON dict per line; each dict is
    # expected to carry a "dialogue" field — see the loop below).
    input_raw_path = "/home/work_nfs9/asr_data/data/osum_data/text2text/Mutonix/RefGPT-Fact/data/zh_dataset.jsonl"
    dict_list = utils_file.load_dict_list_from_jsonl(input_raw_path)
    dialogue_list = []
    for dict_i in tqdm.tqdm(dict_list, total=len(dict_list), desc="提取对话，并进行清洗"):
        dialogue_list.append(clean_text(dict_i["dialogue"]))
    # Persist the cleaned dialogues, one per line (project helper;
    # presumably writes plain text lines — verify utils_file if reused).
    output_dialogue_path = "/home/work_nfs9/asr_data/data/osum_data/text2text/Mutonix/RefGPT-Fact/data/zh_dataset_gxl_clean.list"
    utils_file.write_list_to_file(dialogue_list, output_dialogue_path)
    # Split every cleaned dialogue into QA dicts; res_list is a list of
    # lists — one inner list of rounds per dialogue.
    res_list = []
    for dialogue_i in tqdm.tqdm(dialogue_list, total=len(dialogue_list), desc="生成 QA 对"):
        res_list.append(convert_str_to_list(dialogue_i))
    output_qa_path = "/home/work_nfs9/asr_data/data/osum_data/text2text/Mutonix/RefGPT-Fact/data/zh_dataset_gxl_clean_qa.json"
    # Write the nested QA list to a single pretty-printed JSON file,
    # keeping non-ASCII (Chinese) characters unescaped.
    with open(output_qa_path, 'w', encoding='utf-8') as f:
        json.dump(res_list, f, ensure_ascii=False, indent=4)








# sharegpt
# input_jsonl_paths = utils_file.do_get_list_for_wav_dir("/home/work_nfs11/asr_data/data/text2text_data_xlgeng/shareAI/ShareGPT-Chinese-English-90k/sharegpt_jsonl", suffix=".jsonl")
#
# def filter_list_delete_if_contain_str(input_list, str_to_filter):
#     return list(filter(lambda x: str_to_filter not in x, input_list))
#
# input_jsonl_paths = filter_list_delete_if_contain_str(input_jsonl_paths, "fix")
# utils_file.print_list(input_jsonl_paths)
#
# dict_list_all = []
# for input_jsonl_path in input_jsonl_paths:
#     dict_list = utils_file.load_dict_list_from_jsonl(input_jsonl_path)
#     dict_list_all.extend(dict_list)
#
# output_all_path = "/home/work_nfs11/asr_data/data/text2text_data_xlgeng/shareAI/ShareGPT-Chinese-English-90k/sharegpt_jsonl/gxl_all_data.jsonl"
# # utils_file.write_dict_list_to_jsonl(dict_list_all, output_all_path)
# dict_list = utils_file.load_dict_list_from_jsonl(output_all_path)
# new_dict_list = []
# for dict_i in tqdm.tqdm(dict_list, total=len(dict_list)):
#     conversition_list = dict_i["conversation"]
#     for conver_dict_i in conversition_list:
#         human_text = conver_dict_i["human"]
#         assistant_text = conver_dict_i["assistant"]
#         new_human_text = clean_text(human_text)
#         new_assistant_text = clean_text(assistant_text)
#         conver_dict_i["human"] = new_human_text
#         conver_dict_i["assistant"] = new_assistant_text
#     new_dict_list.append(dict_i)
# output_all_path_filter = "/home/work_nfs11/asr_data/data/text2text_data_xlgeng/shareAI/ShareGPT-Chinese-English-90k/sharegpt_jsonl/gxl_all_data_filter.jsonl"
# utils_file.write_dict_list_to_jsonl(new_dict_list, output_all_path_filter)



# Mutonix/RefGPT-Fact
# from datasets import load_dataset
#
# # 加载 parquet 文件
# ds_dict = load_dataset(
#     "parquet",
#     data_files={
#         "en": "/home/work_nfs11/asr_data/data/text2text_data_xlgeng/Mutonix/RefGPT-Fact/data/en-00000-of-00001-cd3c0de9c6d1d508.parquet",
#         "zh": "/home/work_nfs11/asr_data/data/text2text_data_xlgeng/Mutonix/RefGPT-Fact/data/zh-00000-of-00001-1da7374906626d9c.parquet",
#     }
# )
#
# # 保存成 jsonl 格式
# ds_dict["en"].to_json("/home/work_nfs11/asr_data/data/text2text_data_xlgeng/Mutonix/RefGPT-Fact/en_data.jsonl", lines=True)
# ds_dict["zh"].to_json("/home/work_nfs11/asr_data/data/text2text_data_xlgeng/Mutonix/RefGPT-Fact/zh_data.jsonl", lines=True)
