import html
import random
import re
import time
import uuid

from gxl_ai_utils.utils import utils_file
from tqdm import tqdm

# QA / math corpora stored on the "kejilou" cluster NFS volumes (JSONL/JSON).
kejilou_data_list = [
    "/home/work_nfs11/mcshao/workspace/voicebench/VoiceBench/text/znlin_tn/qwen2-noFunc.jsonl",
    "/home/work_nfs11/mcshao/workspace/voicebench/VoiceBench/text/znlin_tn/qwen2-normal.jsonl",
    "/home/work_nfs11/znlin/znlin_nfs11/osum-math/data/gsm8k/0603_gsm8k.jsonl",
    "/home/work_nfs11/znlin/znlin_nfs11/osum-math/data/ape210k/train.ape.json",
    "/home/work_nfs11/znlin/znlin_nfs11/osum-math/code/MathGen/tn_gen_normalized.jsonl",

]
# QA / math corpora stored on the "jifang" (machine-room) NFS volumes.
jifang_data_list = [
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/data_kouyu/qa_vllm.jsonl",
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/data_kouyu2/qa_vllm.jsonl",
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/qa_vllm.jsonl",
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/data_stage2/qa_vllm.jsonl",
    "/home/work_nfs14/znlin/osum-math/code/Math23k/math23k_train_normal.json",
    "/home/work_nfs14/znlin/osum-math/code/Math23k/math23k_train_choice.json",
    "/home/work_nfs14/znlin/osum-math/code/Big-Math-RL-Verified/bigMath_train_normal.jsonl",



]

# Multi-turn ("duolun") dialogue QA corpora, one file per generation stage;
# later stages carry a ".filtered" suffix (presumably post-filtered — verify upstream).
duolun_data_list = [
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/data_duolun_stage1/qa_vllm.jsonl",
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/data_duolun_stage2/qa_vllm.jsonl",
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/data_duolun_stage3/qa_vllm.jsonl.filtered",
    "/home/work_nfs14/mcshao/workspace/osum_data/data/qa_text/data_duolun_stage4/qa_vllm.jsonl.filtered",
]

# Aggregated output list for the converted "jifang" data (written by the
# commented-out driver code further down this file).
jifang_all_path = "/home/work_nfs14/asr_data/data/text2text_data_xlgeng/kouyu_t2t_data/data.list"

def clean_text(text: str) -> str:
    """
    Deep-clean a raw web-text string and return only readable content.

    Pipeline (order matters):
      1. Decode HTML entities and strip markup tags.
      2. Remove URLs, e-mail addresses and dotted-quad IPs.
      3. Remove emoji / pictographic symbols.
      4. Remove assorted special symbols and control characters.
      5. Collapse runs of whitespace.

    :param text: raw web text
    :return: cleaned plain text
    """
    # Keep the literal role markers as words instead of letting the
    # tag-stripping step below delete them.
    text = text.replace('<user>', 'user')
    text = text.replace("<assistant>", "assistant")

    # Decode entities (&amp; -> &, &lt; -> <, ...) before tag removal.
    text = html.unescape(text)

    # Emoji / pictograph blocks (common Unicode ranges).
    emoji_chars = re.compile(
        "["
        "\U0001F300-\U0001F5FF"  # symbols & pictographs
        "\U0001F600-\U0001F64F"  # emoticons
        "\U0001F680-\U0001F6FF"  # transport & map symbols
        "\U0001F700-\U0001F77F"  # alchemical symbols
        "\U0001F780-\U0001F7FF"  # geometric shapes extended
        "\U0001F800-\U0001F8FF"  # supplemental arrows-C
        "\U0001F900-\U0001F9FF"  # supplemental symbols & pictographs
        "\U0001FA00-\U0001FA6F"  # chess symbols and pictographs
        "\U00002702-\U000027B0"  # dingbats
        "]+",
        flags=re.UNICODE
    )

    # (pattern, replacement) pairs applied strictly in this order.
    substitutions = (
        # HTML-like tags.
        (re.compile(r'<[^>]+>'), ' '),
        # URLs (http(s)://... or www.xxx), e-mail addresses, IPv4 addresses.
        (re.compile(
            r'(https?://|www\.)\S+|'
            r'\b[\w\.-]+@[\w\.-]+\.\w{2,}\b|'
            r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
        ), ' '),
        (emoji_chars, ' '),
        # Control characters.
        (re.compile(r'[\r\n\t]+'), ' '),
        # Misc. special symbols; keeps CJK, Latin, digits and most punctuation.
        (re.compile(r'[#\$%\^&\(\)\[\]\{\}<>\|\\~`"“”‘’]+'), ' '),
        # Dashes, ellipsis and middle dots.
        (re.compile(r'—|–|…|·|•'), ' '),
        # Whitespace normalisation.
        (re.compile(r'\s+'), ' '),
    )
    for pattern, replacement in substitutions:
        text = pattern.sub(replacement, text)

    return text.strip()


# Primary question/answer field names expected in the input records.
q_key = "question"
a_key = "answer"
# Alternate (short) field names used by some of the source files.
q_ke_2 = "q"
a_ke_2 = "a"
def convert_dict_list_to_format_dict_list_for_t2t(dict_list):
    """
    Convert raw QA records into the unified TEXT2TEXT training format.

    Each input record must carry a question/answer pair under either the
    long key names (``question``/``answer``) or the short ones (``q``/``a``).
    Records missing the pair, or whose cleaned question or answer comes out
    empty, are skipped with a log line.

    :param dict_list: list of raw record dicts
    :return: list of formatted dicts with the fixed schema
             task/key/lang/speaker/emotion/style/gender/age/caption/txt/extra
    """
    # Constant metadata shared by every produced record.
    task = "<TEXT2TEXT>"
    lang = "<CN>"
    speaker = "<NONE>"
    emotion = "NEUTRAL"
    style = "<NONE>"
    gender = "<NONE>"
    age = "<NONE>"
    caption = "<NONE>"
    # NOTE: removed unused local `extra_info`; the per-record
    # `extra_info_item` (which also carries the question) is what is emitted.
    output_dict_list = []
    for dict_item in tqdm(dict_list, desc='Converting dict list to format dict list for t2t', total=len(dict_list)):
        # Append a timestamp plus a random UUID so keys never collide, even
        # when source records share (or entirely lack) a 'key' field.
        key = dict_item.get('key', "") + "_" + str(int(time.time())) + "_" + str(uuid.uuid4().hex)
        if q_key in dict_item and a_key in dict_item:
            q = dict_item[q_key]
            a = dict_item[a_key]
        elif q_ke_2 in dict_item and a_ke_2 in dict_item:
            q = dict_item[q_ke_2]
            a = dict_item[a_ke_2]
        else:
            print(f"Skip item with no question or answer: {dict_item}")
            continue
        q = clean_text(q)
        a = clean_text(a)
        txt = a
        if len(txt) < 1 or len(q) < 1:
            print(f"Skip item with empty text or question: {dict_item}")
            continue
        # Question rides along in 'extra'; duration is unknown (-1) for text-only data.
        extra_info_item = {'duration': -1, 'question': q}
        dict_output_item = {
            'task': task,
            'key': key,
            'lang': lang,
            'speaker': speaker,
            'emotion': emotion,
            'style': style,
            'gender': gender,
            'age': age,
            'caption': caption,
            'txt': txt,
            'extra': extra_info_item,
        }
        output_dict_list.append(dict_output_item)
    return output_dict_list

#
# jifang_all_path = "/home/work_nfs14/asr_data/data/text2text_data_xlgeng/kouyu_t2t_data/data.list"
# all_list = []
# for tmp_path in jifang_data_list:
#     print(f"Loading {tmp_path}")
#     dict_list = utils_file.load_dict_list_from_jsonl(tmp_path)
#     new_dict_list = convert_dict_list_to_format_dict_list_for_t2t(dict_list)
#     all_list.extend(new_dict_list)
# utils_file.write_dict_list_to_jsonl(all_list, jifang_all_path)


# kejilou_all_path = "/home/work_nfs11/asr_data/data/text2text_data_xlgeng/kouyu_t2t_data/data.list"
# Output locations for packed shards and the shard index list.
# NOTE(review): not referenced anywhere in this chunk — presumably consumed by
# a downstream sharding step (and `data_list_path` duplicates `jifang_all_path`
# above); confirm before removing or renaming.
shard_dir = "/home/work_nfs14/asr_data/data/text2text_data_xlgeng/kouyu_t2t_data/shard"
data_list_path = "/home/work_nfs14/asr_data/data/text2text_data_xlgeng/kouyu_t2t_data/data.list"



