import re
import uuid


from gxl_ai_utils.utils import utils_file
from data_hanle import _main_get_data_list, _main_make_shards

# Output root for the generated text-to-text paralanguage chat datasets.
# NOTE(review): reassigned to a work_nfs14 path further down — this value is
# only referenced by the commented-out sections above that reassignment.
root_dir = "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat"

# Chinese emotion label -> canonical English tag, used to build the "<TAG>"
# prefix that is prepended to answers (see handle_chat_for_think_template).
EMO_MAP = {
    "生气": "ANGER",
    "害怕": "FEAR",
    "高兴": "HAPPY",
    "惊讶": "SURPRISE",
    "伤心": "SAD",
    "厌恶": "DISGUST",
    "疑惑": "CONFUSED",
    "嘲讽": "SARCASM",
    "尴尬": "EMBARRASSED",
    "好奇": "CURIOUS",
    "担忧": "WORRIED",
    "关心": "WORRIED",  # NOTE(review): both 担忧 (worried) and 关心 (caring) map to WORRIED — presumably a deliberate merge; confirm with the labeling spec
    "害羞": "SHY",
    "抱歉": "SORRY",
    "中性": "NEUTRAL"
}

def handle_chat_for_think_template(input_data_list):
    """
    Rewrite QA dicts into the <think> chain-of-thought chat template.

    Each element of *input_data_list* must provide "key", "question" and
    "answer"; the paralinguistic fields ("age", "sex", "caption", "emotion",
    "style", "answer_emotion") are optional and fall back to neutral
    defaults.  The question is embedded into a <think>...<think end> prompt,
    and the answer gets its emotion tag prepended unless it already starts
    with a "<" tag.

    :param input_data_list: list of QA dicts, one per sample
    :return: list of dicts with keys "key", "q" and "a"
    """
    results = []
    iterator = utils_file.tqdm(
        input_data_list,
        desc="handle_chat_for_think_template",
        total=len(input_data_list),
    )
    for item in iterator:
        key = item["key"]
        question = item["question"]
        answer = item["answer"]
        age = item.get("age", "<ADULT>")
        sex = item.get("sex", "<MALE>")
        caption = item.get("caption", "<OTHER>")
        emotion = item.get("emotion", "<NEUTRAL>")
        style = item.get("style", "<日常口语>")
        answer_emotion = item.get("answer_emotion", "<NEUTRAL>")
        # Runtime prompt text reproduced verbatim from the template.
        prompt = f"<think>用户说的话是:{question},年龄为:{age},性别为:{sex},风格为:{style},情感为:{emotion},声音事件为:{caption},推测使用的回复情感为:{answer_emotion},我应该综合用户的语义和副语言信息给出专业且对应的回答<think end>"
        # Prefix the emotion tag only when the answer is not already tagged.
        tagged_answer = answer if answer.startswith("<") else answer_emotion + answer
        results.append({"key": key, "q": prompt, "a": tagged_answer})
    return results


def do_get_labels_from_str(input_str):
    """
    Extract every ``<...>`` tag from *input_str* and strip the tags out.

    >>> input: "<好奇>小朋友，那些座位可能在等<特别勇敢>的小朋友去发现它们呢！"
    >>> output: ['好奇', '特别勇敢'], '小朋友，那些座位可能在等的小朋友去发现它们呢！'

    :param input_str: sentence that may contain angle-bracket tags
    :return: (list of tag contents, sentence with all tags removed)
    """
    tag_pattern = re.compile(r"<(.*?)>")
    # Collect the text inside every non-greedy <...> pair.
    labels = [match.group(1) for match in tag_pattern.finditer(input_str)]
    # Remove the tags (brackets included) from the sentence.
    stripped = tag_pattern.sub("", input_str)
    return labels, stripped

# Age: one-off pipeline that paired deepseek age-labelled answers with the
# think template; kept commented out for reference.
# NOTE(review): this row_path assignment is dead — every later section
# reassigns row_path before it is read.
row_path = "/home/work_nfs11/gjli/workspaces/labeling_glm-4/age_sex_yinshi_0615/age_7.7/age_gen_q_deepseek_7-7_a.txt"
# lines_list = utils_file.load_list_file_clean(row_path)
# new_dict_list = []
# for line_i in lines_list:
#     items = line_i.split("|")
#     key,_ ,age_label = items[0].strip().split("_")
#     new_key = f"{key}_{str(uuid.uuid4())}"
#     new_age_label = "<" + age_label + ">"
#     quetion_str = items[1].strip()
#     answer_str = items[2].strip()
#     answer_emotion_label, answer_clean_sentence = do_get_labels_from_str(answer_str)
#     answer_emotion_label_str = "<" + EMO_MAP.get(answer_emotion_label[0], "NEUTRAL") + ">"
#     new_answer_str = answer_emotion_label_str+answer_clean_sentence
#     new_dict_i = {
#         "key": new_key,
#         "question": quetion_str,
#         "answer": new_answer_str,
#         "age": new_age_label,
#         'answer_emotion': answer_emotion_label_str
#     }
#     # print(new_dict_i)
#     new_dict_list.append(new_dict_i)
#
# total_dict_list = handle_chat_for_think_template(new_dict_list)
# output_path = f'{root_dir}/age_chat/age_sex_chat_row.jsonl'
# utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)

# Sex/gender: one-off pipeline over the deepseek gender-labelled answers,
# storing the last "_"-separated key field under "sex"; kept commented out
# for reference.  This row_path assignment is likewise dead — it is
# reassigned by the live sections below before use.
row_path = "/home/work_nfs11/gjli/workspaces/labeling_glm-4/age_sex_yinshi_0615/sex_7.7/sex_gen_q_deepseek_7-7_a.txt"
# lines_list = utils_file.load_list_file_clean(row_path)
# new_dict_list = []
# for line_i in lines_list:
#     items = line_i.split("|")
#     keys = items[0].strip().split("_")
#     key = keys[0]
#     age_label = keys[-1]
#     new_key = f"{key}_{str(uuid.uuid4())}"
#     new_age_label = "<" + age_label + ">"
#     quetion_str = items[1].strip()
#     answer_str = items[2].strip()
#     answer_emotion_label, answer_clean_sentence = do_get_labels_from_str(answer_str)
#     answer_emotion_label_str = "<" + EMO_MAP.get(answer_emotion_label[0], "NEUTRAL") + ">"
#     new_answer_str = answer_emotion_label_str+answer_clean_sentence
#     new_dict_i = {
#         "key": new_key,
#         "question": quetion_str,
#         "answer": new_answer_str,
#         "sex": new_age_label,
#         'answer_emotion': answer_emotion_label_str
#     }
#     # print(new_dict_i)
#     new_dict_list.append(new_dict_i)
#
# total_dict_list = handle_chat_for_think_template(new_dict_list)
# output_path = f'{root_dir}/sex_chat/age_sex_chat_row.jsonl'
# utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)


# Emotion: one-off pipeline over the implicit emotion QA jsonl; kept commented out for reference.
# row_path = "/home/work_nfs16/cywang/workspace/OSUM/emotion_qa/emotion_qa_yinshi.jsonl"
# row_dict_list = utils_file.load_dict_list_from_jsonl(row_path)
# new_dict_list = []
# for row_dict in utils_file.tqdm(row_dict_list, desc="handle_chat_for_think_template", total=len(row_dict_list)):
#     key = row_dict["key"]
#     question = row_dict["question"]
#     answer = row_dict["answer"]
#     emotion = row_dict["question_emotion_label"]
#     answer_emotion = row_dict["answer_emotion_label"]
#     new_key = f"{key}_{str(uuid.uuid4())}"
#     dict_i = {
#         "key": new_key,
#         "question": question,
#         "answer": answer,
#         "emotion": emotion,
#         "answer_emotion": answer_emotion
#     }
#     new_dict_list.append(dict_i)
#
# total_dict_list =  handle_chat_for_think_template(new_dict_list)
# output_path = f'{root_dir}/emotion_chat/emotion_chat_row.jsonl'
# utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)



# Sound event: one-off pipeline over the implicit caption QA jsonl; kept
# commented out for reference.  This row_path assignment is dead — the live
# sections below reassign row_path before reading it.
row_path = "/home/work_nfs9/yacao/0707_add_implicit_T2T/result/caption_10w_implicit.jsonl"
# row_dict_list = utils_file.load_dict_list_from_jsonl(row_path)
# new_dict_list = []
# for row_dict in utils_file.tqdm(row_dict_list, desc="handle_chat_for_think_template", total=len(row_dict_list)):
#     key = str(row_dict["key"]) + "_"+str(uuid.uuid4())
#     question = row_dict['extra']["question"]
#     answer = row_dict["a_2"]
#     # emotion = row_dict["question_emotion_label"]
#     # answer_emotion = row_dict["answer_emotion_label"]
#     new_key = f"{key}_{str(uuid.uuid4())}"
#     caption_label = row_dict["caption"]
#     dict_i = {
#         "key": new_key,
#         "question": question,
#         "answer": answer,
#         # "emotion": emotion,
#         # "answer_emotion": answer_emotion,
#         "caption": caption_label,
#     }
#     new_dict_list.append(dict_i)
#
# total_dict_list =  handle_chat_for_think_template(new_dict_list)
# output_path = f'{root_dir}/caption_chat/caption_chat_row.jsonl'
# utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)


# Explicit emotion ("xianshi"): one-off pipeline over the explicit emotion QA jsonl; kept commented out for reference.
# row_path = "/home/work_nfs16/cywang/workspace/OSUM/emotion_qa/emotion_qa_xianshi.jsonl"
# row_dict_list = utils_file.load_dict_list_from_jsonl(row_path)
# new_dict_list = []
# for row_dict in utils_file.tqdm(row_dict_list, desc="handle_chat_for_think_template", total=len(row_dict_list)):
#     key = "_"+str(uuid.uuid4())
#     question = row_dict["question"]
#     answer = row_dict["answer"]
#     # caption_label = row_dict["caption"]
#     answer_emotion = do_get_labels_from_str(answer)[0][0]
#     answer_emotion = "<" + answer_emotion + ">"
#     dict_i = {
#         "key": key,
#         "question": question,
#         "answer": answer,
#         # "emotion": emotion,
#         "answer_emotion": answer_emotion,
#         # "caption": caption_label,
#     }
#     new_dict_list.append(dict_i)
#
# from data_hanle import _main_get_data_list, _main_make_shards
# total_dict_list =  handle_chat_for_think_template(new_dict_list)
# output_path = f'{root_dir}/xianshi_emotion_chat/row.jsonl'
# print("output_path: ",output_path)
# utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)
# utils_file.print_list(total_dict_list[0:10])
# input_path = output_path
# output_path = "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/xianshi_emotion_chat/data.list"
# output_shards_dir = "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/xianshi_emotion_chat/shard_dir"
# _main_get_data_list(input_path, output_path)
# _main_make_shards(output_path, output_shards_dir)


# Age (v2, age_chat_2): one-off re-run of the age pipeline with shard generation; kept commented out for reference.
# row_path = "/home/work_nfs11/gjli/workspaces/labeling_glm-4/age_sex_yinshi_0615/age_7.7/age_gen_q_deepseek_7-7_a.txt"
# lines_list = utils_file.load_list_file_clean(row_path)
# new_dict_list = []
# for line_i in lines_list:
#     items = line_i.split("|")
#     parts = items[0].strip().split("_")
#     key, age_label = parts[0], parts[-1]
#     new_key = f"{key}_{str(uuid.uuid4())}"
#     new_age_label = "<" + age_label + ">"
#     quetion_str = items[1].strip()
#     answer_str = items[2].strip()
#     answer_emotion_label, answer_clean_sentence = do_get_labels_from_str(answer_str)
#     answer_emotion_label_str = "<" + EMO_MAP.get(answer_emotion_label[0], "NEUTRAL") + ">"
#     new_answer_str = answer_emotion_label_str+answer_clean_sentence
#     new_dict_i = {
#         "key": new_key,
#         "question": quetion_str,
#         "answer": new_answer_str,
#         "age": new_age_label,
#         'answer_emotion': answer_emotion_label_str
#     }
#     # print(new_dict_i)
#     new_dict_list.append(new_dict_i)
#
# total_dict_list = handle_chat_for_think_template(new_dict_list)
# data_name = "age_chat_2"
# output_path = f'{root_dir}/{data_name}/row.jsonl'
# utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)
# utils_file.print_list(total_dict_list[0:10])
# input_path = output_path
# output_path = f"{root_dir}/{data_name}/data.list"
# output_shards_dir = f"{root_dir}/{data_name}/shard_dir"
# _main_get_data_list(input_path, output_path)
# _main_make_shards(output_path, output_shards_dir)


# Age chat v3: convert the age-labelled QA jsonl into think-template shards.
# NOTE(review): the original header here said 性别 (sex), but the data below
# carries age labels (age_map, age_q.jsonl, age_chat_3).
# Earlier gender variant of the label map, kept for reference:
# age_map = {
#     "男": "MALE",
#     "女": "FEMALE",
# }
age_map = {
    "儿童": "CHILD",
    "成年人": "ADULT",
    "老年人": "OLD",
}
root_dir = "/home/work_nfs14/asr_data/data/osum_data/t2t_paralanguage_chat"
# Previous inputs, kept for reference:
# row_path = "/home/work_nfs11/gjli/workspaces/labeling_glm-4/age_sex_yinshi_0615/sex_7.7/sex_gen_q_deepseek_7-7_a.txt"
# row_path = "/home/work_nfs14/dhgao/osum_pro/age_gender/age_gender.jsonl"
row_path = "/home/work_nfs14/dhgao/osum_pro/age_gender/age_q.jsonl"
dict_list = utils_file.load_dict_list_from_jsonl(row_path)

new_dict_list = []
for record in dict_list:
    # Age answers carry no emotion tag of their own; default to NEUTRAL.
    new_dict_list.append({
        "key": str(uuid.uuid4()),
        "question": record["question"],
        "answer": record["answer"],
        "age": "<" + age_map[record["label"]] + ">",
        "answer_emotion": "<NEUTRAL>",
    })

# Wrap into the <think> template, dump the raw jsonl, then build the
# data.list and shard directory consumed by training.
total_dict_list = handle_chat_for_think_template(new_dict_list)
data_name = "age_chat_3"
output_path = f'{root_dir}/{data_name}/row.jsonl'
utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)
utils_file.print_list(total_dict_list[0:10])
input_path = output_path
output_path = f"{root_dir}/{data_name}/data.list"
output_shards_dir = f"{root_dir}/{data_name}/shard_dir"
_main_get_data_list(input_path, output_path)
_main_make_shards(output_path, output_shards_dir)


# Caption chat v3: sound-recognition QA (already keyed as q/a) into
# think-template shards; the "label" field is used verbatim as the caption
# tag — no mapping table is applied here.
root_dir = "/home/work_nfs14/asr_data/data/osum_data/t2t_paralanguage_chat"
row_path = "/home/work_nfs9/yacao/0715_gxl_caption_text/sound_recognition_data_0715.jsonl"
dict_list = utils_file.load_dict_list_from_jsonl(row_path)

new_dict_list = []
for record in dict_list:
    # No emotion labels in this source; default every answer to NEUTRAL.
    new_dict_list.append({
        "key": str(uuid.uuid4()),
        "question": record["q"],
        "answer": record["a"],
        "caption": record["label"],
        "answer_emotion": "<NEUTRAL>",
    })

# Wrap into the <think> template, dump the raw jsonl, then build the
# data.list and shard directory consumed by training.
total_dict_list = handle_chat_for_think_template(new_dict_list)
data_name = "caption_chat_3"
output_path = f'{root_dir}/{data_name}/row.jsonl'
utils_file.write_dict_list_to_jsonl(total_dict_list, output_path)
utils_file.print_list(total_dict_list[0:10])
input_path = output_path
output_path = f"{root_dir}/{data_name}/data.list"
output_shards_dir = f"{root_dir}/{data_name}/shard_dir"
_main_get_data_list(input_path, output_path)
_main_make_shards(output_path, output_shards_dir)