import argparse
import random
import string
import sys

from openai import OpenAI
import re
import json
import os
import time
import os
import logging

logging.basicConfig(level=logging.INFO)
# Aggregated (union) API endpoint — drop any HTTP(S) proxy settings before
# calling it, since the endpoint must be reached directly.
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)


# client = OpenAI(
#         base_url="http://csig.litellm.prod.polaris",
#         api_key="sk-NXxj_3TYKOmSCj8NxB_kOA",
# )
def do_get_a_bot():
    """Create and return an OpenAI-compatible client for the aggregated endpoint."""
    # NOTE(review): base_url and api_key are hardcoded — consider moving them
    # to environment variables or a config file.
    return OpenAI(
        base_url="http://csig.litellm.prod.polaris",
        api_key="sk-NXxj_3TYKOmSCj8NxB_kOA",
    )


def do_chat(question_str, client=None, max_retries=10000):
    """Send a single-turn chat request to the model, retrying on API errors.

    Args:
        question_str: The user question to send.
        client: An OpenAI-compatible client; when None a default one is
            created against the aggregated endpoint.
        max_retries: Maximum number of attempts before giving up
            (default 10000, matching the original retry budget).

    Returns:
        The model's reply text, or None when every attempt failed.
    """
    if client is None:
        # NOTE(review): hardcoded credentials — consider env vars/config.
        client = OpenAI(
            base_url="http://csig.litellm.prod.polaris",
            api_key="sk-NXxj_3TYKOmSCj8NxB_kOA",
        )
    message = [
        {"role": "system", "content": "you are deepseek."},
        {"role": "user", "content": question_str}
    ]
    for _attempt in range(max_retries):
        try:
            completion = client.chat.completions.create(
                model="deepseek-v3-union",
                messages=message,
            )
            return completion.choices[0].message.content
        except Exception as e:
            # Treat any API failure as transient: log, back off, retry.
            print(f"Error occurred when calling API: {e}")
            time.sleep(30)
    # Bug fix: the original's "retry >= 10000" check was unreachable (the
    # while-guard exited first), so exhaustion returned None silently.
    print("Maximum retry limit reached. Exiting.")
    return None


def remove_spaces_between_chinese(text: str) -> str:
    """Strip the string and delete whitespace sandwiched between two CJK characters.

    Half-width spaces, full-width spaces and other whitespace runs are all
    removed, but only when both neighbours are Chinese characters; spacing
    around non-Chinese text is left untouched.

    Args:
        text: The input string. Non-string inputs yield "".

    Returns:
        The cleaned string.
    """
    if not isinstance(text, str):
        return ""

    # The lookbehind/lookahead assertions keep the surrounding hanzi out of
    # the match, so only the whitespace run itself is deleted. The character
    # class covers \s plus the full-width space (U+3000).
    cjk_gap = re.compile(r'(?<=[\u4e00-\u9fa5])[\s　]+(?=[\u4e00-\u9fa5])')
    return cjk_gap.sub('', text.strip())


# from gxl_ai_utils.utils import utils_file
import utils_file


def do_get_random_string(length: int = 20) -> str:
    """Build a pseudo-random alphanumeric string (NOT cryptographically secure).

    Args:
        length: Number of characters to generate.

    Returns:
        A string of ASCII letters and digits of the requested length.
    """
    # Alphabet is letters a-z / A-Z plus digits 0-9 only.
    alphabet = string.ascii_letters + string.digits
    # random.choices draws `length` characters with replacement in one call.
    return ''.join(random.choices(alphabet, k=length))


def little_func(input_text_dict_i):
    """Score each transcript in the given key->text dict via the chat bot.

    Cleans inter-hanzi whitespace from each text, asks the bot with the
    scoring prompt, and periodically checkpoints partial results to a
    uniquely named temp jsonl so concurrent workers never clobber each other.

    Returns:
        A list of {'key', 'txt', 'score'} dicts, one per input entry.
    """
    from do_get_score import prompt, extract_content_regex
    # Random suffix keeps each worker's checkpoint file unique.
    random_str = do_get_random_string()
    tmp_output_path = f"/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_6.4w/tmp_stage3/score_{random_str}.jsonl"
    print(f'tmp_output_path: {tmp_output_path}')
    bot = do_get_a_bot()
    print(f"bot: {bot}")
    res_dict_list = []
    progress = utils_file.tqdm(input_text_dict_i.items(), desc="chatting with bot",
                               total=len(input_text_dict_i))
    for index, (key, txt_str) in enumerate(progress, start=1):
        cleaned_txt = remove_spaces_between_chinese(txt_str)
        utils_file.logging_limit_print(cleaned_txt)
        answer = do_chat(prompt.format(cleaned_txt), client=bot)
        utils_file.logging_limit_print(answer)
        res_dict_list.append({'key': key, 'txt': cleaned_txt, 'score': answer})
        # Checkpoint every 1000 items (rewrites the whole partial list).
        if index % 1000 == 0:
            utils_file.write_dict_list_to_jsonl(res_dict_list, tmp_output_path)
    return res_dict_list


import multiprocessing as mp


def main4get_score():
    """Score every transcript in the input scp using a pool of 100 processes.

    Loads a key->text dict, splits it into `num_processes` chunks, runs
    `little_func` on each chunk in parallel, flattens the per-process result
    lists and writes them as jsonl.

    Bug fix: the merged results are now written to `output_file`; the
    original wrote them back to `output_dict_path`, overwriting the INPUT scp.
    """
    # output_dict_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/emotion_data/ft_local/text.scp"
    output_dict_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_6.4w/text_little2w"
    print(f'output_dict_path: {output_dict_path}')
    text_dict = utils_file.load_dict_from_scp(output_dict_path)

    # 2. Split the big dict evenly, one chunk per worker process.
    num_processes = 100
    print(f"正在将大字典平均分成 {num_processes} 份...")
    dicts_list = utils_file.do_split_dict(text_dict, num_processes)
    print(f"分割完成，共得到 {len(dicts_list)} 个小字典。")

    # 3. Fan the chunks out over a process pool.
    print("--- 开始多进程处理 ---")
    start_time = time.time()

    # 'spawn' is Windows-compatible and avoids fork-related pitfalls.
    ctx = mp.get_context('spawn')

    with ctx.Pool(processes=num_processes) as pool:
        # pool.map hands each chunk dict to little_func and gathers returns.
        results_from_all_processes = pool.map(little_func, dicts_list)

    end_time = time.time()
    print(f"--- 所有进程处理完毕，耗时: {end_time - start_time:.2f} 秒 ---")

    # 4. Flatten the per-process result lists into one big list.
    print("正在合并所有进程的结果...")
    final_result_list = []
    for sublist in results_from_all_processes:
        final_result_list.extend(sublist)

    print(f"结果合并完成，最终列表共有 {len(final_result_list)} 条记录。")

    # 5. Write the merged results, one JSON object per line.
    output_file = "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_6.4w/score.list"
    print(f"正在将最终结果写入文件: {output_file} ...")
    utils_file.write_dict_list_to_jsonl(final_result_list, output_file)
    print("文件写入成功！")


class GXLMultiprocessingWithReturn:
    """Fan a dict or list out over a spawn-context process pool and gather
    the per-worker return values."""

    def __init__(self, num_processes):
        # Number of workers; also the number of chunks the input is split into.
        self.num_processes = num_processes
        # 'spawn' context for cross-platform safety (no fork inheritance).
        self.ctx = mp.get_context('spawn')

    def run(self, func, big_dict_or_list):
        """Split the input, map `func` over the chunks in parallel, return results.

        Args:
            func: A picklable callable taking one chunk (a dict or a list).
            big_dict_or_list: The collection to split across workers.

        Returns:
            A list with each worker's return value, in chunk order.
        """
        assert big_dict_or_list is not None, "big_dict_or_list 不能为空！"
        assert callable(func), "func 必须是一个可调用对象！"
        assert isinstance(big_dict_or_list, (dict, list)), "big_dict_or_list 必须是一个字典或列表！"
        if isinstance(big_dict_or_list, dict):
            split_list = utils_file.do_split_dict(big_dict_or_list, self.num_processes)
        else:
            split_list = utils_file.do_split_list(big_dict_or_list, self.num_processes)
        utils_file.logging_info(f'开始多进程处理，共有 {len(split_list)} 个子任务。')
        time_start = time.time()
        # Bug fix: use the spawn context created in __init__; the original
        # called mp.Pool directly, silently ignoring self.ctx.
        with self.ctx.Pool(self.num_processes) as pool:
            results_from_all_processes = pool.map(func, split_list)
        end_time = time.time()
        print(f"--- 所有进程处理完毕，耗时: {end_time - time_start:.2f} 秒 ---")
        return results_from_all_processes

def main4get_score2():
    """Score one split of the remaining stage-3 text, selected by sys.argv[1]."""
    split_index = sys.argv[1]
    input_text_dict_path = f"/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_6.4w/text_remain_stage3_split{split_index}.scp"
    print(f'input_text_dict_path: {input_text_dict_path}')
    text_dict = utils_file.load_dict_from_scp(input_text_dict_path)

    # Fan out over 500 worker processes via the reusable runner.
    num_processes = 500
    print("--- 开始多进程处理 ---")
    runner = GXLMultiprocessingWithReturn(num_processes)
    results_from_all_processes = runner.run(little_func, text_dict)

    # Flatten the per-worker result lists into one combined list.
    print("正在合并所有进程的结果...")
    final_result_list = []
    for chunk_result in results_from_all_processes:
        final_result_list.extend(chunk_result)
    print(f"结果合并完成，最终列表共有 {len(final_result_list)} 条记录。")

    # Persist the merged results, one JSON object per line.
    output_file = f"/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_6.4w/score_stage3_split{split_index}.list"
    print(f"正在将最终结果写入文件: {output_file} ...")
    utils_file.write_dict_list_to_jsonl(final_result_list, output_file)
    print("文件写入成功！")


def get_int_from_str(score_str):
    """Parse a numeric string and round it to the nearest integer.

    Args:
        score_str: A string (or number) such as "7" or "6.5".

    Returns:
        The rounded integer, or -1 when the value cannot be converted.
    """
    try:
        return int(round(float(score_str)))
    except (ValueError, TypeError, OverflowError):
        # Bug fix: the original caught only ValueError, so None raised
        # TypeError and "inf"/"-inf" raised OverflowError (round -> int)
        # instead of returning the documented -1 sentinel.
        print(f"无法将 '{score_str}' 转换为整数。")
        return -1


def little_func_answer_by_emotion_tag(little_dict_i):
    """Ask the bot about each '<emotion>'-tagged transcript and collect answers.

    Entries whose tag is missing or not present in `emotion_map` are skipped.

    Returns:
        A list of {'key', 'txt', 'emotion', 'answer'} dicts.
    """
    from do_get_question4emotion_txt import prompt, emotion_map, extract_content_regex
    answers = []
    progress = utils_file.tqdm(little_dict_i.items(), desc="chatting with bot", total=len(little_dict_i))
    for key, tagged_txt in progress:
        plain_txt, tag = extract_content_regex(tagged_txt)
        # Guard: skip malformed entries and tags we have no mapping for.
        if tag is None or tag not in emotion_map:
            continue
        emotion_name = emotion_map[tag]
        reply = do_chat(prompt.format(plain_txt, emotion_name))
        answers.append({'key': key, 'txt': plain_txt, 'emotion': emotion_name, 'answer': reply})
    return answers

def little_func_answer_by_full_tag(dict_list_i):
    """Attach a 'think_answer' field to every record by querying the bot
    with its pretty-printed 'gemini_res' tag dict.

    Args:
        dict_list_i: List of dicts, each containing a 'gemini_res' dict.

    Returns:
        The same records, each mutated with a new 'think_answer' field.
    """
    from full_tag_prompt import prompt
    res_dict_list_i = []
    for dict_i in utils_file.tqdm(dict_list_i, desc="chatting with bot", total=len(dict_list_i)):
        # (Removed unused local `key = dict_i['key']` from the original.)
        # Pretty-print the gemini result so the prompt stays readable.
        pretty_json_str = json.dumps(dict_i['gemini_res'], ensure_ascii=False, indent=4)
        question_str = prompt.format(pretty_json_str)
        dict_i["think_answer"] = do_chat(question_str)
        res_dict_list_i.append(dict_i)
    return res_dict_list_i


def mian4get_answer_with_emotion():
    """Generate emotion-conditioned answers for the score>=4 subset and save jsonl.

    NOTE(review): 'mian' in the name looks like a typo for 'main'; kept for
    compatibility with any external callers.
    """
    # Input was pre-filtered elsewhere to texts with score >= 4 (per the path name).
    output_text_scp = "/teaspeech_ceph/share_976139/users/xuelonggeng/data/emotion_data/ft_local/text_score_up_4.scp"
    res_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/emotion_data/ft_local/answer_and_think_by_emotion_tag.jsonl"
    text_dict = utils_file.load_dict_from_scp(output_text_scp)
    runner = GXLMultiprocessingWithReturn(num_processes=100)
    per_worker_results = runner.run(little_func_answer_by_emotion_tag, text_dict)
    # Flatten the per-worker lists before writing.
    merged = []
    for worker_result in per_worker_results:
        merged.extend(worker_result)
    utils_file.write_dict_list_to_jsonl(merged, res_path)


def mian4get_answer_with_full_label():
    """Generate 'think_answer' replies for every gemini-tagged record and save jsonl.

    NOTE(review): 'mian' in the name looks like a typo for 'main'; kept for
    compatibility with any external callers.
    """
    gemini_tags_dict_list_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/emotion_data/ft_local/raw_data/gemini_res.jsonl"
    res_path = "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/emotion_data/ft_local/answer_and_think_by_full_tag.jsonl"
    tags_dict_list = utils_file.load_dict_list_from_jsonl(gemini_tags_dict_list_path)
    runner = GXLMultiprocessingWithReturn(num_processes=300)
    per_worker_results = runner.run(little_func_answer_by_full_tag, tags_dict_list)
    # Flatten each worker's list into one combined list.
    merged = []
    for worker_result in per_worker_results:
        merged.extend(worker_result)
    utils_file.write_dict_list_to_jsonl(merged, res_path)


import re
def extract_content_regex(s: str):
    """Split a string of the form 'prefix<content>' into its two parts.

    Args:
        s: The input string.

    Returns:
        tuple: (prefix, '<content>') on success; (None, None) when the
        string does not match (a warning is printed in that case).
    """
    # Non-greedy groups: everything before the first '<' is the prefix,
    # everything up to the trailing '>' is the content.
    m = re.match(r'^(.*?)<(.*?)>$', s)
    if m is None:
        print(f"警告：输入字符串 '{s}' 与模式不匹配。")
        return None, None
    prefix, inner = m.group(1), m.group(2)
    # Re-wrap the content in angle brackets, mirroring the original tag form.
    return prefix, f'<{inner}>'


def little_func_get_str_convert(little_dict_i):
    """Ask the bot to convert each transcript (tag stripped), keeping the original text.

    Returns:
        A list of {'key', 'txt', 'txt_convert'} dicts.
    """
    from str_convert import prompt as prompt_str_convert
    converted = []
    for key, tagged_txt in utils_file.tqdm(little_dict_i.items()):
        plain_txt, _tag = extract_content_regex(tagged_txt)
        reply = do_chat(prompt_str_convert.format(plain_txt))
        converted.append({'key': key, 'txt': tagged_txt, 'txt_convert': reply})
    return converted
def main_4_get_str_convert():
    """Run the text-conversion prompt over the whole scp with 20 workers, save jsonl.

    Fix: removed the unused function-local import of `str_convert.prompt`;
    `little_func_get_str_convert` performs its own import inside each worker.
    """
    input_text_scp_path = "/teaspeech_ceph/share_976139/users/xuelonggeng/data/emotion_data/ft_local/raw_data/text.scp"
    output_text_path = "/teaspeech_ceph/share_976139/users/xuelonggeng/data/emotion_data/ft_local/raw_data/tmp.list"
    text_dict = utils_file.load_dict_from_scp(input_text_scp_path)
    runner = GXLMultiprocessingWithReturn(num_processes=20)
    res_list = runner.run(little_func_get_str_convert, text_dict)
    # Flatten per-worker results and write one JSON object per line.
    big_res_dict_list = []
    for res_i in res_list:
        big_res_dict_list.extend(res_i)
    utils_file.write_dict_list_to_jsonl(big_res_dict_list, output_text_path)


def new_mian4get_answer_with_full_label(input_jsonl_path, output_jsonl_path):
    """Add a 'think_answer' field (the chatbot reply) to every record of a jsonl.

    Args:
        input_jsonl_path: jsonl whose records must contain a 'gemini_res'
            dict holding the gemini tagging result (consumed by
            little_func_answer_by_full_tag).
        output_jsonl_path: Destination jsonl; each record gains 'think_answer'.
    """
    utils_file.logging_info(f'new_mian4get_answer_with_full_label params:\n input_jsonl_path: {input_jsonl_path}\n output_jsonl_path: {output_jsonl_path}')
    records = utils_file.load_dict_list_from_jsonl(input_jsonl_path)
    runner = GXLMultiprocessingWithReturn(num_processes=400)
    per_worker_results = runner.run(little_func_answer_by_full_tag, records)
    # Flatten the per-worker result lists before writing.
    merged = []
    for worker_result in per_worker_results:
        merged.extend(worker_result)
    utils_file.write_dict_list_to_jsonl(merged, output_jsonl_path)

if __name__ == "__main__":
    # CLI entry point: enrich a gemini-tagged jsonl with chatbot 'think_answer's.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str, required=True)
    parser.add_argument("--output_file", type=str, required=True)
    cli_args = parser.parse_args()
    new_mian4get_answer_with_full_label(cli_args.input_file, cli_args.output_file)


