import os
import json
import base64
import argparse
import time
from pathlib import Path

import requests

API_URL = "http://csig.litellm.prod.sgpolaris/openai/deployments/gemini-2.5-pro/chat/completions"
# API_URL = "http://csig.litellm.prod.sgpolaris/openai/deployments/gpt-4o/chat/completions"
# API_URL = "http://csig.litellm.prod.sgpolaris/openai/deployments/gpt-4o-mini/chat/completions"
import os

# Set the environment variable for the LiteLLM gateway key.
# NOTE(review): this is a hard-coded secret committed to source and it
# unconditionally overwrites any key exported by the caller — move it to an
# external config / environment and rotate the key. Also note `os` is already
# imported at the top of the file; this second import is redundant.
os.environ["LITELLM_API_KEY"] = "sk-NXxj_3TYKOmSCj8NxB_kOA"

def encode_audio_to_base64(audio_path: str):
    """Read an audio file and return ``(base64_string, extension)``.

    The extension is lower-cased and stripped of its leading dot (e.g. "wav").
    Raises FileNotFoundError when the path does not exist. For extensions
    outside the commonly supported set a warning is printed but the file is
    still encoded and returned.
    """
    audio_file = Path(audio_path)
    if not audio_file.exists():
        raise FileNotFoundError(f"音频文件不存在: {audio_path}")
    suffix = audio_file.suffix.lower().lstrip(".")
    # Formats commonly accepted by multimodal / ASR endpoints.
    known_formats = {"wav", "mp3", "m4a", "flac", "ogg", "webm"}
    if suffix not in known_formats:
        print(f"警告: 扩展名 .{suffix} 可能不被模型支持，仍将尝试发送。建议使用: {sorted(known_formats)}")
    raw_bytes = audio_file.read_bytes()
    encoded = base64.b64encode(raw_bytes).decode("utf-8")
    return encoded, suffix


def build_payload(model: str, prompt: str, audio_b64: str, audio_format: str):
    """Assemble an OpenAI Chat Completions style multimodal request body.

    The user message ``content`` is a list mixing a text part with an
    ``input_audio`` part whose data is base64-encoded (format e.g. "wav",
    "mp3"). Generation parameters are fixed to the values this script uses.
    """
    text_part = {"type": "text", "text": prompt}
    audio_part = {
        "type": "input_audio",
        "input_audio": {
            "data": audio_b64,
            "format": audio_format,
        },
    }
    user_message = {"role": "user", "content": [text_part, audio_part]}
    return {
        "model": model,
        "messages": [user_message],
        # Generation parameters (tune as needed).
        "temperature": 0.5,
        "max_tokens": 4096,
    }

# Chinese-language labeling instruction sent verbatim to the model (this is a
# runtime string — do not translate or reformat). It asks the model to emit a
# single dict-shaped line covering 13 speech-feature dimensions (transcript,
# gender, age, timbre, speed, volume, tone, emotion, accent, environment,
# pauses, phonation state, pitch) so downstream code can `json.loads` it.
PROMPT="""
# Prompt：语音特征标签生成指令
请你作为语音理解模型，基于输入的语音内容，按照以下要求生成结构化语音特征标签。需覆盖全部指定特征维度，每个维度的输出需精准、简洁（避免冗余描述），且严格遵循“特征维度：具体标签”的键值对格式，各维度用“|”分隔，确保后续可直接提取标签。

## 需覆盖的语音特征维度及说明：
1. **语义转录**：完整、准确转录语音中的文字内容（含填充词如“嗯、啊”，若有重复表述需如实记录，不删减语义）；
2. **说话人性别**：标签仅限“男性”“女性”“中性（如变声、无法明确判断）”；
3. **年龄**：标签仅限“儿童（0-12岁）”“青少年（13-17岁）”“成年（18-59岁）”“老年（60岁及以上）”“无法明确判断”；
4. **音色**：从“低沉”“浑厚”“清脆”“尖细”“沙哑”“柔和”“洪亮”“平淡”中选择1-2个最贴合的标签，若有特殊音色可补充（如“机械音”）；
5. **语速快慢**：标签仅限“极慢”“较慢”“中等”“较快”“极快”；
6. **音量大小**：标签仅限“极轻”“较轻”“中等”“较重”“极重”；
7. **语气**：从“陈述”“疑问”“感叹”“祈使（请求/命令）”“反问”“商量”中选择1个核心标签，可补充修饰（如“疑问-迟疑”）；
8. **情绪**：从“平静”“开心”“愤怒”“悲伤”“紧张”“焦虑”“惊讶”“不耐烦”“温和”“冷漠”中选择1-2个核心标签，若情绪不明显标注“无明显情绪”；
9. **口音**：标签格式为“无明显口音”或“XX口音（如：东北口音、川渝口音、英式英语口音、带口音的普通话）”；
10. **环境描述**：简洁描述背景环境及噪音情况（如“安静室内，无明显噪音”“街道环境，有汽车鸣笛噪音”“会议室环境，有轻微键盘敲击声”）；
11. **停顿特征**：从“停顿少且短（自然换气）”“停顿较多（思考性停顿）”“停顿过长（超过1秒，含沉默）”“停顿无序（逻辑混乱）”中选择1个标签，可补充（如“停顿较多-句间停顿为主”）；
12. **发声状态**：标签仅限“正常发声（清晰无异常）”“口吃（重复/延长音节）”“鼻音过重”“声音嘶哑”“气音明显（虚弱）”“无法明确判断”；
13. **音调**：从“偏低”“中等”“偏高”“波动小（平缓）”“波动大（抑扬顿挫）”中选择1-2个标签，可补充（如“偏高-疑问时上扬明显”）。

## 输出格式为Python 字典 格式，如下是一个示例，直接输出"{"开头的类字典形式，不要加```： 
{"语义转录": "刚才看天气预报说明天会下雨，咱们出门记得带伞啊，别淋感冒了","说话人性别": "女性","年龄": "成年","音色": "柔和+清脆","语速快慢": "中等","音量大小": "较轻","语气": "陈述-提醒","情绪": "温和","口音": "川渝口音（带轻微方言词汇）","环境描述": "居家环境，有远处厨房水流声，无明显噪音","停顿特征": "停顿少且短（自然换气，句间停顿为主）","发声状态": "正常发声（清晰无异常）","音调": "中等+波动小（平缓，提醒时轻微上扬）"}
请严格按照上述格式和维度要求，基于输入语音内容生成标签，不得遗漏任何维度，不得修改格式。
"""

def do_label_by_gemini(audio_path: str, prompt: str = PROMPT, model: str = "gemini-2.5-pro"):
    """Send one audio file to the Gemini endpoint and return the model's text.

    Args:
        audio_path: path to a local audio file; encoded to base64 for the request.
        prompt: instruction text placed before the audio part (defaults to PROMPT).
        model: deployment/model name placed in the request payload.

    Returns:
        The assistant message content string on success, or the literal string
        "None" on ANY failure (missing key, unreadable file, HTTP/JSON error).
        Callers rely on this sentinel — previously the JSON-parse-failure path
        fell through and returned the *object* ``None``, which made callers
        crash on ``res.replace`` with a misleading error; now every failure
        path consistently returns "None".
    """
    api_key = os.environ.get("LITELLM_API_KEY")
    if not api_key:
        print("缺少环境变量 LITELLM_API_KEY，请先执行: export LITELLM_API_KEY='your-key'")
        return "None"

    try:
        audio_b64, audio_format = encode_audio_to_base64(audio_path)
    except Exception as e:
        print(f"加载音频失败: {e}")
        return "None"

    headers = {
        "accept": "application/json",
        "x-litellm-api-key": api_key,
        "Content-Type": "application/json",
    }
    payload = build_payload(model, prompt, audio_b64, audio_format)

    try:
        # json= lets requests serialize the payload itself (equivalent to the
        # previous data=json.dumps(payload) but less error-prone).
        resp = requests.post(API_URL, headers=headers, json=payload, timeout=120)
        ct = resp.headers.get("Content-Type", "")
        if "application/json" in ct.lower():
            try:
                response_data = resp.json()
                return response_data["choices"][0]["message"]["content"]
            except Exception:
                # Malformed/unexpected JSON body (or missing choices): dump the
                # raw text for debugging and return the failure sentinel.
                print("JSON解析失败，原始文本:")
                print(resp.text)
                return "None"
        else:
            # Non-JSON response (gateway error page etc.) — report and fail.
            print("原始响应内容:, 回复失败")
            print(resp.text)
            raise Exception("请求失败")
    except Exception as e:
        print(f"请求出错: {e}")
        return "None"

import json
from gxl_ai_utils.utils import utils_file
from deepseek_v3 import GXLMultiprocessingWithReturn
def little_func_4emotiondata(little_wav_dict_i):
    """Worker: label each wav in a {key: wav_path} dict via Gemini.

    Returns a list of {"key", "gemini_res", "wav"} dicts, one per wav whose
    response parsed as JSON. Entries that fail (labeling error or unparseable
    response) are skipped with a message rather than aborting the batch.
    """
    res_list = []
    for key, wav_path in utils_file.tqdm(little_wav_dict_i.items(), total=len(little_wav_dict_i)):
        try:
            res = do_label_by_gemini(wav_path)
            # do_label_by_gemini signals failure with the "None" sentinel
            # (historically also the object None) — skip early instead of
            # letting json.loads fail with a confusing message.
            if res is None or res == "None":
                continue
            # Strip the markdown code fence the model sometimes adds despite
            # the prompt asking for bare dict output.
            res = res.replace("```json\n", "").replace("\n```", '').strip()
            res_dict = json.loads(res)
            dict_i = {"key": key, "gemini_res": res_dict, "wav": wav_path}
            res_list.append(dict_i)
        except Exception as e:
            print(f"json.loads 失败: {e}")
            continue
    return res_list


def main4emotion_data():
    """Batch-label the low-score emotion wav set (fixed cluster paths) and
    write the merged results to a jsonl file."""
    wav_path = "/teaspeech_ceph/share_976139/users/xuelonggeng/data/emotion_data/ft_local/raw_data/wav_score_low_5.scp"
    # Small-sample path, kept for ad-hoc smoke runs (currently unused).
    wav_path_little = "/teaspeech_ceph/share_976139/users/xuelonggeng/data/emotion_data/ft_local/raw_data/wav_score_up_4_little20.scp"

    wav_dict = utils_file.load_dict_from_scp(wav_path)
    runner = GXLMultiprocessingWithReturn(500)
    per_worker_results = runner.run(little_func_4emotiondata, wav_dict)
    # Each worker returns its own list; flatten them into one result list.
    merged_results = [item for worker_list in per_worker_results for item in worker_list]
    output_res_dict_path = "/teaspeech_ceph/share_976139/users/xuelonggeng/data/emotion_data/ft_local/raw_data/gemini_score_low_5_res.jsonl"
    utils_file.write_dict_list_to_jsonl(merged_results, output_res_dict_path)


def new_little_func_4emotiondata(little_dict_list_i):
    """Worker: enrich each record in a list of dicts with a Gemini label.

    Each input dict must carry "key" and "wav_path". On success the dict gains
    a "gemini_res_dict" field and is appended to the returned list; records
    that fail (labeling error or unparseable response) are skipped with a
    message so one bad item does not abort the batch.
    """
    res_list = []
    for dict_i in utils_file.tqdm(little_dict_list_i, total=len(little_dict_list_i)):
        try:
            key = dict_i["key"]
            wav_path = dict_i["wav_path"]
            res = do_label_by_gemini(wav_path)
            # do_label_by_gemini signals failure with the "None" sentinel
            # (historically also the object None) — skip early instead of
            # letting json.loads fail with a confusing message.
            if res is None or res == "None":
                continue
            # Strip the markdown code fence the model sometimes adds despite
            # the prompt asking for bare dict output.
            res = res.replace("```json\n", "").replace("\n```", '').strip()
            res_dict = json.loads(res)
            utils_file.logging_limit_print(f"key: {key} gemini_res: {res_dict}")
            dict_i["gemini_res_dict"] = res_dict
            res_list.append(dict_i)
        except Exception as e:
            print(f"json.loads 失败: {e}")
            continue
    return res_list

def new_main4full_para_labeling(input_jsonl_path, output_jsonl_path):
    """Label every record of a jsonl file with Gemini speech-feature tags.

    Args:
        input_jsonl_path: input jsonl; each record needs "key" and "wav_path".
        output_jsonl_path: output jsonl; records gain a "gemini_res_dict"
            field holding the model's per-dimension label dict.
    """
    utils_file.logging_info(f'new_main4full_para_labeling params:\n input_jsonl_path: {input_jsonl_path}\n output_jsonl_path: {output_jsonl_path}')
    records = utils_file.load_dict_list_from_jsonl(input_jsonl_path)
    runner = GXLMultiprocessingWithReturn(500)
    per_worker_results = runner.run(new_little_func_4emotiondata, records)
    # Each worker returns its own list; flatten into one list before writing.
    merged_results = [item for worker_list in per_worker_results for item in worker_list]
    utils_file.write_dict_list_to_jsonl(merged_results, output_jsonl_path)


if __name__ == "__main__":
    # CLI entry point: label every record of --input_file and write the
    # enriched jsonl to --output_file.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--input_file", type=str, required=True)
    arg_parser.add_argument("--output_file", type=str, required=True)
    cli_args = arg_parser.parse_args()
    new_main4full_para_labeling(cli_args.input_file, cli_args.output_file)