import os
import json
import uuid
import dashscope
from openai import OpenAI
from volcenginesdkarkruntime import Ark

# ===================== 基础配置 =====================
UPLOAD_FOLDER = os.environ.get("UPLOAD_FOLDER", "uploads")
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# DashScope 配置
dashscope.api_key = 'sk-b42f646a808549e099932167d32f2a9c'
DEFAULT_ASR_MODEL = 'qwen-audio-asr'

# Qwen 客户端
client = OpenAI(
    api_key='sk-b42f646a808549e099932167d32f2a9c',
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# 允许的音频格式
ALLOWED_EXTS = {'.wav', '.mp3', '.m4a', '.flac', '.aac', '.ogg', '.opus'}


def allow_file(filename: str) -> bool:
    """Return True if *filename* carries a supported audio extension."""
    _, extension = os.path.splitext(filename)
    return extension.lower() in ALLOWED_EXTS


def real_asr(file_path: str) -> str:
    """Transcribe a local audio file to text via the DashScope ASR model.

    Best-effort contract: on any failure the exception is folded into a
    human-readable string and returned instead of being raised.
    """
    uri = "file://" + os.path.abspath(file_path)
    payload = [{"role": "user", "content": [{"audio": uri}]}]
    try:
        resp = dashscope.MultiModalConversation.call(
            model=DEFAULT_ASR_MODEL,
            messages=payload,
            result_format="message",
        )
        # Drill into the message-format response to reach the transcript.
        content = resp['output']['choices'][0]['message']['content']
        return content[0]['text']
    except Exception as e:
        return f"ASR 解析失败: {e}"


def run_infer(audio_paths: list, json_file_path: str) -> str:
    """Analyze audio evidence against a JSON chat log and return the verdict.

    Each audio file is transcribed with DashScope ASR (``real_asr``); the
    combined transcripts plus the chat-log JSON are then sent to the Doubao
    (Volcengine Ark) chat model, which is prompted to reply with a JSON
    evidence assessment.

    :param audio_paths: paths of local audio files; each must have an
        extension listed in ``ALLOWED_EXTS``
    :param json_file_path: path of the JSON chat-log file
    :return: raw reply text from the model (expected to be JSON)
    :raises ValueError: if any audio file has an unsupported extension
    :raises OSError, json.JSONDecodeError: if the chat-log file is missing
        or not valid JSON
    """
    # Load the user-supplied chat log.
    with open(json_file_path, "r", encoding="utf-8") as f:
        user_json = json.load(f)

    # Validate and transcribe every audio file. ASR failures come back as
    # error strings from real_asr, not exceptions, and are passed through
    # to the model as-is.
    asr_texts = []
    for audio_path in audio_paths:
        if not allow_file(audio_path):
            raise ValueError(f"文件类型不支持: {audio_path}")
        asr_texts.append(real_asr(audio_path))

    combined_asr_text = "\n".join(asr_texts)

    # System prompt sent verbatim to the model — do not reword.
    system_prompt = (
        '''
        你是一名劳动法律小助手的智能分析模块，用户提供了案件相关的聊天记录(JSON)和音频材料，请结合聊天记录和音频内容，分析音频是否可以作为证据以及证据的充分性。只返回JSON格式，包含五个字段：
            1. "Document type"："录音"
            2. "Summary of key contents"：对录音内容的精确概述
            3. "Document validity"：文件的有效性说明
            4. "Relevance to the case"：与案件关联性分析
            5. "Can it be used as core evidence"：true或false
        不要输出其他文字或额外说明。
        '''
    )

    user_message = (
        f"JSON 聊天记录:\n{json.dumps(user_json, ensure_ascii=False)}\n\n"
        f"音频转写内容:\n{combined_asr_text}"
    )

    # Doubao (Volcengine Ark) inference.
    # SECURITY: prefer the ARK_API_KEY environment variable; the literal
    # fallback only preserves the previous hardcoded behavior and should be
    # rotated and removed.
    ark_client = Ark(
        base_url="https://ark.cn-beijing.volces.com/api/v3",
        api_key=os.environ.get("ARK_API_KEY", "1b4bef68-37d5-4196-ba8b-17c9054ae9c5"),
    )

    completion = ark_client.chat.completions.create(
        model="doubao-seed-1-6-250615",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ],
    )

    return completion.choices[0].message.content
