import os
import dashscope
from config import app_config

# 引入抽象基类

from .asr_base import BaseASR
from .asr_registry import register_asr
import numpy as np

@register_asr("qwen3")
class Qwen3ASR(BaseASR):
    """DashScope Qwen3 ASR backend.

    Transcribes audio by sending a file path to the DashScope
    MultiModalConversation API. This backend is file-path based: raw audio
    arrays passed as ``audio_data`` are ignored.
    """

    def transcribe(self, audio_data: np.ndarray = None, audio_path: str = None) -> str:  # type: ignore
        """Transcribe the audio file at ``audio_path`` with the Qwen3 ASR model.

        Args:
            audio_data: Ignored by this backend (kept only for interface
                compatibility with other ASR implementations); a warning is
                printed if a value is supplied.
            audio_path: Path to the audio file to transcribe. Required.

        Returns:
            The recognized text.

        Raises:
            ValueError: If ``audio_path`` is not provided.
            Exception: If the DashScope request returns a non-200 status.
        """
        # BUG FIX: the original used `if not audio_data:`, which (a) fired the
        # warning when audio_data was ABSENT rather than present, and (b) raises
        # "truth value of an array is ambiguous" for multi-element numpy arrays.
        if audio_data is not None:
            # This backend reads the file itself; supplied raw audio is ignored.
            print("Qwen3ASR not need audio_data, only need audio_path")
        if not audio_path:
            raise ValueError("audio_path is None, please provide audio_path")

        # Build the multimodal request payload expected by DashScope.
        messages = [
            {
                "role": "user",
                "content": [
                    {"audio": audio_path},
                ]
            }
        ]
        # NOTE(review): a hard-coded API key previously sat here in a comment;
        # removed — secrets must come from the environment only.
        response = dashscope.MultiModalConversation.call(
            api_key=os.getenv("DASHSCOPE_API_KEY"),  # type: ignore
            model=app_config.QWEN3_ASR_MODEL,  # type: ignore
            messages=messages,
            result_format="message",
            asr_options={
                "enable_lid": True,   # language identification
                "enable_itn": False,  # inverse text normalization off
            }
        )
        if response.status_code != 200:  # type: ignore
            raise Exception(f"qwen3_asr ASR请求失败: {response.status_code}, {response.message}")  # type: ignore
        # Response shape: output.choices[0].message['content'][0]['text']
        return response.output.choices[0].message['content'][0]['text']  # type: ignore

    def cleanup(self):
        """Qwen3ASR holds no resources; nothing to release."""
        pass
    
if __name__ == "__main__":
    # Manual smoke test: transcribe a local reference clip and print the result.
    sample_path = "D:\\HR_AI\\AI_VTuber\\AI_VTuber_Server\\static\\audio_files\\ref_audio\\tingyun\\01a996c32f001e93-参考.wav"
    recognizer = Qwen3ASR()
    result = recognizer.transcribe(audio_path=sample_path)
    print(result)