#from _typeshed import FileDescriptor
import asyncio
import string
from typing import Any
import numpy
import os
import whisper
from abc import ABC, abstractmethod
from whisper import Whisper
import librosa
import soundfile as sf

class ASRInferenceBlock(ABC):
    """Abstract base class for ASR (automatic speech recognition) inference blocks."""
    @abstractmethod
    async def initModel(self, strModelRoot: str) -> bool:
        """Initialize the underlying model.

        Args:
            strModelRoot: Root directory where the model files are stored
                (or downloaded to).

        Returns:
            True if initialization succeeded.
        """


    @abstractmethod
    async def inference(self, audio_data: bytes) -> str:
        """Run ASR inference on in-memory audio data.

        Args:
            audio_data: Audio data bytes.

        Returns:
            The recognized text.
        """

    @abstractmethod
    async def inferenceFile(self, file_path:str) -> str:
        """Run ASR inference on an audio file.

        Args:
            file_path: Path to the audio file.

        Returns:
            The recognized text.
        """


class SimpleASRInferenceBlock(ASRInferenceBlock):
    """Whisper-backed ASR inference block, intended for testing/prototyping."""

    # Loaded Whisper model; stays None until initModel() succeeds.
    m_model: Whisper = None

    async def initModel(self, strModelRoot) -> bool:
        """Load the Whisper "tiny" model.

        Args:
            strModelRoot: Directory passed to whisper as ``download_root``.

        Returns:
            True once the model is loaded.
        """
        # Simulated startup latency (keeps the async interface honest in tests).
        await asyncio.sleep(0.1)
        self.m_model = whisper.load_model("tiny", download_root=strModelRoot)
        return True

    async def inference(self, audio_data: bytes) -> str:
        """Transcribe raw PCM audio bytes.

        Args:
            audio_data: Raw signed 16-bit PCM samples.
                NOTE(review): assumed to be mono @ 16 kHz — whisper expects
                16 kHz float32 input; confirm with the caller. Resample with
                librosa here if other rates must be supported.

        Returns:
            The recognized text.
        """
        # Simulated inference latency.
        await asyncio.sleep(0.1)

        print("len(audio_data):", len(audio_data))
        # int16 PCM -> float32 in [-1.0, 1.0), the format whisper expects.
        audio_np = numpy.frombuffer(audio_data, dtype=numpy.int16).astype(numpy.float32) / 32768.0

        result = self.m_model.transcribe(
            audio_np,
            language="en",
            fp16=False,       # fp16 is not useful on CPU
            temperature=0.0,  # deterministic decoding
        )

        print("Recognition result:", result["text"])
        return result["text"]

    async def inferenceFile(self, file_path: str) -> str:
        """Transcribe an audio file.

        Supports two formats: ``.wav`` (resampled to 16 kHz into
        ``./serverCache`` first) and ``.webm`` (handed to whisper directly).
        A ``.wav`` extension hiding webm content is likely to fail.

        Args:
            file_path: Path to the audio file.

        Returns:
            The recognized text.

        Raises:
            ValueError: If the extension is neither ``.wav`` nor ``.webm``.
        """
        # Simulated inference latency.
        await asyncio.sleep(0.1)

        fileName = os.path.basename(file_path)
        print("fileName:" + fileName)

        extension = os.path.splitext(file_path)[1].lower()
        print("File extension:", extension)

        if extension == ".wav":
            # Write a 16 kHz copy next to the cache; whisper expects 16 kHz.
            filePath_transcribe = os.path.join("./serverCache", "16k_" + fileName)
            print("16k filePath:" + filePath_transcribe)

            y, sr = sf.read(file_path)
            if sr != 16000:
                y = librosa.resample(y, orig_sr=sr, target_sr=16000)
                sr = 16000
            sf.write(filePath_transcribe, y, sr)

            result = self.m_model.transcribe(
                filePath_transcribe,
                language="en",
                fp16=False,
                temperature=0.0
            )
        elif extension == ".webm":
            result = self.m_model.transcribe(
                file_path,
                language="en",
                fp16=False,
                temperature=0.0
            )
        else:
            # BUG FIX: the original fell through with result == {} and then
            # raised an opaque KeyError on result["text"]. Fail explicitly.
            # TODO: detect webm content behind a .wav extension.
            raise ValueError(f"Unsupported audio file extension: {extension}")

        print("Recognition result:", result["text"])
        return result["text"]
