import time, os, io, sys, uuid, wave
import opuslib_next
from typing import List, Tuple, Optional
from main.base import ASRProviderBase
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
import logging

# Set the log level to ERROR so that INFO-level and lower messages are not printed.
logging.basicConfig(level=logging.ERROR)

# Capture standard output within a `with` block (used to silence noisy
# model-loading output), then re-emit the captured text once the block exits.
class CaptureOutput:
    """Context manager that buffers sys.stdout while active.

    On exit the original stdout is restored, the captured text is stored on
    ``self.output``, and (if non-empty) echoed back to the real stdout.
    """

    def __enter__(self):
        self._output = io.StringIO()
        self._original_stdout = sys.stdout
        sys.stdout = self._output
        # Bug fix: return self so `with CaptureOutput() as cap:` binds the
        # instance instead of None.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout = self._original_stdout
        self.output = self._output.getvalue()
        self._output.close()

        # Re-emit the captured content on the restored stdout.
        if self.output:
            print(self.output.strip())

class ASR(ASRProviderBase):
    """FunASR-based speech-recognition provider.

    Writes (optionally Opus-encoded) audio frames to a temporary WAV file,
    runs the local FunASR model on it, and returns the recognized text.
    """

    def __init__(self, delete_audio_file: bool = True):
        # Local model directory and scratch directory for temporary WAVs.
        self.model_dir = "./tools/fun_asr"
        self.output_dir = "./audios"
        # When True, the temporary WAV file is removed after recognition.
        self.delete_audio_file = delete_audio_file

        os.makedirs(self.output_dir, exist_ok=True)
        self.model = AutoModel(
                model= "./tools/fun_asr",
                vad_kwargs={"max_single_segment_time": 30000},
                disable_update=True
                # hub="hf"
                # device="cuda:0",  # enable GPU acceleration
            )

    # Save audio fragments to a mono 16 kHz / 16-bit WAV file.
    def save_audio_to_file(self, audio_data: List[bytes], session_id: str, is_opus: bool = False) -> str:
        """Write the given audio frames to a uniquely named WAV file.

        Args:
            audio_data: raw PCM frames, or Opus packets when is_opus is True.
            session_id: embedded in the generated file name for traceability.
            is_opus: decode each packet with Opus (16 kHz, mono) before writing.

        Returns:
            Path of the WAV file that was written.
        """
        file_name = f"asr_{session_id}_{uuid.uuid4()}.wav"
        file_path = os.path.join(self.output_dir, file_name)
        pcm_data = []
        if is_opus:
            decoder = opuslib_next.Decoder(16000, 1)  # 16 kHz, mono
            for opus_packet in audio_data:
                try:
                    # 960 samples per frame = 60 ms at 16 kHz.
                    pcm_frame = decoder.decode(opus_packet, 960)
                    pcm_data.append(pcm_frame)
                except opuslib_next.OpusError as e:
                    # Skip undecodable packets instead of aborting the save.
                    logging.error(f"Opus解码错误: {e}")
        else:
            pcm_data = audio_data

        with wave.open(file_path, "wb") as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)  # 2 bytes = 16-bit
            wf.setframerate(16000)
            wf.writeframes(b"".join(pcm_data))
        return file_path

    # Speech-to-text (Opus-encoded input).
    async def speech_to_text(self, opus_data: List[bytes], session_id: str) -> Tuple[Optional[str], Optional[str]]:
        """Recognize text from a list of Opus packets.

        Returns:
            (text, wav_file_path) on success, ("", None) on failure.
            NOTE(review): when delete_audio_file is True, the returned path
            has already been removed in the finally block — confirm callers
            do not reopen it.
        """
        file_path = None
        try:
            start_time = time.time()
            file_path = self.save_audio_to_file(audio_data=opus_data, is_opus=True, session_id=session_id)
            print(f"音频文件保存耗时: {time.time() - start_time:.3f}s | 路径: {file_path}")

            # Run ASR on the temporary WAV file.
            result = self.model.generate(
                input=file_path,
                cache={},
                language="auto",
                use_itn=True,
                batch_size_s=60,
            )
            text = rich_transcription_postprocess(result[0]["text"])
            return text, file_path

        except Exception as e:
            logging.error(f"语音识别失败: {e}")
            return "", None
        finally:
            # Best-effort cleanup of the temporary WAV file.
            if self.delete_audio_file and file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                    logging.info("删除临时文件")
                except Exception as e:
                    logging.error(f"删除临时文件失败: {file_path} | {e}")

    # Speech-to-text (raw PCM input, no Opus decoding).
    def speech_to_text_no_opus(self, audio_data: List[bytes], session_id: str) -> Optional[str]:
        """Recognize text from raw 16 kHz / 16-bit PCM frames.

        Returns:
            The recognized text, or "" on failure.  (Bug fix: the error path
            previously returned a ("", None) tuple while the success path
            returned a plain string; both now return a string, and the
            annotation matches.)
        """
        file_path = None
        try:
            file_path = self.save_audio_to_file(audio_data=audio_data, session_id=session_id)

            # Run ASR on the temporary WAV file.
            result = self.model.generate(
                input=file_path,
                cache={},
                language="auto",
                use_itn=True,
                batch_size_s=60,
            )
            text = rich_transcription_postprocess(result[0]["text"])
            return text

        except Exception as e:
            logging.error(f"语音识别失败: {e}")
            return ""
        finally:
            # Best-effort cleanup of the temporary WAV file.
            if self.delete_audio_file and file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                    logging.info("删除临时文件")
                except Exception as e:
                    logging.error(f"删除临时文件失败: {file_path} | {e}")
                    
# Standalone ASR smoke test: loads the model and recognizes one audio file.
def testAsr(file_path):
    """Run the FunASR model on a single audio file and report the result.

    Args:
        file_path: path of an audio file readable by FunASR (wav/mp3/...).
    """
    asr_model = AutoModel(
        model="./tools/fun_asr",
        vad_kwargs={"max_single_segment_time": 30000},
        disable_update=True,
        # hub="hf"
        # device="cuda:0",  # enable GPU acceleration
    )
    start_time = time.time()
    result = asr_model.generate(
        input=file_path,
        cache={},
        language="auto",
        use_itn=True,
        batch_size_s=60,
    )
    text = rich_transcription_postprocess(result[0]["text"])
    # Bug fix: logging.info is suppressed by basicConfig(level=ERROR) at the
    # top of this file, so the result was never displayed; print it instead,
    # matching how timings are reported elsewhere in this file.
    print(f"语音识别耗时: {time.time() - start_time:.3f}s | 结果: {text}")
    
# Manual entry point: run the standalone ASR smoke test against a sample file.
if __name__ == "__main__":
    # asr = ASR()
    testAsr(file_path="../audios/en.mp3")