import json
import logging
import multiprocessing
import os
from biz.integrations.aliyun.auth_api import AliyunAuthService
import time
import nls
import queue
from typing import List
from dotenv import load_dotenv

from biz.integrations.funasr.funasr_api import FunasrApi

# Load environment variables (ALIYUN_APP_KEY, SPEECH_TYPE, FUNASR_URL, ...) from a .env file.
load_dotenv()

# Configure logging: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class SpeechRecognizer:
    """Drive Aliyun NLS real-time speech recognition in worker processes.

    Final recognition results are published onto the shared queue handed
    to the constructor, so the parent process can collect them.
    """

    def __init__(self, result_queue):
        # Queue shared with the parent process; final results land here.
        self.result_queue = result_queue
        self.auth_service = AliyunAuthService()

    def on_recognition_start(self, message, *args):
        """Callback invoked when a recognition session opens."""
        logging.info(f'Recognition started: {message}')

    def on_recognition_error(self, message, *args):
        """Callback invoked when the recognizer reports an error."""
        logging.error(f'Error occurred: {args}, message: {message}')

    def on_recognition_close(self, *args):
        """Callback invoked when the recognition session is closed."""
        logging.info(f'Recognition closed: {args}')

    def on_recognition_result_changed(self, message, *args):
        """Callback invoked whenever an intermediate result changes."""
        logging.info(f'Result changed: {message}')

    def on_recognition_completed(self, message, *args):
        """Callback invoked with the final result; push it to the queue.

        Args:
            message: the final recognition payload from the service.
        """
        logging.info(f'Recognition completed: {args}, message: {message}')
        if self.result_queue is not None:
            # Hand the final result back to the parent process.
            self.result_queue.put(message)

    def _recognize_audio(self, audio_data, process_id):
        """Worker-process entry point: stream the audio to Aliyun NLS.

        Args:
            audio_data: raw audio bytes to recognize.
            process_id: identifier of this worker (used in logs and
                forwarded to the SDK callbacks).
        """
        try:
            token = self.auth_service.get_token()['token']
            appkey = os.getenv('ALIYUN_APP_KEY')

            logging.info(f'Process {process_id}: Recognition started with token: {token} and app key: {appkey}')

            recognizer = nls.NlsSpeechRecognizer(
                token=token,
                appkey=appkey,
                on_start=self.on_recognition_start,
                on_result_changed=self.on_recognition_result_changed,
                on_completed=self.on_recognition_completed,
                on_error=self.on_recognition_error,
                on_close=self.on_recognition_close,
                callback_args=[process_id]
            )
            logging.info(f"Process {process_id}: session started")
            recognizer.start(aformat="pcm", ex={"hello": 123})

            # Stream the audio in fixed-size slices, pausing briefly per
            # slice so the server can keep up.
            offset, step, total = 0, 1024, len(audio_data)
            while offset < total:
                recognizer.send_audio(audio_data[offset:offset + step])
                time.sleep(0.001)
                offset += step

            stop_result = recognizer.stop()
            logging.info(f"Process {process_id}: recognition stopped: {stop_result}")
            # Give the SDK a moment to deliver trailing callbacks.
            time.sleep(0.5)
        except Exception as e:
            logging.error(f"Error in recognize_audio: {e}")

    def start_recognition(self, audio_data, num_processes=1):
        """Spawn worker processes that each recognize the same audio.

        Args:
            audio_data: non-empty audio byte stream from the client.
            num_processes: number of parallel workers (default 1).
        """
        if not audio_data or not isinstance(audio_data, bytes):
            logging.error("Invalid input type for audio_data, expected non-empty bytes.")
            return

        workers = [
            multiprocessing.Process(target=self._recognize_audio, args=(audio_data, idx))
            for idx in range(num_processes)
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()


def recognize_audio(audio_data: bytes, num_processes: int = 1, timeout: int = 30) -> str:
    """Client entry point: recognize speech from an audio byte stream.

    Depending on the SPEECH_TYPE environment variable the audio is sent
    either to Aliyun NLS (via worker processes) or to a FunASR endpoint.

    Args:
        audio_data: raw audio bytes supplied by the client.
        num_processes: number of Aliyun worker processes, default 1.
        timeout: max seconds to wait for a result, default 30.

    Returns:
        The recognized text, or "" when the input is invalid, no result
        arrived within the timeout, or an error occurred.
    """
    if not isinstance(audio_data, bytes):
        logging.error("Invalid input type for audio_data, expected bytes.")
        return ""

    speech_type = os.getenv('SPEECH_TYPE')
    funasr_url = os.getenv('FUNASR_URL')
    if speech_type == 'ALIYUN':
        # Shared queue through which worker processes report results.
        result_queue = multiprocessing.Queue()
        recognizer = SpeechRecognizer(result_queue)

        try:
            recognizer.start_recognition(audio_data, num_processes)

            results = []
            start_time = time.time()
            while True:
                if time.time() - start_time > timeout:
                    logging.info(f"Reached timeout of {timeout} seconds, exiting...")
                    break

                try:
                    # Poll in 5-second slices so the overall timeout is honored.
                    results.append(result_queue.get(timeout=5))
                    break  # one result is enough
                except queue.Empty:
                    continue

            recognized_texts = []
            for result in results:
                try:
                    result_dict = json.loads(result)
                    recognized_text = result_dict.get("payload", {}).get("result", "")
                    if recognized_text:  # keep only non-empty results
                        recognized_texts.append(recognized_text)
                    else:
                        logging.warning("Empty recognition result received.")
                        recognized_texts.append("")
                except (json.JSONDecodeError, AttributeError, TypeError) as e:
                    logging.error(f"Error processing result: {e}")
                    recognized_texts.append("")

            # Guard against an empty list (timeout with no result) — the
            # previous [0] access raised IndexError in that case.
            return recognized_texts[0] if recognized_texts else ""
        except Exception as e:
            # Log instead of silently swallowing the failure.
            logging.error(f"Error during Aliyun recognition: {e}")
            return ""
        finally:
            result_queue.close()
    else:
        rcg = FunasrApi(
            uri=funasr_url,
            timeout=timeout
        )
        text = rcg.rec_buf(audio_data)
        return text

if __name__ == "__main__":
    # Manual test: read a local WAV file and run recognition on it.
    nls.enableTrace(True)

    with open('D:/06git/01python/07speech/alibabacloud-nls-python-sdk-dev/tests/demo1.wav', 'rb') as f:
        audio_data = f.read()

    # recognize_audio is a plain synchronous function — wrapping it in
    # asyncio.run() raises "ValueError: a coroutine was expected".
    results = recognize_audio(audio_data, num_processes=2, timeout=10)
    print(f"识别结果: {results}")