import asyncio
import json
import os
import threading
import time
from typing import Dict, Any

import dashscope
import pyaudio
from dashscope.audio.asr import *
from dotenv import load_dotenv
from fastapi import WebSocket

from audio.dashscope import ASR_Vocabulary
from config.logging_config import get_logger

# Load environment variables from a local .env file (if present).
load_dotenv()

# Module-level logger.
logger = get_logger(__name__)

# DashScope configuration: keep an API key already set on the process,
# otherwise fall back to the DASHSCOPE_API_KEY environment variable.
if not dashscope.api_key:
    dashscope.api_key = os.getenv("DASHSCOPE_API_KEY")

# Warn (don't fail) at import time so the rest of the app can still start.
if not dashscope.api_key:
    logger.warning("DashScope API Key 未配置，语音识别功能将无法使用")

# Default ASR model used when a session config does not specify one.
target_model = "paraformer-realtime-v2"


class AudioRecognitionManager:
    """Audio recognition manager — the backend handles microphone input directly.

    Keeps per-session state for WebSocket-bound speech recognition: the
    session dict, the DashScope ``Recognition`` engine instance, and the
    audio-capture thread, all keyed by ``session_id``.
    """

    def __init__(self):
        # session_id -> session state (websocket, config, flags, result buffer, pyaudio handles)
        self.active_sessions: Dict[str, Dict[str, Any]] = {}
        # session_id -> DashScope Recognition engine (forward ref: the class is
        # star-imported from dashscope.audio.asr at module level)
        self.recognition_instances: Dict[str, "Recognition"] = {}
        # Main asyncio event loop; worker threads schedule coroutines on it.
        self.main_loop = None
        # session_id -> microphone capture thread
        self.audio_threads: Dict[str, threading.Thread] = {}

    def set_main_loop(self, loop):
        """Record the main event loop used for thread-safe message delivery."""
        self.main_loop = loop

    async def create_session(self, session_id: str, websocket: "WebSocket", config: Dict[str, Any] = None):
        """Create an audio recognition session.

        Builds the session state dict and the DashScope Recognition instance
        (attaching the hot-word vocabulary when one exists). When ``config``
        is falsy, defaults to PCM / 8 kHz / mono with Chinese language hints.
        """
        if not config:
            config = {
                'model': target_model,
                'format': 'pcm',
                'sample_rate': 8000,  # 8 kHz sampling rate
                'language_hints': ['zh'],
                'channels': 1  # mono
            }

        # Capture the running loop so callback threads can reach it later.
        current_loop = asyncio.get_running_loop()
        if not self.main_loop:
            self.main_loop = current_loop

        self.active_sessions[session_id] = {
            'websocket': websocket,
            'config': config,
            'is_active': True,
            'recognition_results': [],
            'mic': None,
            'stream': None,
            'recording_active': False,
            'recognition_ready': False  # becomes True once the engine's on_open fires
        }

        # The callback relays engine events back over the WebSocket.
        callback = WebSocketRecognitionCallback(session_id, self)

        vocabulary_id = ASR_Vocabulary.VOCABULARY_ID  # hot-word vocabulary id (may be falsy)
        recognition_params = {
            'model': config.get('model', target_model),
            'format': config.get('format', 'pcm'),
            'sample_rate': config.get('sample_rate', 8000),
            'callback': callback
        }

        # Attach vocabulary_id only when the vocabulary was created successfully.
        if vocabulary_id:
            recognition_params['vocabulary_id'] = vocabulary_id
            logger.info(f"使用热词表: {vocabulary_id}")
        else:
            logger.warning("未使用热词表，将进行标准语音识别")

        recognition = Recognition(**recognition_params)

        self.recognition_instances[session_id] = recognition

        logger.info(f"音频识别会话 {session_id} 已创建")

    async def start_recording(self, session_id: str):
        """Start recording — the backend drives the microphone directly.

        Raises:
            ValueError: if the session does not exist.
        """
        if session_id not in self.active_sessions:
            raise ValueError(f"会话 {session_id} 不存在")

        session = self.active_sessions[session_id]
        if session['recording_active']:
            logger.warning(f"会话 {session_id} 已在录音中")
            return

        try:
            # Starting the engine triggers the callback's on_open, which
            # opens the microphone and spawns the capture thread.
            recognition = self.recognition_instances[session_id]
            recognition.start()

            await self.send_message(session_id, {
                'type': 'recording_started',
                'message': '开始录音，后端正在监听麦克风...'
            })

            logger.info(f"会话 {session_id} 识别引擎已启动")

        except Exception as e:
            logger.error(f"启动录音失败: {e}")
            await self.send_message(session_id, {
                'type': 'error',
                'message': f'启动录音失败: {str(e)}'
            })

    async def stop_recording(self, session_id: str):
        """Stop recording and notify the client; no-op for unknown sessions."""
        if session_id not in self.active_sessions:
            return

        session = self.active_sessions[session_id]
        # Clearing these flags makes the capture thread's loop condition fail.
        session['recording_active'] = False
        session['recognition_ready'] = False

        # Stop the recognition engine (best-effort).
        if session_id in self.recognition_instances:
            try:
                self.recognition_instances[session_id].stop()
                logger.info(f"会话 {session_id} 识别已停止")
            except Exception as e:
                logger.error(f"停止识别失败: {e}")

        await self.send_message(session_id, {
            'type': 'recording_stopped',
            'message': '录音已停止'
        })

    async def send_message(self, session_id: str, message: Dict[str, Any]):
        """Send a JSON message to the session's WebSocket client.

        On send failure the session is closed, since the socket is presumed dead.
        """
        if session_id in self.active_sessions:
            session = self.active_sessions[session_id]
            if session['is_active']:
                try:
                    await session['websocket'].send_text(json.dumps(message, ensure_ascii=False))
                except Exception as e:
                    logger.error(f"发送WebSocket消息失败: {e}")
                    await self.close_session(session_id)

    def send_message_threadsafe(self, session_id: str, message: Dict[str, Any]):
        """Thread-safe variant of ``send_message`` for use from worker threads.

        Uses ``asyncio.run_coroutine_threadsafe`` — the canonical way to
        schedule a coroutine on a loop from another thread. Unlike the
        ``call_soon_threadsafe`` + ``create_task`` pattern it returns a
        tracked future, so coroutine exceptions are not silently dropped as
        never-retrieved tasks.
        """
        if self.main_loop and not self.main_loop.is_closed():
            try:
                asyncio.run_coroutine_threadsafe(
                    self.send_message(session_id, message), self.main_loop
                )
            except Exception as e:
                logger.error(f"线程安全发送消息失败: {e}")
        else:
            logger.error("主事件循环不可用，无法发送消息")

    async def close_session(self, session_id: str):
        """Tear down a session: flags, pyaudio handles, engine, capture thread."""
        if session_id in self.active_sessions:
            session = self.active_sessions[session_id]
            session['is_active'] = False
            session['recording_active'] = False
            session['recognition_ready'] = False

            # Release pyaudio resources; use .get so a partially built
            # session dict cannot raise KeyError during teardown.
            if session.get('stream'):
                try:
                    session['stream'].stop_stream()
                    session['stream'].close()
                except Exception as e:
                    logger.error(f"关闭音频流失败: {e}")

            if session.get('mic'):
                try:
                    session['mic'].terminate()
                except Exception as e:
                    logger.error(f"释放麦克风失败: {e}")

            del self.active_sessions[session_id]

        if session_id in self.recognition_instances:
            try:
                self.recognition_instances[session_id].stop()
            except Exception as e:
                logger.error(f"停止识别实例失败: {e}")
            del self.recognition_instances[session_id]

        # Drop the (daemon) capture-thread handle if one was recorded.
        self.audio_threads.pop(session_id, None)

        logger.info(f"音频识别会话 {session_id} 已关闭")


class WebSocketRecognitionCallback(RecognitionCallback):
    """DashScope recognition callback with integrated microphone handling.

    ``on_open`` opens the microphone and spawns a capture thread that streams
    PCM frames into the recognition engine; recognition events are relayed to
    the WebSocket client via the manager's thread-safe sender.
    """

    def __init__(self, session_id: str, manager: AudioRecognitionManager):
        self.session_id = session_id
        self.manager = manager

    def on_open(self) -> None:
        """Engine opened: initialise the microphone and start capturing."""
        logger.info(f"音频识别会话 {self.session_id} 已打开，开始初始化麦克风")

        mic = None
        try:
            session = self.manager.active_sessions.get(self.session_id)
            if not session:
                logger.error(f"会话 {self.session_id} 不存在")
                return

            config = session['config']

            # Open the default input device with the session's audio format.
            mic = pyaudio.PyAudio()

            stream = mic.open(
                format=pyaudio.paInt16,
                channels=config.get('channels', 1),
                rate=config.get('sample_rate', 8000),
                input=True,
                frames_per_buffer=1600  # small buffer keeps latency low
            )

            # Publish the handles so stop/close paths can release them.
            session['mic'] = mic
            session['stream'] = stream
            session['recognition_ready'] = True
            session['recording_active'] = True

            self.manager.send_message_threadsafe(self.session_id, {
                'type': 'recognition_started',
                'message': '语音识别已开始，麦克风已准备就绪'
            })

            # Daemon thread: never blocks process shutdown.
            audio_thread = threading.Thread(
                target=self._audio_capture_loop,
                daemon=True
            )
            self.manager.audio_threads[self.session_id] = audio_thread
            audio_thread.start()

            logger.info(f"麦克风初始化成功，开始音频捕获: {self.session_id}")

        except Exception as e:
            logger.error(f"初始化麦克风失败: {e}")
            # Fix: release the PyAudio instance when stream opening failed
            # before ownership moved into the session dict — otherwise the
            # audio-device handle leaks on every failed start.
            if mic is not None and session.get('mic') is None:
                try:
                    mic.terminate()
                except Exception:
                    pass
            self.manager.send_message_threadsafe(self.session_id, {
                'type': 'error',
                'message': f'麦克风初始化失败: {str(e)}'
            })

    def _audio_capture_loop(self):
        """Audio capture loop (runs in its own thread).

        Reads 1600-sample chunks (200 ms at 8 kHz) from the microphone and
        forwards them to the recognition engine until the session's flags are
        cleared or the stream errors out.
        """
        logger.info(f"开始音频捕获循环: {self.session_id}")

        session = self.manager.active_sessions.get(self.session_id)
        if not session:
            return

        stream = session['stream']
        recognition = self.manager.recognition_instances.get(self.session_id)

        if not stream or not recognition:
            logger.error(f"音频流或识别实例未初始化: {self.session_id}")
            return

        try:
            audio_chunk_count = 0
            while session['recording_active'] and session['is_active'] and session['recognition_ready']:
                try:
                    # Don't raise on input overflow: a slow consumer should
                    # only drop audio, not kill the capture loop.
                    data = stream.read(1600, exception_on_overflow=False)

                    # Feed the chunk to the recognition engine.
                    recognition.send_audio_frame(data)

                    audio_chunk_count += 1

                    # Periodic progress log every 50 chunks.
                    if audio_chunk_count % 50 == 0:
                        logger.debug(f"已发送 {audio_chunk_count} 个音频chunk: {self.session_id}")

                    # Brief sleep to avoid monopolising the CPU.
                    time.sleep(0.01)

                except Exception as e:
                    # Only report if we were supposed to still be recording;
                    # otherwise this is an expected shutdown-time read failure.
                    if session['recording_active']:
                        logger.error(f"读取音频数据失败: {e}")
                    break

        except Exception as e:
            logger.error(f"音频捕获循环异常: {e}")
        finally:
            logger.info(f"音频捕获循环结束: {self.session_id}")

    def on_close(self) -> None:
        """Engine closed: stop the capture loop and release the microphone."""
        logger.info(f"音频识别会话 {self.session_id} 已关闭，清理资源")

        session = self.manager.active_sessions.get(self.session_id)
        if session:
            session['recording_active'] = False
            session['recognition_ready'] = False

            # Release pyaudio resources and null the handles so a later
            # close_session does not double-free them.
            if session['stream']:
                try:
                    session['stream'].stop_stream()
                    session['stream'].close()
                    session['stream'] = None
                except Exception as e:
                    logger.error(f"关闭音频流失败: {e}")

            if session['mic']:
                try:
                    session['mic'].terminate()
                    session['mic'] = None
                except Exception as e:
                    logger.error(f"释放麦克风失败: {e}")

        self.manager.send_message_threadsafe(self.session_id, {
            'type': 'recognition_ended',
            'message': '语音识别已结束，麦克风已释放'
        })

    def on_complete(self) -> None:
        """Engine reports the recognition task finished normally."""
        logger.info(f"音频识别会话 {self.session_id} 识别完成")
        self.manager.send_message_threadsafe(self.session_id, {
            'type': 'recognition_complete',
            'message': '语音识别完成'
        })

    def on_error(self, message) -> None:
        """Forward an engine error to the client (``message`` may be an object or str)."""
        logger.error(f"音频识别错误 {self.session_id}: {message}")
        self.manager.send_message_threadsafe(self.session_id, {
            'type': 'error',
            'message': f'识别错误: {message.message if hasattr(message, "message") else str(message)}',
            'request_id': getattr(message, 'request_id', None)
        })

    def on_event(self, result: RecognitionResult) -> None:
        """Handle a recognition result event (core logic).

        Buffers the result on the session, pushes it to the client, and logs
        usage information when a sentence completes.
        """
        try:
            sentence = result.get_sentence()
            if 'text' in sentence and sentence['text'].strip():
                # Hoisted: both the payload and the logging below need these
                # (the original called is_sentence_end/get_usage twice).
                is_final = RecognitionResult.is_sentence_end(sentence)
                usage = result.get_usage(sentence) if is_final else None

                result_data = {
                    'type': 'recognition_result',
                    'text': sentence['text'],
                    'is_final': is_final,
                    'confidence': sentence.get('confidence', 0),
                    'begin_time': sentence.get('begin_time', 0),
                    'end_time': sentence.get('end_time', 0),
                    'request_id': result.get_request_id(),
                    'usage': usage
                }

                # Keep a per-session history of results.
                self.manager.active_sessions[self.session_id]['recognition_results'].append(result_data)

                # Stream the result to the frontend in real time.
                self.manager.send_message_threadsafe(self.session_id, result_data)

                if is_final:
                    logger.info(f"句子识别完成 {self.session_id}: {sentence['text']}")
                    if usage:
                        logger.info(f"识别用量信息: {usage}")
                else:
                    logger.debug(f"实时识别结果 {self.session_id}: {sentence['text']}")

        except Exception as e:
            logger.error(f"处理识别结果失败: {e}")
            self.manager.send_message_threadsafe(self.session_id, {
                'type': 'error',
                'message': f'处理识别结果失败: {str(e)}'
            })


# Global singleton: the shared audio recognition manager for this app.
audio_manager = AudioRecognitionManager()

# Log at import time so startup-ordering problems are visible in the logs.
logger.info("音频识别管理器已初始化")
