#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
实时音频采集模块

提供实时音频录制、语音活动检测、音频预处理等功能
支持与现有ASR服务的无缝集成
"""

import pyaudio
import wave
import numpy as np
import threading
import time
import os
import tempfile
import requests
from pathlib import Path
from typing import Optional, Callable, Dict, Any
import queue
import json

# 处理相对导入和直接运行的兼容性
try:
    from .asr_service import MinimalASR
    from .llm_client import LLMClient
except ImportError:
    from asr_service import MinimalASR
    from llm_client import LLMClient


class VoiceActivityDetector:
    """
    Voice activity detector (VAD).

    Simple but effective VAD based on an RMS volume threshold combined
    with silence-duration tracking.
    """

    def __init__(self,
                 volume_threshold: float = 0.01,
                 silence_duration: float = 2.0,
                 min_speech_duration: float = 0.5):
        """
        Initialize the VAD.

        Args:
            volume_threshold: RMS volume at or below this value counts as silence.
            silence_duration: Seconds of continuous silence that end a speech segment.
            min_speech_duration: Minimum speech length (seconds); shorter
                segments are discarded as noise.
        """
        self.volume_threshold = volume_threshold
        self.silence_duration = silence_duration
        self.min_speech_duration = min_speech_duration

        # State tracking
        self.is_speaking = False          # currently inside a speech segment?
        self.silence_start_time = None    # when the current silence run began
        self.speech_start_time = None     # when the current speech segment began
        self.last_volume = 0.0            # most recent RMS volume, for stats

    def process_audio_chunk(self, audio_data: np.ndarray) -> Dict[str, Any]:
        """
        Process one audio chunk and return the VAD result.

        Args:
            audio_data: Audio samples (numpy array; presumably floats
                normalized to [-1, 1] — matches the capture callback).

        Returns:
            Dict with at least "event", "volume" and "timestamp". "event" is
            one of "speech_start", "speech_end", "speech_too_short" or
            "continue" ("continue" results also carry "is_speaking").
        """
        # RMS volume. Guard against an empty chunk: np.mean of an empty
        # array is NaN (with a RuntimeWarning), and NaN compares False
        # against the threshold, silently breaking the state machine.
        if audio_data.size == 0:
            volume = 0.0
        else:
            # float() ensures a native Python float (JSON-serializable).
            volume = float(np.sqrt(np.mean(audio_data ** 2)))
        self.last_volume = volume

        current_time = time.time()

        # Speech onset: quiet -> loud transition.
        if not self.is_speaking and volume > self.volume_threshold:
            self.is_speaking = True
            self.speech_start_time = current_time
            self.silence_start_time = None
            return {
                "event": "speech_start",
                "volume": volume,
                "timestamp": current_time
            }

        # Possible speech end: loud -> quiet transition (or ongoing silence).
        elif self.is_speaking and volume <= self.volume_threshold:
            if self.silence_start_time is None:
                self.silence_start_time = current_time

            # Has the silence lasted long enough to close the segment?
            silence_duration = current_time - self.silence_start_time
            if silence_duration >= self.silence_duration:
                # Only report segments that meet the minimum speech length.
                if self.speech_start_time and (current_time - self.speech_start_time) >= self.min_speech_duration:
                    speech_duration = current_time - self.speech_start_time
                    self.is_speaking = False
                    self.silence_start_time = None
                    self.speech_start_time = None  # reset so stale timestamps never leak into the next segment
                    return {
                        "event": "speech_end",
                        "volume": volume,
                        "timestamp": current_time,
                        "speech_duration": speech_duration
                    }
                else:
                    # Segment too short — treat as noise and reset state.
                    self.is_speaking = False
                    self.silence_start_time = None
                    self.speech_start_time = None
                    return {
                        "event": "speech_too_short",
                        "volume": volume,
                        "timestamp": current_time
                    }

        # Still speaking: any loud chunk resets the silence timer.
        elif self.is_speaking and volume > self.volume_threshold:
            self.silence_start_time = None

        return {
            "event": "continue",
            "volume": volume,
            "timestamp": current_time,
            "is_speaking": self.is_speaking
        }


class RealTimeAudioCapture:
    """
    Real-time audio capturer.

    Features:
    1. Live microphone capture
    2. Voice activity detection (VAD)
    3. Audio preprocessing and format conversion
    4. Integration with the ASR service
    5. Audio file management
    """

    def __init__(self,
                 sample_rate: int = 16000,
                 channels: int = 1,
                 chunk_size: int = 1024,
                 audio_format: int = pyaudio.paInt16,
                 temp_dir: str = "./temp_audio"):
        """
        Initialize the audio capturer.

        Args:
            sample_rate: Sampling rate in Hz.
            channels: Number of channels.
            chunk_size: Frames per audio chunk.
            audio_format: PyAudio sample-format constant.
            temp_dir: Directory for temporary audio files.
        """
        # Audio parameters
        self.sample_rate = sample_rate
        self.channels = channels
        self.chunk_size = chunk_size
        self.audio_format = audio_format

        # Temp-file management. parents=True so a nested path such as
        # "./a/b/temp_audio" does not raise FileNotFoundError on first run.
        self.temp_dir = Path(temp_dir)
        self.temp_dir.mkdir(parents=True, exist_ok=True)

        # PyAudio instance and (lazily opened) input stream
        self.audio = pyaudio.PyAudio()
        self.stream = None

        # VAD instance (default thresholds; see set_vad_parameters)
        self.vad = VoiceActivityDetector()

        # Recording state
        self.is_recording = False
        self.is_listening = False
        self.recording_thread = None

        # Audio buffers
        self.audio_buffer = queue.Queue()
        self.current_recording = []

        # User callbacks, invoked from the PyAudio callback thread
        self.on_speech_start: Optional[Callable] = None
        self.on_speech_end: Optional[Callable] = None
        self.on_audio_data: Optional[Callable] = None

        print(f"🎤 实时音频采集器初始化完成")
        print(f"   采样率: {sample_rate}Hz, 声道: {channels}, 块大小: {chunk_size}")
        print(f"   临时目录: {self.temp_dir}")

    def list_audio_devices(self):
        """Print all available audio input devices."""
        print("🎧 可用音频设备:")
        for i in range(self.audio.get_device_count()):
            device_info = self.audio.get_device_info_by_index(i)
            if device_info['maxInputChannels'] > 0:  # input-capable device
                print(f"  设备 {i}: {device_info['name']} (输入声道: {device_info['maxInputChannels']})")

    def start_listening(self, device_index: Optional[int] = None):
        """
        Start listening on the microphone (callback-driven).

        Args:
            device_index: Audio device index; None selects the default device.

        Raises:
            Exception: Re-raises whatever PyAudio raised when the stream
                could not be opened.
        """
        if self.is_listening:
            print("⚠️ 已在监听中")
            return

        try:
            # Open a callback-mode input stream; _audio_callback runs on
            # PyAudio's internal thread for every chunk.
            self.stream = self.audio.open(
                format=self.audio_format,
                channels=self.channels,
                rate=self.sample_rate,
                input=True,
                input_device_index=device_index,
                frames_per_buffer=self.chunk_size,
                stream_callback=self._audio_callback
            )

            self.is_listening = True
            self.stream.start_stream()

            print(f"🎤 开始音频监听...")
            if device_index is not None:
                device_info = self.audio.get_device_info_by_index(device_index)
                print(f"   使用设备: {device_info['name']}")

        except Exception as e:
            print(f"❌ 启动音频监听失败: {e}")
            raise

    def stop_listening(self):
        """Stop listening and close the input stream."""
        if not self.is_listening:
            return

        self.is_listening = False

        if self.stream:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None

        print("🔇 音频监听已停止")

    def _audio_callback(self, in_data, frame_count, time_info, status):
        """PyAudio stream callback: run VAD and dispatch events per chunk."""
        if not self.is_listening:
            return (None, pyaudio.paComplete)

        # Convert raw int16 bytes to normalized float32 in [-1, 1].
        audio_data = np.frombuffer(in_data, dtype=np.int16).astype(np.float32) / 32768.0

        # VAD processing
        vad_result = self.vad.process_audio_chunk(audio_data)

        # Handle VAD events (recording start/stop, file save)
        self._handle_vad_event(vad_result, in_data)

        # Invoke the user's per-chunk callback, if any
        if self.on_audio_data:
            self.on_audio_data(audio_data, vad_result)

        return (in_data, pyaudio.paContinue)

    def _handle_vad_event(self, vad_result: Dict[str, Any], audio_data: bytes):
        """React to a VAD event: manage the in-progress recording buffer."""
        event = vad_result["event"]

        if event == "speech_start":
            print(f"🗣️ 检测到语音开始 (音量: {vad_result['volume']:.4f})")
            self.is_recording = True
            self.current_recording = [audio_data]

            if self.on_speech_start:
                self.on_speech_start(vad_result)

        elif event == "speech_end":
            print(f"🔇 检测到语音结束 (持续: {vad_result['speech_duration']:.2f}秒)")
            self.is_recording = False

            # Persist the recording and notify the user callback
            if self.current_recording:
                audio_file = self._save_recording(self.current_recording)
                if audio_file and self.on_speech_end:
                    self.on_speech_end(vad_result, audio_file)

            self.current_recording = []

        elif event == "continue" and self.is_recording:
            # Keep accumulating chunks while speech is ongoing
            self.current_recording.append(audio_data)

        elif event == "speech_too_short":
            print(f"⚠️ 语音太短，忽略")
            self.is_recording = False
            self.current_recording = []

    def _save_recording(self, audio_chunks: list) -> Optional[str]:
        """
        Save recorded chunks to a WAV file in the temp directory.

        Args:
            audio_chunks: List of raw audio byte chunks.

        Returns:
            Path of the saved file, or None on failure.
        """
        try:
            # Millisecond timestamp keeps filenames unique enough here
            timestamp = int(time.time() * 1000)
            filename = f"recording_{timestamp}.wav"
            filepath = self.temp_dir / filename

            # Write a standard WAV file matching the capture parameters
            with wave.open(str(filepath), 'wb') as wf:
                wf.setnchannels(self.channels)
                wf.setsampwidth(self.audio.get_sample_size(self.audio_format))
                wf.setframerate(self.sample_rate)
                wf.writeframes(b''.join(audio_chunks))

            print(f"💾 录音已保存: {filepath}")
            return str(filepath)

        except Exception as e:
            print(f"❌ 保存录音失败: {e}")
            return None

    def record_fixed_duration(self, duration: float, device_index: Optional[int] = None) -> Optional[str]:
        """
        Record audio for a fixed duration (blocking).

        Args:
            duration: Recording length in seconds.
            device_index: Audio device index; None selects the default device.

        Returns:
            Path of the recorded WAV file, or None on failure.
        """
        print(f"🎤 开始录制 {duration} 秒音频...")

        try:
            # Open a blocking-mode input stream
            stream = self.audio.open(
                format=self.audio_format,
                channels=self.channels,
                rate=self.sample_rate,
                input=True,
                input_device_index=device_index,
                frames_per_buffer=self.chunk_size
            )

            # Number of chunk-reads needed to cover the requested duration
            frames_to_record = int(self.sample_rate * duration / self.chunk_size)
            audio_chunks = []

            # Progress is printed roughly every 10%; max(1, ...) avoids a
            # ZeroDivisionError when the recording spans fewer than 10 chunks.
            progress_step = max(1, frames_to_record // 10)

            print("   录制中...")
            for i in range(frames_to_record):
                # exception_on_overflow=False: drop overflowed frames instead
                # of raising IOError when this loop falls behind the device
                # (the progress prints below make that plausible).
                data = stream.read(self.chunk_size, exception_on_overflow=False)
                audio_chunks.append(data)

                # Show progress
                progress = (i + 1) / frames_to_record * 100
                if i % progress_step == 0:
                    print(f"   进度: {progress:.0f}%")

            stream.stop_stream()
            stream.close()

            # Persist the recording
            filepath = self._save_recording(audio_chunks)
            print(f"✅ 录制完成: {filepath}")
            return filepath

        except Exception as e:
            print(f"❌ 录制失败: {e}")
            return None

    def upload_audio_to_tos(self, audio_file: str) -> Optional[str]:
        """
        Upload an audio file to TOS object storage.

        Args:
            audio_file: Local audio file path.

        Returns:
            CDN URL of the uploaded object; a file:// fallback URL when TOS
            is unavailable or errored; None on explicit upload failure.
        """
        try:
            # TOS config is optional; fall back to a local file:// URL
            try:
                from config.tos_config import TOSConfig
            except ImportError:
                print("⚠️ 无法导入TOS配置，使用本地路径")
                return f"file://{os.path.abspath(audio_file)}"

            print(f"📤 上传音频文件到TOS: {audio_file}")

            # Verify the file exists before attempting any network work
            if not os.path.exists(audio_file):
                print(f"❌ 音频文件不存在: {audio_file}")
                return None

            # Build the TOS client from configuration
            tos_config = TOSConfig()
            tos_client = tos_config.create_tos_client()

            if not tos_client:
                print("❌ TOS客户端创建失败")
                return None

            # Derive the object key; audio files get a dedicated folder prefix
            file_name = os.path.basename(audio_file)
            audio_folder = "voice_audio"
            object_key = tos_config.get_full_object_key(file_name, audio_folder)

            print(f"   上传到: {object_key}")

            # Stream the file contents up to TOS
            with open(audio_file, 'rb') as file_data:
                upload_response = tos_client.put_object(
                    bucket=tos_config.bucket_name,
                    key=object_key,
                    content=file_data
                )

            # Check the upload result
            if upload_response.status_code == 200:
                # Build the CDN access URL for the stored object
                cdn_url = tos_config.get_cdn_url(object_key)
                print(f"✅ 音频上传成功: {cdn_url}")
                return cdn_url
            else:
                print(f"❌ 音频上传失败，状态码: {upload_response.status_code}")
                return None

        except Exception as e:
            print(f"❌ 上传音频失败: {e}")
            # Deliberate best-effort: on any error fall back to the local path
            return f"file://{os.path.abspath(audio_file)}"

    def cleanup_temp_files(self, keep_recent: int = 5):
        """
        Delete old temporary recordings, keeping the newest ones.

        Args:
            keep_recent: Number of most-recent files to keep.
        """
        try:
            audio_files = list(self.temp_dir.glob("recording_*.wav"))
            # Newest first, by modification time
            audio_files.sort(key=lambda x: x.stat().st_mtime, reverse=True)

            # Remove everything beyond the keep window
            for file_path in audio_files[keep_recent:]:
                file_path.unlink()
                print(f"🗑️ 删除旧录音: {file_path.name}")

            print(f"🧹 临时文件清理完成，保留最近 {min(len(audio_files), keep_recent)} 个文件")

        except Exception as e:
            print(f"❌ 清理临时文件失败: {e}")

    def set_vad_parameters(self,
                          volume_threshold: Optional[float] = None,
                          silence_duration: Optional[float] = None,
                          min_speech_duration: Optional[float] = None):
        """
        Update VAD parameters; None leaves a parameter unchanged.

        Args:
            volume_threshold: RMS volume threshold.
            silence_duration: Silence duration (seconds) that ends a segment.
            min_speech_duration: Minimum speech length in seconds.
        """
        if volume_threshold is not None:
            self.vad.volume_threshold = volume_threshold
        if silence_duration is not None:
            self.vad.silence_duration = silence_duration
        if min_speech_duration is not None:
            self.vad.min_speech_duration = min_speech_duration

        print(f"🔧 VAD参数已更新:")
        print(f"   音量阈值: {self.vad.volume_threshold}")
        print(f"   静音时长: {self.vad.silence_duration}s")
        print(f"   最小语音时长: {self.vad.min_speech_duration}s")

    def get_audio_stats(self) -> Dict[str, Any]:
        """Return a snapshot of capture/VAD state as a JSON-safe dict."""
        return {
            "is_listening": self.is_listening,
            "is_recording": self.is_recording,
            "current_volume": float(self.vad.last_volume),  # native float for JSON
            "is_speaking": self.vad.is_speaking,
            "sample_rate": self.sample_rate,
            "channels": self.channels,
            "temp_files_count": len(list(self.temp_dir.glob("recording_*.wav")))
        }

    def __del__(self):
        """Destructor: best-effort release of the stream and PyAudio."""
        # Never raise from __del__ — it may run during interpreter shutdown
        # or on a partially-initialized instance (if __init__ failed early).
        try:
            self.stop_listening()
        except Exception:
            pass
        if hasattr(self, 'audio'):
            self.audio.terminate()


class VoiceControlledASR:
    """
    Voice-controlled ASR service.

    Glues the real-time audio capturer to ASR recognition: every time a
    speech segment ends, the recording is uploaded and transcribed.
    """

    def __init__(self, asr_service: Optional[MinimalASR] = None):
        """
        Set up the capturer and the ASR backend.

        Args:
            asr_service: Existing ASR service instance; a fresh one is
                created when None.
        """
        self.audio_capture = RealTimeAudioCapture()
        self.asr_service = asr_service if asr_service is not None else MinimalASR()

        # Route end-of-speech events into the recognition pipeline.
        self.audio_capture.on_speech_end = self._on_speech_end

        print("🎯 语音控制ASR服务初始化完成")

    def _on_speech_end(self, vad_result: Dict[str, Any], audio_file: str):
        """End-of-speech callback: upload the recording, then run ASR on it."""
        print(f"🔄 开始处理录音: {audio_file}")

        # Upload the audio file (actual upload logic lives in the capturer).
        remote_url = self.audio_capture.upload_audio_to_tos(audio_file)
        if not remote_url:
            print("❌ 音频上传失败")
            return

        # Run ASR recognition against the uploaded audio.
        result_segments = self.asr_service.recognize(remote_url)
        if not result_segments:
            print("❌ ASR识别失败")
            return

        # Collect the transcript and hand it to the command pipeline.
        transcript = self.asr_service.get_full_text(result_segments)
        print(f"🗣️ 识别结果: {transcript}")
        self._process_recognized_text(transcript, result_segments)

    def _process_recognized_text(self, text: str, segments: list):
        """
        Handle the recognized text.

        Args:
            text: Recognized transcript.
            segments: ASR segment metadata.
        """
        # TODO: integrate the existing IntelligentCommandService here.
        print(f"📝 待处理文本: {text}")
        print(f"📊 识别段落数: {len(segments)}")

    def start_voice_control(self, device_index: Optional[int] = None):
        """Start listening and block until the user presses Ctrl+C."""
        print("🚀 启动语音控制模式...")
        self.audio_capture.start_listening(device_index)

        try:
            print("🎤 语音控制已启动，开始说话...")
            print("   按 Ctrl+C 退出")

            # Idle loop: all real work happens in the capture callbacks.
            while True:
                time.sleep(0.1)

        except KeyboardInterrupt:
            print("\n👋 退出语音控制")
            self.stop_voice_control()

    def stop_voice_control(self):
        """Stop the underlying audio listener."""
        self.audio_capture.stop_listening()
        print("🔇 语音控制已停止")


def main():
    """Interactive test / demo entry point for the capture module."""
    print("🧪 实时音频采集模块测试")
    print("=" * 50)

    # Build the capturer and show the available input devices.
    capture = RealTimeAudioCapture()
    capture.list_audio_devices()

    print("\n选择测试模式:")
    print("1. 固定时长录音测试")
    print("2. 实时VAD测试")
    print("3. 语音控制ASR测试")

    try:
        selection = input("请输入选择 (1-3): ").strip()

        if selection == "1":
            # Fixed-duration recording test.
            seconds = float(input("录音时长(秒): ") or "3")
            recorded_path = capture.record_fixed_duration(seconds)
            if recorded_path:
                print(f"✅ 录音完成: {recorded_path}")

        elif selection == "2":
            # Real-time VAD test: print markers on segment boundaries.
            def _speech_started(vad_result):
                print(f"🟢 语音开始!")

            def _speech_finished(vad_result, audio_file):
                print(f"🔴 语音结束! 文件: {audio_file}")

            capture.on_speech_start = _speech_started
            capture.on_speech_end = _speech_finished
            capture.start_listening()

            print("🎤 实时VAD测试中... 按 Ctrl+C 退出")
            while True:
                time.sleep(0.1)

        elif selection == "3":
            # End-to-end voice-controlled ASR test.
            voice_asr = VoiceControlledASR()
            voice_asr.start_voice_control()

        else:
            print("❌ 无效选择")

    except KeyboardInterrupt:
        print("\n👋 测试结束")
    except Exception as e:
        print(f"❌ 测试失败: {e}")
    finally:
        # Always release the microphone and prune old recordings.
        capture.stop_listening()
        capture.cleanup_temp_files()


# Run the interactive demo only when executed as a script.
if __name__ == "__main__":
    main()
