import io
import wave
import pyaudio
import socket
import numpy as np
import time
import threading
from collections import deque
import tool.config.config as cfg
from framework.frontend.speech_recognition_model import VoskSpeechRcognitionModel
from framework.frontend.speech_recognition_model import FunasrSpeechRcognitionModel
from framework.frontend.speech_recognition_model import TencentCloudRcognition

# Module-wide settings (audio capture parameters, thresholds) loaded once at import.
config = cfg.get_config("config.toml")

class VoiceRecorder:
    """Record microphone audio until sustained silence, then transcribe it.

    Typical use: ``text = VoiceRecorder().start_recording()`` — blocks until
    the speaker falls silent for ``SILENCE_DURATION`` seconds, then runs the
    captured audio through the configured speech-recognition backend and
    returns the transcript.
    """

    def __init__(self):
        self.audio = pyaudio.PyAudio()
        self.stream = None
        self.recording = False
        self.audio_data = []        # PCM chunks captured while speech is active
        self.silence_counter = 0    # consecutive below-threshold chunks seen so far
        self.SAMPLE_RATE = int(config["AUDIO"]["SAMPLE_RATE"])
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = int(config['AUDIO']['CHANNELS'])        # mono capture
        self.CHUNK_SIZE = int(config['AUDIO']['CHUNK_SIZE'])    # frames per stream.read()
        # Silence-detection tuning.
        self.SILENCE_THRESHOLD = float(config['AUDIO']['SILENCE_THRESHOLD'])  # RMS below this counts as silence (tune per environment)
        self.SILENCE_DURATION = float(config['AUDIO']['SILENCE_DURATION'])    # seconds of continuous silence before stopping
        self.BUFFER_DURATION = float(config['AUDIO']['BUFFER_DURATION'])      # seconds of trailing audio kept so word endings are not clipped
        # Derived chunk counts for the silence timer and the ring buffer.
        self.silence_blocks = int(self.SILENCE_DURATION * self.SAMPLE_RATE / self.CHUNK_SIZE)
        self.buffer_blocks = int(self.BUFFER_DURATION * self.SAMPLE_RATE / self.CHUNK_SIZE)
        self.buffer = deque(maxlen=self.buffer_blocks)  # ring buffer of the most recent chunks
        self.query_text = ""        # last recognition result
        self.is_start_say = False   # True once the first voiced chunk has been seen
        # Alternative backends kept for reference:
        # self.speech_recognition_model = VoskSpeechRcognitionModel()
        # self.speech_recognition_model = FunasrSpeechRcognitionModel()
        self.speech_recognition_model = TencentCloudRcognition()

    def pcm_to_wav(self, pcm_bytes):
        """Wrap raw PCM bytes in a WAV container and return the WAV bytes.

        The header uses the recorder's current channel count, sample width
        (derived from ``FORMAT``) and sample rate.
        """
        buffer = io.BytesIO()
        # Context manager guarantees the WAV header is finalized and the
        # writer closed even if writeframes raises (the original leaked the
        # writer on error).
        with wave.open(buffer, 'wb') as wf:
            wf.setnchannels(self.CHANNELS)
            wf.setsampwidth(self.audio.get_sample_size(self.FORMAT))
            wf.setframerate(self.SAMPLE_RATE)
            wf.writeframes(pcm_bytes)
        return buffer.getvalue()

    def start_recording(self):
        """Run a blocking recording session; return the recognized text.

        Returns None without doing anything if a session is already active.
        """
        if self.recording:
            return

        print("🎤 录音启动 - 等待说话...")
        self.recording = True
        self.audio_data = []
        self.silence_counter = 0
        self.buffer.clear()
        self.is_start_say = False

        # NOTE(review): input device index is hard-coded; confirm device 1 is
        # the intended microphone on the deployment machine.
        device_index = 1
        dev_info = self.audio.get_device_info_by_index(device_index)
        self.SAMPLE_RATE = int(dev_info['defaultSampleRate'])  # force-match the device sample rate
        self.CHANNELS = min(1, dev_info['maxInputChannels'])   # clamp to mono (assumes the device reports >= 1 input channel)

        self.stream = self.audio.open(
            format=pyaudio.paInt16,
            channels=self.CHANNELS,
            rate=self.SAMPLE_RATE,
            input=True,
            input_device_index=device_index,
            frames_per_buffer=512  # small buffer to avoid overruns
            # NOTE(review): stream buffer is 512 frames but _record_loop reads
            # CHUNK_SIZE frames per call — confirm these are meant to differ.
        )

        # Fix: the original spawned a daemon thread and immediately join()ed
        # it, which blocks exactly like a plain call — run the loop directly.
        self._record_loop()
        return self.query_text

    def stop_recording(self):
        """Stop capture, close the stream, and transcribe what was recorded."""
        if not self.recording:
            return

        self.recording = False
        if self.stream:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None

        # Append the ring buffer so the trailing (silent) chunks are kept and
        # word endings are not clipped.
        # NOTE(review): voiced chunks are appended to both audio_data and the
        # ring buffer in _record_loop, so this can duplicate the most recent
        # speech — verify this is intentional.
        self.audio_data.extend(self.buffer)

        if self.audio_data:
            print("🛑 录音已停止")
            return self._send_audio()

    def _record_loop(self):
        """Capture loop: read chunks, track RMS volume, stop on sustained silence."""
        try:
            while self.recording:
                data = self.stream.read(self.CHUNK_SIZE, exception_on_overflow=False)
                audio_array = np.frombuffer(data, dtype=np.int16)

                # RMS volume of the chunk; accumulate in float64 so squaring
                # int16 samples cannot overflow.
                if len(audio_array) > 0:
                    sum_sq = np.sum(audio_array.astype(np.float64) ** 2)
                    rms = np.sqrt(sum_sq / len(audio_array)) if sum_sq > 0 else 0.0
                else:
                    rms = 0.0
                print(f"当前音量: {rms:.2f}/{self.SILENCE_THRESHOLD}",end="\r")  # debug output

                if self.is_start_say == False and rms >= self.SILENCE_THRESHOLD:
                    # First voiced chunk: speech has started.
                    self.is_start_say = True
                    self.buffer.append(data)
                    self.audio_data.append(data)
                elif self.is_start_say == True:
                    # Silence tracking: count consecutive quiet chunks, reset
                    # the counter whenever voice is detected again.
                    if rms < self.SILENCE_THRESHOLD:
                        self.silence_counter += 1
                    else:
                        self.silence_counter = 0

                    # Always feed the ring buffer so the tail is preserved.
                    self.buffer.append(data)

                    # Sustained silence reached — end the session.
                    if self.silence_counter >= self.silence_blocks:
                        return self.stop_recording()

                    # Main audio data only grows while voice is present.
                    if self.silence_counter == 0:
                        self.audio_data.append(data)

        except Exception as e:
            # Boundary handler: a capture failure must end the session, not
            # crash the caller.
            print(f"录音出错: {e}")
            self.recording = False

    def _send_audio(self):
        """Transcribe the captured audio; result is stored in ``query_text``."""
        try:
            full_audio = b''.join(self.audio_data)
            data_length = len(full_audio)
            print(f"📡 准备发送音频数据 ({data_length} 字节)")
            wav_bytes = self.pcm_to_wav(full_audio)
            # Fix: the IS_LOCAL "True" and "False" branches were byte-identical;
            # collapsed them while preserving the original no-op for any other
            # config value.
            if cfg.get_config("sys.toml")["AUDIO"]["IS_LOCAL"] in ("True", "False"):
                self.query_text = self.speech_recognition_model.speech_recognition(wav_bytes)
            return
        except Exception as e:
            # Best-effort: recognition failure leaves query_text unchanged.
            print(f"音频识别失败: {e}")

# if __name__=="__main__":
#     V=VoiceRecorder()
#     V._send_audio()