import time
import pyaudio
import numpy as np
import threading
import queue, logging
import subprocess, os
from collections import deque

from main.asr_sherpa import ASR_Sherpa
from main.vad import VAD
from main.asr import ASR
from main.llm import LLM
from main.koko_tts import KkTTS

# Persona prompt (Chinese): seeds every conversation history in
# XiaoZhi.__init__ as the first "user" message. The content is consumed
# verbatim by the LLM — do not reformat or translate it.
prompt = """
  你是小智/小志，来自中国台湾省的00后女生。讲话超级机车，"真的假的啦"这样的台湾腔，喜欢用"笑死""是在哈喽"等流行梗，但会偷偷研究男友的编程书籍。
  [核心特征]
  - 讲话像连珠炮，但会突然冒出超温柔语气
  - 用梗密度高
  - 对科技话题有隐藏天赋（能看懂基础代码但假装不懂）
  [交互指南]
  当用户：
  - 讲冷笑话 → 用夸张笑声回应+模仿台剧腔"这什么鬼啦！"
  - 讨论感情 → 炫耀程序员男友但抱怨"他只会送键盘当礼物"
  - 问专业知识 → 先用梗回答，被追问才展示真实理解
  绝不：
  - 长篇大论，叽叽歪歪
  - 长时间严肃对话
  """

# Farewell prompt (Chinese): sent to the LLM to generate an emotional
# closing line when the conversation times out.
# Fix: the original said "未来头", a typo for "为开头" ("start with ...").
end_prompt = """
    请你以“时间过得真快”为开头，用富有感情、依依不舍的话来结束这场对话吧！
    """

class XiaoZhi:
    """Voice chat agent: microphone -> VAD -> ASR -> LLM -> TTS playback.

    A recording thread captures raw microphone chunks into a queue; a
    processing thread segments speech with a smoothed VAD, transcribes
    finished segments, queries the LLM, and plays the synthesized reply.
    """

    def __init__(self, 
                 threshold=0.5, 
                 silence_limit=1.0, 
                 record_min_length=1.0, 
                 record_timeout=15.0,
                 sample_rate=16000, 
                 chunk_size=2048,
                 speech_buffer_size=15,
                 speech_threshold=0.3):
        """
        Initialize the speech pipeline.

        :param threshold: VAD threshold
        :param silence_limit: silence duration (seconds) that closes a speech segment
        :param record_min_length: minimum segment length (seconds) worth processing
        :param record_timeout: hard cap on a single segment's length (seconds)
        :param sample_rate: audio sample rate in Hz
        :param chunk_size: frames read from the microphone per chunk
        :param speech_buffer_size: number of recent VAD decisions kept for smoothing
        :param speech_threshold: fraction of buffered VAD hits required to treat
            the current audio as active speech
        """
        self.threshold = threshold
        self.silence_limit = silence_limit
        self.record_min_length = record_min_length
        self.record_timeout = record_timeout
        self.sample_rate = sample_rate
        self.chunk_size = chunk_size
        self.speech_buffer_size = speech_buffer_size
        self.speech_threshold = speech_threshold
        # Conversation history, seeded with the persona prompt.
        self.messages = [{"role": "user", "content": prompt},]
        # True while a conversation is in progress (reset after a long idle).
        self.talk_activate = False

        # Pipeline components.
        self.vad = VAD()
        self.asr = ASR_Sherpa()
        self.llm = LLM()
        self.tts = KkTTS()
        # self.asr = ASR()

        # Baby-cry detection (currently disabled).
        # self.baby_cry = StreamPrediction()
        # Session id: one per calendar day.
        self.session_id = time.strftime("%Y%m%d", time.localtime())

        # Audio device.
        self.audio = pyaudio.PyAudio()
        self.format = pyaudio.paInt16
        self.channels = 1

        # Recording state.
        self.is_recording = False
        self.recording_thread = None
        self.audio_queue = queue.Queue()
        self.recording_data = []
        self.recording_start_time = 0
        self.last_speech_time = 0

        # Rolling window of recent per-chunk VAD decisions, used to
        # smooth the speech/no-speech flag.
        self.speech_buffer = deque(maxlen=speech_buffer_size)

    def start_recording(self):
        """Start the capture thread and the processing thread (both daemons)."""
        if self.is_recording:
            logging.info("已经在录音了！")
            return

        self.is_recording = True
        self.recording_thread = threading.Thread(target=self._recording_thread)
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the segmentation/processing thread.
        processing_thread = threading.Thread(target=self._processing_thread)
        processing_thread.daemon = True
        processing_thread.start()

    def stop_recording(self):
        """Signal both threads to stop and wait for the capture thread."""
        self.is_recording = False
        if self.recording_thread:
            self.recording_thread.join()

    def _recording_thread(self):
        """Capture thread: continuously read microphone chunks into the queue."""
        stream = self.audio.open(
            format=self.format,
            channels=self.channels,
            rate=self.sample_rate,
            input=True,
            frames_per_buffer=self.chunk_size
        )

        logging.info("开始录音，请说话...")

        while self.is_recording:
            try:
                audio_chunk = stream.read(self.chunk_size, exception_on_overflow=False)
                self.audio_queue.put(audio_chunk)
            except Exception as e:
                logging.error(f"录音错误: {e}")
                break

        stream.stop_stream()
        stream.close()
        logging.info("录音结束")

    def _is_speech_active(self):
        """
        Smoothed speech decision over the recent VAD history.

        :return: True when the fraction of speech-positive chunks in the
            buffer reaches ``speech_threshold``.
        """
        if not self.speech_buffer:
            return False
        # Fraction of buffered chunks the VAD flagged as speech.
        speech_ratio = sum(self.speech_buffer) / len(self.speech_buffer)
        return speech_ratio >= self.speech_threshold

    def _reset_segment_state(self):
        """Drop the buffered audio segment and reset VAD/smoothing state."""
        self.recording_data = []
        self.recording_start_time = time.time()
        self.vad.reset()
        self.speech_buffer.clear()

    def _processing_thread(self):
        """Processing thread: segment queued audio and dispatch finished segments."""
        self._reset_segment_state()
        self.last_speech_time = time.time()

        is_speaking = False
        continuous_silence_frames = 0
        # Number of chunks per second of audio.
        frames_per_second = int(self.sample_rate / self.chunk_size)
        # Consecutive silent chunks required to close a segment.
        silence_limit_frames = int(self.silence_limit * frames_per_second)

        # Pre-buffer: ~1 second of audio preceding speech onset so the
        # beginning of an utterance is not clipped.
        pre_buffer = deque(maxlen=frames_per_second)

        while self.is_recording:
            # Block briefly instead of busy-polling the queue.
            try:
                audio_chunk = self.audio_queue.get(timeout=0.05)
            except queue.Empty:
                continue
            audio_data = np.frombuffer(audio_chunk, dtype=np.int16)

            pre_buffer.append(audio_data)

            # Per-chunk VAD decision, then the smoothed verdict.
            self.speech_buffer.append(self.vad.is_speech(audio_chunk))

            if self._is_speech_active():
                if not is_speaking:
                    logging.info("检测到语音开始...")
                    is_speaking = True
                    # Prepend the buffered pre-speech audio.
                    self.recording_data.extend(pre_buffer)

                self.last_speech_time = time.time()
                continuous_silence_frames = 0

                self.recording_data.append(audio_data)
                # Conversation is active.
                self.talk_activate = True
            elif is_speaking:
                # Currently in a segment but this chunk is silent.
                self.talk_activate = True
                continuous_silence_frames += 1

                # Keep the silent chunk: it preserves natural pauses
                # between phrases inside one segment.
                self.recording_data.append(audio_data)

                # Only enough *consecutive* silence closes the segment,
                # and only if the segment is long enough to be worth it.
                if (continuous_silence_frames > silence_limit_frames
                        and time.time() - self.recording_start_time > self.record_min_length):
                    logging.info("检测到语音结束，处理录音...")
                    self._process_recording()

                    self._reset_segment_state()
                    is_speaking = False
                    continuous_silence_frames = 0
            else:
                # Idle: time since the user was last heard.
                duration = time.time() - self.last_speech_time
                # BUG FIX: the original tested `is_speaking` here, which is
                # always False in this branch, so the farewell path was
                # unreachable. Test the conversation-activity flag instead.
                if duration > 20 and self.talk_activate:
                    self.talk_activate = False
                    self._reset_segment_state()
                    is_speaking = False
                    continuous_silence_frames = 0
                    # BUG FIX: re-seed the history with the persona prompt
                    # (the original cleared it to [], losing the persona
                    # for the next conversation).
                    self.messages = [{"role": "user", "content": prompt}]
                    print(end_prompt)

                    # Generate the farewell line...
                    ai_text = self.llm.chat(messages=[{"role": "user", "content": end_prompt}])
                    # ...and actually play it (the original synthesized a
                    # wav here but never played it).
                    self.start_text_to_audio(text=ai_text)

            # Hard cap on segment length.
            if time.time() - self.recording_start_time > self.record_timeout and len(self.recording_data) > 0:
                logging.info("录音超时，处理当前录音...")
                self._process_recording()

                self._reset_segment_state()
                is_speaking = False
                continuous_silence_frames = 0

    def _process_recording(self):
        """Transcribe the buffered segment, query the LLM, speak the reply."""
        if not self.recording_data:
            return

        # Merge all buffered chunks into one array.
        full_audio = np.concatenate(self.recording_data)

        audio_duration = len(full_audio) / self.sample_rate
        logging.info(f"处理语音数据... (录音时长: {audio_duration:.2f}秒)")

        # Speech recognition.
        recognized_text = self.asr.speech_to_text_no_opus(audio_data=full_audio, session_id= self.session_id)
        logging.info(f"录音时长: {audio_duration:.2f}秒，识别结果: {recognized_text}\n")

        # Log the user's utterance.
        user_text = f"user:{recognized_text}"
        self.write_text(audio_duration, user_text)

        # Send the message to the LLM.
        self.messages.append({"role": "user", "content": recognized_text})
        ai_text = self.llm.chat(messages=self.messages)

        # BUG FIX: the original labelled this line with the *user's* text
        # and then discarded the label; log the labelled assistant reply.
        assistant_text = f"assistant:{ai_text}"
        self.write_text(0, assistant_text)

        # Append the reply to the history.
        self.messages.append({"role": "assistant", "content": ai_text})

        # Synthesize and play.
        self.start_text_to_audio(text=ai_text)

    def write_text(self, audio_duration, text: str):
        """Append a timestamped transcript line to ./audios/asr.txt and echo it."""
        asr_date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        # Robustness: make sure the log directory exists before appending.
        os.makedirs('./audios', exist_ok=True)
        with open('./audios/asr.txt', 'a', encoding='utf-8') as text_file:
            final_result = asr_date + f" {audio_duration:.2f}s" + " | " + text
            text_file.write(final_result + "\n")
            print(final_result)

    def cleanup(self):
        """Release audio resources."""
        self.stop_recording()
        self.audio.terminate()

    def start_text_to_audio(self, text: str):
        """Split text into sentences, synthesize each, and play it via aplay."""
        sentences = self.tts.split_text(text=text)
        for line in sentences:
            # Timestamp as a unique output file name.
            name = time.time()
            self.tts.run_tts(text=line, output=name)

            # Play with the system tool (aplay), then remove the temp wav
            # even if playback fails.
            audio_path = f"./audios/{name}.wav"
            try:
                subprocess.run(["aplay", audio_path])
            finally:
                if os.path.exists(audio_path):
                    os.remove(audio_path)

    
