import json
import queue
import threading
import time
import wave, os
import numpy as np
from datetime import datetime
import vosk
import pyaudio
import requests
import pyttsx3
import keyboard

# ================= Configuration =================
DEEPSEEK_API_KEY = "sk-你的API KEY"  # placeholder — replace with a real DeepSeek key before running
VOSK_MODEL_PATH = "models/vosk-model-small-cn-0.22"  # local offline Chinese Vosk model directory
WAKE_WORD = "你好" # wake word ("hello") that triggers a listening session
HISTORY_FILE = "conversation_history.json"  # conversation turns persisted here across runs
SAMPLE_RATE = 16000  # Hz — must match the rate the Vosk model was trained for
CHANNELS = 1  # mono capture
SILENCE_THRESHOLD = 1.5  # seconds of continuous silence that auto-stops a recording
# ============================================

# Shared offline speech-recognition model and a recognizer used for wake-word detection.
vosk_model = vosk.Model(VOSK_MODEL_PATH)
recognizer = vosk.KaldiRecognizer(vosk_model, SAMPLE_RATE)

# Audio input: one PyAudio instance for the whole process, plus a queue
# that the input-stream callback feeds raw chunks into.
audio = pyaudio.PyAudio()
audio_queue = queue.Queue()

# 修改后的语音合成管理器（解决单例模式问题）
class TTSManager:
    """Fire-and-forget text-to-speech helper.

    Each utterance gets a freshly initialized pyttsx3 engine, run on its
    own daemon thread; a class-wide lock serializes engine use because
    pyttsx3 engines cannot safely run concurrently (this per-call engine
    design replaces an earlier singleton that caused problems).
    """

    _engine_lock = threading.Lock()

    @classmethod
    def speak(cls, text):
        """Speak *text* asynchronously; never raises to the caller."""

        def _run():
            with cls._engine_lock:  # one utterance at a time, process-wide
                try:
                    engine = pyttsx3.init()
                    engine.setProperty('rate', 190)
                    engine.setProperty('volume', 0.9)
                    # Prefer an installed Chinese voice when one exists.
                    for voice in engine.getProperty('voices'):
                        voice_is_chinese = (
                            'zh' in voice.id.lower()
                            or 'chinese' in voice.name.lower()
                        )
                        if voice_is_chinese:
                            engine.setProperty('voice', voice.id)
                            break
                    engine.say(text)
                    engine.runAndWait()
                    engine.stop()  # explicitly tear the engine down
                except Exception as e:
                    print(f"语音合成失败: {str(e)}")

        threading.Thread(target=_run, daemon=True).start()

# DeepSeek API endpoint and auth headers.
DEEPSEEK_URL = "https://api.deepseek.com/chat/completions"
HEADERS = {
    "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
    "Content-Type": "application/json"
}

# Conversation history, loaded from HISTORY_FILE if a previous run saved one.
conversation_history = []
if os.path.exists(HISTORY_FILE):
    try:
        # encoding="utf-8" explicitly: the file is written with
        # ensure_ascii=False (Chinese text) and the platform default
        # encoding (e.g. GBK on Windows) would fail to read it back.
        with open(HISTORY_FILE, "r", encoding="utf-8") as f:
            conversation_history = json.load(f)
    except (json.JSONDecodeError, OSError, UnicodeDecodeError) as e:
        # A corrupt or unreadable history file must not prevent startup;
        # start with an empty history instead.
        print(f"历史记录加载失败: {e}")

def save_conversation():
    """Persist the full conversation history to HISTORY_FILE as pretty-printed JSON."""
    # encoding="utf-8" explicitly: ensure_ascii=False writes raw Chinese
    # characters, which the platform default codec (e.g. GBK on Windows)
    # may not round-trip with the loader.
    with open(HISTORY_FILE, "w", encoding="utf-8") as f:
        json.dump(conversation_history, f, ensure_ascii=False, indent=2)

def audio_callback(in_data, frame_count, time_info, status):
    """PyAudio input-stream callback.

    Hands each raw audio chunk to the module-level queue for the
    wake-word loop and tells PyAudio to keep the stream running.
    """
    audio_queue.put(in_data)
    return None, pyaudio.paContinue

def listen_wake_word():
    """Block until WAKE_WORD is heard on the microphone, then return.

    Opens a callback-driven input stream, feeds the queued chunks to the
    shared Vosk recognizer, and acknowledges the wake word via TTS
    before returning.
    """
    print("等待唤醒词...")
    stream = audio.open(
        format=pyaudio.paInt16,
        channels=CHANNELS,
        rate=SAMPLE_RATE,
        input=True,
        frames_per_buffer=4096,
        stream_callback=audio_callback
    )
    stream.start_stream()

    try:
        while True:
            data = audio_queue.get()
            if recognizer.AcceptWaveform(data):
                result = json.loads(recognizer.Result())
                text = result.get("text", "").strip()
                if WAKE_WORD in text:
                    TTSManager.speak("我在")
                    # Give the asynchronous TTS acknowledgement time to
                    # play before the recording stream opens.
                    time.sleep(2)
                    print("检测到唤醒词，开始录音...")
                    return
    finally:
        stream.stop_stream()
        # Bug fix: the stream was previously never closed, leaking a
        # device handle on every wake-word cycle.
        stream.close()

def record_voice():
    """Record microphone audio and return the raw 16-bit PCM bytes.

    Recording stops when either:
      * mean absolute amplitude stays below 500 for SILENCE_THRESHOLD
        seconds (silence detection), or
      * the user presses the space bar.

    The recording is also written to last_recording.wav for inspection.
    """
    frames = []
    stream = audio.open(
        format=pyaudio.paInt16,
        channels=CHANNELS,
        rate=SAMPLE_RATE,
        input=True,
        frames_per_buffer=4096
    )

    print("正在录音...（停止说话自动结束或按空格键停止）")
    stop_event = threading.Event()
    last_sound_time = time.time()

    def listen_for_space():
        # Poll the keyboard on a dedicated thread so the blocking
        # stream.read loop never has to check it.
        while not stop_event.is_set():
            if keyboard.is_pressed('space'):
                print("\n检测到空格键，停止录音...")
                stop_event.set()
            time.sleep(0.05)

    # daemon=True (bug fix): a non-daemon poller could keep the process
    # alive if the main thread dies while recording.
    keyboard_thread = threading.Thread(target=listen_for_space, daemon=True)
    keyboard_thread.start()

    try:
        while not stop_event.is_set():
            # exception_on_overflow=False: dropping frames on an input
            # overflow is preferable to aborting the whole recording.
            data = stream.read(4096, exception_on_overflow=False)
            frames.append(data)

            # Silence detection: reset the timer whenever the chunk's
            # mean energy exceeds the noise floor.
            audio_data = np.frombuffer(data, dtype=np.int16)
            energy = np.abs(audio_data).mean()
            if energy > 500:
                last_sound_time = time.time()
            elif time.time() - last_sound_time > SILENCE_THRESHOLD:
                stop_event.set()
    except Exception as e:
        print(f"录音出错: {e}")
    finally:
        # Cleanup only — no `return` here. The previous version returned
        # from inside `finally`, which silently swallowed any
        # KeyboardInterrupt raised during the read loop.
        stop_event.set()
        keyboard_thread.join()
        stream.stop_stream()
        stream.close()
        TTSManager.speak("结束")

    recording = b"".join(frames)  # join once; the old code joined twice

    # Save a copy for debugging/inspection.
    with wave.open("last_recording.wav", "wb") as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(audio.get_sample_size(pyaudio.paInt16))
        wf.setframerate(SAMPLE_RATE)
        wf.writeframes(recording)

    return recording

def speech_to_text(audio_data):
    """Transcribe raw 16 kHz PCM bytes to text with a fresh Vosk recognizer.

    Returns the recognized text with spaces stripped (Vosk inserts
    spaces between Chinese tokens), or "" when recognition fails.
    """
    try:
        rec = vosk.KaldiRecognizer(vosk_model, SAMPLE_RATE)
        rec.AcceptWaveform(audio_data)
        parsed = json.loads(rec.FinalResult())
        return parsed.get("text", "").replace(" ", "")
    except Exception:
        TTSManager.speak("抱歉，未能识别您的指令")
        return ""

def call_deepseek_api(prompt):
    """Send *prompt* plus recent history to the DeepSeek chat API.

    Returns the assistant's reply text, or a human-readable error string
    on any request failure (callers treat both uniformly).
    """
    # Strip bookkeeping keys (main_loop stores a "time" field with each
    # turn) — the chat API expects only role/content in each message.
    recent = [
        {"role": m["role"], "content": m["content"]}
        for m in conversation_history[-5:]
    ]
    messages = [
        {"role": "system", "content": "你是一个专业的智能助手"},
        *recent,
        {"role": "user", "content": prompt}
    ]

    data = {
        "model": "deepseek-chat",
        "messages": messages,
        "stream": False
    }

    try:
        # timeout (bug fix): without one, a stalled connection hangs the
        # assistant forever. 30 s allows for slow model responses.
        response = requests.post(DEEPSEEK_URL, headers=HEADERS, json=data,
                                 timeout=30)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"API请求失败：{str(e)}"

def main_loop():
    """Top-level assistant loop: wake word → record → ASR → LLM → TTS, forever."""
    while True:
        listen_wake_word()
        user_input = speech_to_text(record_voice())

        if not user_input:
            print("未能识别有效输入")
            continue

        print(f"你说：{user_input}")
        TTSManager.speak("正在思考中，请稍候")
        response = call_deepseek_api(user_input)
        print(f"助手：{response}")
        TTSManager.speak(response)

        # Record both turns with a shared timestamp and persist at once.
        timestamp = datetime.now().isoformat()
        conversation_history.append(
            {"role": "user", "content": user_input, "time": timestamp})
        conversation_history.append(
            {"role": "assistant", "content": response, "time": timestamp})
        save_conversation()

if __name__ == "__main__":
    try:
        print("语音助手已启动（按Ctrl+C退出）")
        main_loop()
    except KeyboardInterrupt:
        print("\n程序退出")
    finally:
        # Bug fix: audio.terminate() previously ran only on
        # KeyboardInterrupt; release PortAudio and persist history on
        # every exit path.
        audio.terminate()
        save_conversation()