# ==== Dependency imports ====
# Standard library
import asyncio
import os
import threading
import time

# Third-party
import numpy as np
import requests
import sounddevice as sd
import soundfile as sf
import webrtcvad
import websockets
import whisper
from fastapi import FastAPI, WebSocket
from fastapi.responses import HTMLResponse

# ==== Configuration ====
DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"
DEEPSEEK_API_KEY = "你的API密钥"  # Must be filled in; ask_llm() compares against this placeholder literal
WHISPER_MODEL = "tiny"  # Options: tiny, base, small, medium, large
WAKE_WORDS = ["你好", "小助手", "机器人"]  # Wake-word list matched as substrings of the transcription

# ==== Simple wake-word detection (Whisper-based) ====
class SimpleWakeupDetector:
    """Listen to the microphone in short chunks and use Whisper to spot wake words.

    The Whisper model is loaded lazily on first use, so constructing the
    detector stays cheap.
    """

    def __init__(self, wake_words=WAKE_WORDS):
        self.wake_words = wake_words
        self.whisper_model = None  # loaded lazily by load_whisper_model()

    def load_whisper_model(self):
        """Load the Whisper model once; subsequent calls are no-ops."""
        if self.whisper_model is None:
            print("加载Whisper模型...")
            self.whisper_model = whisper.load_model(WHISPER_MODEL)

    def record_audio(self, duration=2, samplerate=16000):
        """Record `duration` seconds of mono int16 audio and return it as an ndarray."""
        print("录制音频中...")
        audio = sd.rec(int(duration * samplerate), samplerate=samplerate, channels=1, dtype='int16')
        sd.wait()  # block until the recording completes
        return audio

    def detect_wake_word(self, audio, samplerate=16000):
        """Transcribe `audio` and check the text for any configured wake word.

        Returns (True, matched_word) on a hit, (False, None) otherwise.
        Never raises: any failure is logged and reported as "not detected".
        """
        temp_file = "temp_wakeup.wav"
        try:
            # Whisper's transcribe() takes a file path, so round-trip through a wav file.
            sf.write(temp_file, audio, samplerate)

            if self.whisper_model is None:
                self.load_whisper_model()

            result = self.whisper_model.transcribe(temp_file, language="zh")
            text = result["text"].strip().lower()

            print(f"识别结果: {text}")

            for wake_word in self.wake_words:
                if wake_word.lower() in text:
                    return True, wake_word

            return False, None

        except Exception as e:
            print(f"唤醒词检测异常: {e}")
            return False, None
        finally:
            # Fix: the temp wav file used to be left on disk after every probe.
            if os.path.exists(temp_file):
                try:
                    os.remove(temp_file)
                except OSError:
                    pass  # best effort; a stale temp file is harmless

    def wait_for_wakeup(self, callback=None):
        """Block until a wake word is heard, then invoke `callback` (if given) and return."""
        print(f"等待唤醒词: {self.wake_words}")
        print("请说出唤醒词...")

        while True:
            try:
                audio = self.record_audio(duration=2)
                detected, wake_word = self.detect_wake_word(audio)

                if detected:
                    print(f"检测到唤醒词: {wake_word}")
                    if callback:
                        callback()
                    break

                time.sleep(0.5)  # brief pause between probes

            except KeyboardInterrupt:
                print("用户中断")
                break
            except Exception as e:
                print(f"唤醒检测异常: {e}")
                time.sleep(1)

def wait_for_wakeup(callback=None, wake_words=WAKE_WORDS):
    """Module-level convenience wrapper: build a detector and block until wakeup."""
    SimpleWakeupDetector(wake_words=wake_words).wait_for_wakeup(callback)

# ==== VAD-based automatic recording ====
def record_audio_vad(filename="temp.wav", samplerate=16000, max_record=20, vad_mode=2, silence_limit=0.8):
    """Record from the default input device until trailing silence, then save a wav.

    Runs WebRTC VAD over 30 ms frames and stops once `silence_limit` seconds of
    continuous non-speech have accumulated (never within the first ~10 frames),
    or after `max_record` seconds total.
    """
    detector = webrtcvad.Vad(vad_mode)
    frame_ms = 30  # WebRTC VAD accepts 10/20/30 ms frames
    samples_per_frame = int(samplerate * frame_ms / 1000)
    total_frames = int(max_record * 1000 / frame_ms)
    captured = []
    quiet_frames = 0
    print("请开始说话...（停顿约1秒自动结束）")
    with sd.InputStream(samplerate=samplerate, channels=1, dtype='int16') as stream:
        for frame_index in range(total_frames):
            frame, _ = stream.read(samples_per_frame)
            captured.append(frame)
            # Reset the silence counter on speech; otherwise keep counting.
            if detector.is_speech(frame.tobytes(), samplerate):
                quiet_frames = 0
            else:
                quiet_frames += 1
            if quiet_frames * frame_ms > silence_limit * 1000 and frame_index > 10:
                break
    sf.write(filename, np.concatenate(captured, axis=0), samplerate)
    print("录音结束。")

# ==== Whisper speech recognition ====
# Cache of loaded Whisper models, keyed by model name, so repeated calls
# don't reload the weights from disk on every dialogue turn.
_WHISPER_MODELS = {}

def transcribe_audio(filename="temp.wav", model_name=WHISPER_MODEL):
    """Transcribe the audio file `filename` with Whisper and return the text.

    Fix: the model used to be re-loaded on every call; it is now cached per
    model name, which makes each transcription after the first much faster.
    """
    model = _WHISPER_MODELS.get(model_name)
    if model is None:
        print("加载Whisper模型...")
        model = whisper.load_model(model_name)
        _WHISPER_MODELS[model_name] = model
    print("开始识别...")
    # language=None lets Whisper auto-detect the spoken language.
    result = model.transcribe(filename, language=None)
    print("识别完成。")
    return result["text"]

def record_and_transcribe():
    """Capture one VAD-delimited utterance and return its Whisper transcription."""
    record_audio_vad()
    text = transcribe_audio()
    return text

# ==== Deepseek LLM API ====
def ask_llm(prompt):
    """Send `prompt` to the Deepseek chat API and return the reply text.

    Never raises: a missing API key, network failures, non-200 responses and
    malformed payloads are all reported as bracketed placeholder strings so
    the dialogue loop keeps running.
    """
    if not DEEPSEEK_API_KEY or DEEPSEEK_API_KEY == "你的API密钥":
        print("请填写你的Deepseek API密钥！")
        return "[API密钥未配置]"
    headers = {
        "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "deepseek-chat",
        "messages": [
            {"role": "user", "content": prompt}
        ]
    }
    try:
        print("向Deepseek LLM发送请求...")
        # Fix: catch only requests' own errors here rather than a blanket Exception.
        resp = requests.post(DEEPSEEK_API_URL, headers=headers, json=data, timeout=20)
    except requests.RequestException as e:
        print(f"LLM请求异常: {e}")
        return f"[LLM请求异常] {e}"
    if resp.status_code != 200:
        print("LLM请求失败：", resp.text)
        return f"[LLM请求失败] {resp.text}"
    try:
        content = resp.json()["choices"][0]["message"]["content"]
    except (ValueError, KeyError, IndexError, TypeError) as e:
        # Malformed or unexpected response body.
        print(f"LLM请求异常: {e}")
        return f"[LLM请求异常] {e}"
    print("LLM回复：", content)
    return content

# ==== WebSocket push to the frontend ====
async def push_to_frontend(message, ws_url="ws://localhost:8000/ws"):
    """Deliver `message` to the frontend over a one-shot WebSocket connection.

    Best effort: any connection or send failure is logged, not raised.
    """
    try:
        async with websockets.connect(ws_url) as conn:
            await conn.send(message)
            print("已推送到前端。")
    except Exception as err:
        print("推送前端失败：", err)

def push_to_frontend_sync(message):
    """Blocking wrapper around push_to_frontend() for non-async callers."""
    asyncio.run(push_to_frontend(message))

# ==== FastAPI frontend service ====
app = FastAPI()  # serves the chat page below plus the /ws relay endpoint
# Inline single-page UI: opens a WebSocket back to this server and appends
# every message it receives to the page.
html = """
<!DOCTYPE html>
<html>
<head>
    <title>机器人对话展示</title>
</head>
<body>
    <h2>机器人对话内容</h2>
    <div id="content"></div>
    <script>
        var ws = new WebSocket("ws://" + location.host + "/ws");
        ws.onmessage = function(event) {
            document.getElementById('content').innerHTML += "<p>" + event.data + "</p>";
        };
    </script>
</body>
</html>
"""

class ConnectionManager:
    """Track active WebSocket connections and broadcast messages to all of them."""

    def __init__(self):
        # All currently-connected client sockets.
        self.active_connections = []

    async def connect(self, websocket: WebSocket):
        """Accept a new client connection and start tracking it."""
        await websocket.accept()
        self.active_connections.append(websocket)

    def disconnect(self, websocket: WebSocket):
        """Stop tracking a connection.

        Fix: now safe to call for an already-removed socket (previously
        raised ValueError on a double disconnect).
        """
        if websocket in self.active_connections:
            self.active_connections.remove(websocket)

    async def broadcast(self, message: str):
        """Send `message` to every client; drop any client whose send fails.

        Fix: a single dead connection used to abort the whole broadcast with
        an exception and was never removed from the list.
        """
        for connection in list(self.active_connections):
            try:
                await connection.send_text(message)
            except Exception:
                self.disconnect(connection)

manager = ConnectionManager()

@app.get("/")
async def get():
    """Serve the static chat page."""
    return HTMLResponse(content=html)

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """Per-client WebSocket loop: relay every received message to all clients.

    Fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt and,
    in async code, task cancellation. Catching Exception lets those propagate
    while normal client disconnects are still cleaned up.
    """
    await manager.connect(websocket)
    try:
        while True:
            data = await websocket.receive_text()
            await manager.broadcast(data)
    except Exception:
        # Client disconnected (or errored); stop tracking it.
        manager.disconnect(websocket)

# ==== Main control flow ====
def dialogue_loop():
    """Multi-turn conversation loop; returns when the user asks to end the dialogue."""
    exit_phrases = ('结束对话', '退出对话')
    print("唤醒成功，进入多轮对话模式（说'结束对话'可退出）...")
    while True:
        asr_text = record_and_transcribe()
        print(f"ASR识别结果: {asr_text}")
        # Ignore empty recognitions and listen again.
        if not asr_text.strip():
            print("未识别到有效语音，继续等待...")
            continue
        # An exit phrase ends the session and notifies the frontend.
        if any(phrase in asr_text for phrase in exit_phrases):
            print("检测到结束唤醒词，退出多轮对话模式。")
            push_to_frontend_sync("[对话已结束]")
            return
        llm_result = ask_llm(asr_text)
        print(f"LLM回复: {llm_result}")
        push_to_frontend_sync(llm_result)

def main():
    """Top-level loop: wait for a wake word, run one dialogue session, repeat."""
    while True:
        print("等待唤醒...")
        wait_for_wakeup(callback=dialogue_loop)

# ==== Frontend service thread launcher ====
def start_frontend():
    """Run the FastAPI app under uvicorn (blocking); intended for a daemon thread."""
    import uvicorn  # local import keeps uvicorn optional until the server starts
    uvicorn.run(app, host="0.0.0.0", port=8000)

# ==== Entry point ====
if __name__ == "__main__":
    # Serve the web frontend in a background daemon thread so it dies with the process.
    threading.Thread(target=start_frontend, daemon=True).start()
    # Run the wakeup/dialogue loop in the main thread.
    main()