import asyncio
import json
import os
import queue
import time
from contextlib import asynccontextmanager
from threading import Thread

import pyaudio
import uvicorn
import vosk
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates

# Load the Vosk speech-recognition model (small Chinese model)
model_path = r"vosk-model-small-cn-0.22"  # change to your local model path
model = vosk.Model(model_path)

# Global state shared between the recognition thread and the asyncio app
clients = set()            # currently connected WebSocket clients
listening_active = False   # toggled by the browser to start/stop recognition
speech_thread = None       # daemon thread running speech_recognition_worker

# Thread-safe queues that carry results from the worker thread to the event loop
status_queue = queue.Queue()   # (status, message) tuples for UI status updates
message_queue = queue.Queue()  # (source, text) tuples with recognized speech

# Manage the application lifecycle with a lifespan context manager.
@asynccontextmanager
async def lifespan(app):
    """Start the recognition thread and queue pump on startup.

    On shutdown, clear the listening flag and cancel the queue-draining
    task so background work stops cleanly.
    """
    # FIX: `listening_active` must be declared global here — the original
    # shutdown assignment created a function-local variable and never
    # actually stopped the worker thread.
    global speech_thread, listening_active

    # Daemon thread that captures microphone audio and runs Vosk.
    speech_thread = Thread(target=speech_recognition_worker, daemon=True)
    speech_thread.start()

    # Background task that forwards queued updates to WebSocket clients.
    # FIX: keep a reference so the task can be cancelled on shutdown
    # (and isn't garbage-collected mid-flight).
    queue_task = asyncio.create_task(process_queues())

    yield

    # Shutdown: stop the worker loop and the queue pump.
    listening_active = False
    queue_task.cancel()

# Create the FastAPI application, wired to the lifespan manager above
app = FastAPI(lifespan=lifespan)

# Jinja2 templates are served from the local "templates" directory
templates = Jinja2Templates(directory="templates")

# Ensure the static directory exists before mounting it
os.makedirs("static", exist_ok=True)

# Serve static assets under /static
app.mount("/static", StaticFiles(directory="static"), name="static")

# Background task: drain the worker-thread queues and fan the contents
# out to every connected WebSocket client.
async def process_queues():
    """Forward queued status/message updates to WebSocket clients forever."""
    while True:
        try:
            # Drain pending status updates (EAFP: get until the queue is empty).
            while True:
                try:
                    status, message = status_queue.get_nowait()
                except queue.Empty:
                    break
                await broadcast_status(status, message)

            # Drain pending recognized-text messages the same way.
            while True:
                try:
                    source, text = message_queue.get_nowait()
                except queue.Empty:
                    break
                await broadcast_message(source, text)

        except Exception as e:
            print(f"处理队列时出错: {e}")

        await asyncio.sleep(0.1)

# Speech-recognition worker (runs in a daemon thread).
def speech_recognition_worker():
    """Capture microphone audio and push Vosk results onto the queues.

    Runs until the phrase '停止服务' is recognized. Results reach the
    asyncio side via the thread-safe status/message queues.
    """
    global listening_active
    p = pyaudio.PyAudio()
    # Recording parameters
    CHUNK = 4000  # small buffer for lower latency
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000  # sample rate the Vosk model expects
    # Open the input audio stream
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    # Recognizer that turns the audio stream into text
    rec = vosk.KaldiRecognizer(model, RATE)
    rec.SetWords(True)         # enable word-level info
    rec.SetPartialWords(True)  # enable partial (interim) results

    # Announce the initial idle state to the UI
    status_queue.put(('inactive', '准备就绪，点击麦克风开始识别'))

    stop = False
    last_partial = ""

    # FIX: try/finally guarantees the audio device is released even if the
    # loop exits via an unexpected exception.
    try:
        while not stop:
            if not listening_active:
                # Not listening: idle briefly instead of busy-waiting.
                # FIX: `import time` hoisted to module level instead of
                # re-executing the import on every loop iteration.
                time.sleep(0.1)
                continue

            try:
                data = stream.read(CHUNK, exception_on_overflow=False)
                if len(data) == 0:
                    continue

                if rec.AcceptWaveform(data):
                    # A complete utterance was recognized.
                    result = json.loads(rec.Result())
                    text = result.get("text", "")
                    if len(text) > 0:
                        # Vosk separates CJK characters with spaces; strip them.
                        text = text.replace(' ', '')
                        print(f"识别到完整文本: {text}")
                        # Broadcast the user's utterance.
                        message_queue.put(('user', text))

                        if '停止服务' in text:
                            # Voice command: shut the service down.
                            stop = True
                            message_queue.put(('system', '服务即将停止'))
                            status_queue.put(('inactive', '服务正在停止...'))
                else:
                    # Interim (partial) result for real-time feedback.
                    partial_result = json.loads(rec.PartialResult())
                    partial_text = partial_result.get("partial", "")
                    if len(partial_text) > 0 and partial_text != last_partial:
                        partial_text = partial_text.replace(' ', '')
                        print(f"部分识别: {partial_text}")
                        # Only forward partials above a minimum length to
                        # avoid flooding clients with tiny updates.
                        if len(partial_text) > 2:
                            message_queue.put(('partial', partial_text))
                            last_partial = partial_text
            except Exception as e:
                print(f"语音识别过程中出错: {e}")
                continue
    finally:
        stream.stop_stream()
        stream.close()
        p.terminate()

# Broadcast a chat message to every connected WebSocket client.
async def broadcast_message(source, text):
    """Send a JSON "message" frame to all clients.

    Args:
        source: origin of the text ('user', 'system', 'partial', ...).
        text: the message body.
    """
    if not clients:
        return
    message = json.dumps({"type": "message", "source": source, "text": text})
    # FIX: iterate over a snapshot — `clients` can be mutated by the
    # WebSocket handler while we await, which would otherwise raise
    # RuntimeError ("Set changed size during iteration").
    for client in list(clients):
        try:
            await client.send_text(message)
        except Exception as e:
            print(f"发送消息时出错: {e}")

# Broadcast a status update to every connected WebSocket client.
async def broadcast_status(status, message):
    """Send a JSON "status" frame to all clients.

    Args:
        status: machine-readable state ('listening', 'inactive', ...).
        message: human-readable description shown in the UI.
    """
    if not clients:
        return
    status_msg = json.dumps({"type": "status", "status": status, "message": message})
    # FIX: iterate over a snapshot — `clients` can be mutated by the
    # WebSocket handler while we await, which would otherwise raise
    # RuntimeError ("Set changed size during iteration").
    for client in list(clients):
        try:
            await client.send_text(status_msg)
        except Exception as e:
            print(f"发送状态时出错: {e}")

# Home page — serves the full-featured interface directly.
@app.get("/", response_class=HTMLResponse)
async def get_index(request: Request):
    """Render the full-featured UI as the landing page."""
    context = {"request": request}
    return templates.TemplateResponse("full_index.html", context)

# Test page route (simplified UI).
@app.get("/test", response_class=HTMLResponse)
async def get_test_page(request: Request):
    """Render the simplified test interface."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)

# Full-featured interface route (same template as the home page).
@app.get("/full", response_class=HTMLResponse)
async def get_full_page(request: Request):
    """Render the full-featured interface."""
    context = {"request": request}
    return templates.TemplateResponse("full_index.html", context)

# WebSocket endpoint: control channel plus result stream.
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """Accept a client, relay start/stop commands, and track connections.

    Protocol (JSON from client):
        {"action": "start_listening" | "stop_listening" | "ping"}
    """
    global listening_active
    await websocket.accept()

    clients.add(websocket)
    print(f"新的WebSocket连接，当前连接数: {len(clients)}")

    try:
        # Tell the new client the current (idle) state.
        await websocket.send_text(json.dumps({
            "type": "status",
            "status": "inactive",
            "message": "准备就绪，点击麦克风开始识别"
        }))

        while True:
            data = await websocket.receive_text()
            try:
                message = json.loads(data)
                action = message.get('action')
                if action == 'start_listening':
                    listening_active = True
                    await broadcast_status('listening', '正在聆听...')
                elif action == 'stop_listening':
                    listening_active = False
                    await broadcast_status('inactive', '准备就绪，点击麦克风开始识别')
                elif action == 'ping':
                    # Keep-alive ping from the browser.
                    await websocket.send_text(json.dumps({"type": "pong"}))
            except json.JSONDecodeError:
                print(f"无效的JSON: {data}")
    except WebSocketDisconnect:
        pass  # normal disconnect — cleanup happens in finally
    except Exception as e:
        print(f"WebSocket处理时出错: {e}")
    finally:
        # FIX: discard() never raises KeyError if the client was already
        # removed, and `finally` guarantees cleanup on every exit path
        # (the original duplicated removal across two except branches).
        clients.discard(websocket)
        print(f"WebSocket连接关闭，当前连接数: {len(clients)}")

if __name__ == "__main__":
    # Launch the FastAPI application with uvicorn's built-in server.
    host, port = "localhost", 8080
    uvicorn.run(app, host=host, port=port)