#!/usr/bin/env python3
"""
ASR独立应用 - 简化版
专注于音频转文字和实时麦克风识别功能
"""

import asyncio
import logging
import signal
import sys
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Optional
import json

import uvicorn
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException, UploadFile, File
from fastapi.responses import JSONResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles

# Add the project root directory to the Python path so local packages resolve
sys.path.insert(0, str(Path(__file__).parent))

from app.asr import ASRService
from config.config_manager import get_config, AppConfig

# Module-level shared state:
#   asr_service        - the single ASR engine, created in lifespan()
#   active_connections - currently open WebSocket clients (broadcast targets)
#   app_config         - configuration loaded once at import time
asr_service: Optional[ASRService] = None
active_connections: set = set()
app_config: Optional[AppConfig] = None

# 初始化配置和日志
def init_logging_and_config():
    """Load the application config and configure root logging from it.

    Sets the module-level ``app_config`` as a side effect. The logging
    level name from the config must match a ``logging`` attribute
    (e.g. "INFO", "DEBUG"), otherwise ``getattr`` raises AttributeError.
    """
    global app_config

    app_config = get_config()
    level = getattr(logging, app_config.logging.level)
    logging.basicConfig(level=level, format=app_config.logging.format)

# Initialize configuration and logging at import time, before the FastAPI
# app object below is constructed, so early log calls are already configured.
init_logging_and_config()
logger = logging.getLogger("ASR_MAIN")


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager.

    Creates the ASR service before the server starts serving requests and
    tears it down on shutdown. The ``finally`` block runs on every exit
    path, so ``cleanup()`` is called whether startup succeeded or failed.
    """
    global asr_service, app_config

    logger.info("启动ASR应用...")

    try:
        # Initialize the ASR service with the config loaded at import time
        asr_service = ASRService(app_config)
        logger.info("ASR服务初始化完成")

        yield

    except Exception as e:
        # NOTE(review): exceptions raised while the app is running also
        # propagate back through the yield, not only startup failures,
        # so this log message ("startup failed") can be misleading.
        logger.error(f"应用启动失败: {e}", exc_info=True)
        raise
    finally:
        # Release ASR resources on shutdown
        logger.info("正在关闭ASR应用...")
        if asr_service:
            asr_service.cleanup()
        logger.info("ASR应用已关闭")


async def broadcast_message(message: dict):
    """Send *message* (JSON-encoded) to every open WebSocket client.

    Best-effort fan-out: any client whose send raises is considered
    disconnected and removed from the global connection set.
    """
    if not active_connections:
        return

    payload = json.dumps(message, ensure_ascii=False)

    # Iterate over a snapshot so the set may be mutated while sending.
    for client in list(active_connections):
        try:
            await client.send_text(payload)
        except Exception:
            # Drop clients we can no longer reach.
            active_connections.discard(client)


# Create the FastAPI application; lifespan() handles ASR setup/teardown
app = FastAPI(
    title="ASR简化应用",
    description="音频转文字和实时麦克风识别",
    version="1.0.0",
    lifespan=lifespan
)

# Enable CORS for all origins.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec (browsers reject wildcard origins when
# credentials are sent) — restrict origins if credentials are really needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve front-end assets from ./static (path resolved relative to the CWD)
app.mount("/static", StaticFiles(directory="static"), name="static")


@app.get("/")
async def root():
    """Serve the front-end entry page as a static HTML file."""
    return FileResponse("static/index.html")


@app.post("/upload_audio")
async def upload_audio(file: UploadFile = File(...)):
    """Accept an uploaded audio file and return the recognized text.

    Returns a JSON dict: ``{"success": True, "text", "filename",
    "file_size"}`` on success, or ``{"success": False, "error": ...}`` on
    validation or recognition failure.

    Raises:
        HTTPException: 500 if the ASR service was never initialized.
    """
    if not asr_service:
        raise HTTPException(status_code=500, detail="ASR服务未初始化")

    try:
        # Validate the extension FIRST so unsupported files are rejected
        # before we spend time/memory reading their entire content
        # (the original validated only after the full read).
        if file.filename:
            file_extension = file.filename.split('.')[-1].lower()
            if file_extension not in app_config.upload.allowed_formats:
                return {
                    "success": False,
                    "error": f"不支持的文件格式: {file_extension}"
                }

        max_size = app_config.upload.max_file_size * 1024 * 1024  # MB -> bytes

        # Accumulate into a bytearray: amortized O(n) instead of the
        # quadratic `bytes += chunk` pattern. Abort as soon as the
        # configured size limit is exceeded.
        buffer = bytearray()
        while chunk := await file.read(8192):  # 8KB chunks
            buffer.extend(chunk)
            if len(buffer) > max_size:
                return {
                    "success": False,
                    "error": f"文件大小超过{app_config.upload.max_file_size}MB限制"
                }

        audio_data = bytes(buffer)
        file_size = len(audio_data)

        logger.info(f"开始处理音频文件: {file.filename}, 大小: {file_size} bytes")

        # NOTE(review): recognize_audio_file looks synchronous; a long
        # recognition will block the event loop — consider
        # run_in_executor if recognition is slow. TODO confirm.
        result = asr_service.recognize_audio_file(audio_data)

        return {
            "success": True,
            "text": result,
            "filename": file.filename,
            "file_size": file_size
        }

    except Exception as e:
        logger.error(f"音频识别失败: {e}", exc_info=True)
        return {
            "success": False,
            "error": str(e)
        }


@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for realtime microphone recognition.

    Accepts the connection, registers it for broadcasts, starts realtime
    recognition (results are pushed to all clients via broadcast_message),
    then keeps reading until the client disconnects.
    """
    # BUG FIX: the original declared `global simple_asr`, a stale name that
    # exists nowhere in this module. The service global is `asr_service`,
    # and it is only read here, so no global declaration is needed at all.

    await websocket.accept()
    active_connections.add(websocket)
    logger.info(f"WebSocket连接已建立，当前连接数: {len(active_connections)}")

    try:
        # Start realtime recognition; results are fanned out to every
        # connected client, not only this one.
        if asr_service:
            async def asr_callback(text: str):
                await broadcast_message({"type": "asr_result", "text": text})

            await asr_service.start_realtime_recognition(callback=asr_callback)

        # Keep the socket open; incoming client messages are only logged.
        while True:
            try:
                data = await websocket.receive_text()
                logger.info(f"收到WebSocket消息: {data}")
            except WebSocketDisconnect:
                break

    except Exception as e:
        logger.error(f"WebSocket错误: {e}", exc_info=True)
    finally:
        active_connections.discard(websocket)
        # NOTE(review): this stops recognition when ANY client disconnects,
        # even while other clients remain connected — confirm intended.
        if asr_service:
            asr_service.stop_realtime_recognition()
        logger.info(f"WebSocket连接已断开，当前连接数: {len(active_connections)}")


def signal_handler(signum, frame):
    """Handle SIGINT/SIGTERM: log the signal number and exit cleanly."""
    # Lazy %-style args defer formatting to the logging framework;
    # the rendered message is identical to the original f-string.
    logger.info("收到信号 %s，正在关闭应用...", signum)
    sys.exit(0)


def main():
    """Entry point: install signal handlers and launch the uvicorn server."""
    # Graceful shutdown on Ctrl-C and `kill`
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, signal_handler)

    # Server parameters come from the config loaded at import time.
    server_cfg = app_config.server
    uvicorn.run(
        "main:app",
        host=server_cfg.host,
        port=server_cfg.port,
        reload=server_cfg.debug,
        log_level=app_config.logging.level.lower(),
    )


# Start the server only when executed directly (not when imported as "main")
if __name__ == "__main__":
    main()
