import asyncio
import hashlib
import subprocess
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

import uvicorn
# NOTE(review): aiohttp's FileResponse is NOT a Starlette response and cannot
# be returned from a FastAPI endpoint; it is deliberately shadowed below by
# fastapi.responses.FileResponse. Kept only to avoid removing an import.
from aiohttp.web_fileresponse import FileResponse
from fastapi import FastAPI, File, HTTPException, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, StreamingResponse
from fastapi_cache.decorator import cache
from prometheus_client import CONTENT_TYPE_LATEST, Counter, Histogram, generate_latest
from pydantic import BaseModel
from starlette.responses import JSONResponse, Response

from audio_utils import TTSManager, STTManager
from llm_service import MockLLMService, OpenAIService

# Configuration constants.
CONFIG = {
    # NOTE(review): an env-var override was commented out here:
    # "VOSK_MODEL_PATH": os.getenv("VOSK_MODEL_PATH", "models"),
    # This key is currently unused — init_components() hard-codes its own path.
    "VOSK_MODEL_PATH": r"E:\Dept\Python\Model\vosk-model-cn-0.22",
    "AUDIO_CACHE_DIR": "audio_cache",  # created at startup by init_components()
    "MAX_WORKERS": 4,  # thread-pool size for blocking recognition work
    "SAMPLE_RATE": 16000,  # Hz; NOTE(review): not referenced anywhere in this file
    "MAX_AUDIO_SIZE": 1024 * 1024 * 5  # 5MB upload ceiling for /api/voice
}

# Prometheus monitoring metrics (exposed via the /metrics endpoint).
REQUEST_COUNT = Counter('http_requests', 'Total HTTP requests', ['method', 'endpoint'])
RESPONSE_TIME = Histogram('http_response_time', 'Response time in seconds', ['endpoint'])
ERROR_COUNT = Counter('http_errors', 'Total HTTP errors', ['status_code'])


class TextRequest(BaseModel):
    """Request body for the text endpoints: the user's question/prompt."""
    text: str


class AudioResponse(BaseModel):
    """Common response shape: answer/recognised text plus an audio path."""
    text: str
    # Currently always "" — reserved for future server-side audio generation.
    audio_path: str


def init_components(model_path: str = r"E:\Dept\Python\Model\vosk-model-small-cn-0.22"):
    """Initialise the speech-to-text model and the worker thread pool.

    Args:
        model_path: Filesystem path to the Vosk model directory. Defaults to
            the small Chinese model the original code hard-coded.
            NOTE(review): CONFIG["VOSK_MODEL_PATH"] points at a different
            (larger) model and is currently unused — confirm which is intended.

    Returns:
        Tuple of (STTManager, ThreadPoolExecutor).

    Raises:
        Exception: re-raised after logging if any component fails to load.
    """
    try:
        # Ensure the audio cache directory exists before anything writes to it.
        Path(CONFIG["AUDIO_CACHE_DIR"]).mkdir(exist_ok=True)

        # Load the Vosk speech-to-text model.
        stt = STTManager(model_path)

        # Thread pool used to off-load blocking recognition work.
        pool = ThreadPoolExecutor(max_workers=CONFIG["MAX_WORKERS"])

        return stt, pool
    except Exception as e:
        print(f"初始化失败: {str(e)}")
        raise


# Global component initialisation (runs at import time).
# model, executor = init_components()
sst_manager, executor = init_components()
# executor = init_components()
app = FastAPI()

# CORS: allow all origins/methods/headers.
# NOTE(review): wide-open CORS — confirm this is acceptable for deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.middleware("http")
async def global_error_handler(request, call_next):
    """Last-resort middleware: turn any unhandled exception into a 500 JSON reply."""
    try:
        response = await call_next(request)
    except Exception:
        # Count the failure, dump the traceback server-side, and return a
        # generic body so internals are not leaked to the client.
        ERROR_COUNT.labels(500).inc()
        traceback.print_exc()
        return JSONResponse(
            status_code=500,
            content={"error": "Internal Server Error"}
        )
    return response


@app.middleware("http")
async def monitor_requests(request, call_next):
    """Metrics middleware: record request count and latency per endpoint."""
    endpoint = request.url.path
    started = time.time()

    try:
        response = await call_next(request)
    except Exception:
        # Failures are counted as 500s and re-raised for the outer handler.
        ERROR_COUNT.labels(500).inc()
        raise

    # Successful requests contribute to both counters.
    elapsed = time.time() - started
    REQUEST_COUNT.labels(request.method, endpoint).inc()
    RESPONSE_TIME.labels(endpoint).observe(elapsed)
    return response


@app.get("/metrics")
async def metrics():
    """Prometheus scrape endpoint.

    Returns the current metric samples in the exposition text format. The
    original returned them without a media type; `CONTENT_TYPE_LATEST`
    supplies the `Content-Type` header Prometheus scrapers expect.
    """
    return Response(generate_latest(), media_type=CONTENT_TYPE_LATEST)


async def async_speech_to_text(audio_data: bytes) -> str:
    """Run blocking speech recognition on the thread pool and return the text.

    (Fix: the original placed this docstring *after* the first statement,
    so it was a no-op string expression, not the function docstring.)

    Args:
        audio_data: Raw WAV bytes from the uploaded file.

    Returns:
        The recognised text.

    Raises:
        RuntimeError: on any recognition failure (including too-short audio,
            which raises ValueError inside the try and is then wrapped).
    """
    print(f"收到音频数据长度: {len(audio_data)} bytes")

    # We are inside a coroutine, so get_running_loop() is the correct,
    # non-deprecated way to obtain the loop (get_event_loop() is deprecated
    # here since Python 3.10).
    loop = asyncio.get_running_loop()
    try:
        # NOTE(review): at 16 kHz / 16-bit mono, 16000 bytes is ~0.5 s, not
        # the 1 s the original comment claimed — confirm the intended minimum.
        if len(audio_data) < 16000:
            raise ValueError("音频时长不足")

        def _recognize():
            # NOTE(review): the manager is passed explicitly as the first
            # argument, implying speech_to_text is declared without a bound
            # `self` — verify against STTManager before "fixing" this call.
            return sst_manager.speech_to_text(sst_manager, audio_data)

        return await loop.run_in_executor(executor, _recognize)
    except Exception as e:
        ERROR_COUNT.labels(500).inc()
        print(f"语音识别失败详情: {traceback.format_exc()}")
        raise RuntimeError("语音识别失败") from e


@app.post("/api/voice", response_model=AudioResponse)
async def voice_to_text(file: UploadFile = File(...)):
    """Speech-to-text endpoint: accept a WAV upload, return the recognised text.

    Raises:
        HTTPException 400: non-WAV content type or empty file.
        HTTPException 413: payload exceeds CONFIG["MAX_AUDIO_SIZE"].
        HTTPException 500: recognition failure.
    """
    # Validate the uploaded audio file.
    if file.content_type not in ["audio/wav", "audio/x-wav"]:
        raise HTTPException(400, "仅支持WAV格式")
    if file.size == 0:
        raise HTTPException(400, "空音频文件")
    # Fix: file.size can be None for streamed uploads; the original compared
    # it to an int unconditionally, which raises TypeError in that case.
    if file.size is not None and file.size > CONFIG["MAX_AUDIO_SIZE"]:
        ERROR_COUNT.labels(413).inc()
        raise HTTPException(413, "音频文件过大")

    try:
        audio_data = await file.read()
        # Re-check the actual payload size in case file.size was unknown.
        if len(audio_data) > CONFIG["MAX_AUDIO_SIZE"]:
            ERROR_COUNT.labels(413).inc()
            raise HTTPException(413, "音频文件过大")
        text = await async_speech_to_text(audio_data)

        return {
            "text": text,
            "audio_path": ""  # reserved for future extension
        }
    except HTTPException:
        raise  # don't wrap our own 413 into a 500
    except Exception as e:
        ERROR_COUNT.labels(500).inc()
        raise HTTPException(500, str(e))


@app.post("/api/text", response_model=AudioResponse)
async def text_to_text(request: TextRequest):
    """Text Q&A endpoint: return an answer for the question in request.text."""
    started = time.time()

    try:
        reply = await process_text(request.text)
    except Exception as e:
        ERROR_COUNT.labels(500).inc()
        raise HTTPException(500, str(e))
    else:
        return {
            "text": reply,
            "audio_path": ""  # client generates the speech locally
        }
    finally:
        # NOTE(review): monitor_requests also times this endpoint, so latency
        # is observed twice for /api/text — confirm whether that is intended.
        RESPONSE_TIME.labels("/api/text").observe(time.time() - started)


# Streaming text endpoint (SSE).
@app.post("/v1/stream_text")
async def stream_text(request: TextRequest):
    """Server-sent-events endpoint; currently emits a single fixed test event."""
    def _events():
        # Placeholder stream — real answer streaming is not wired in yet.
        yield "data: test\n\n"

    return StreamingResponse(_events(), media_type="text/event-stream")


@app.post("/text_to_voice")
@cache(expire=3600)  # cache results for one hour
async def text_to_voice(text: str):
    """Generate (and cache on disk) compressed Opus speech audio for *text*.

    The text is synthesised to a temporary WAV, transcoded with ffmpeg, and
    served from ``cache/<md5>.opus`` on subsequent calls.

    Raises:
        subprocess.CalledProcessError: if the ffmpeg transcode fails.
    """
    # Deterministic cache key for this exact text (not security-sensitive).
    key = hashlib.md5(f"{text}".encode()).hexdigest()
    # Fix: only "audio_cache" was created at startup, so writing into a
    # missing "cache/" directory made every ffmpeg run fail.
    Path("cache").mkdir(exist_ok=True)
    audio_path = f"cache/{key}.opus"

    if Path(audio_path).exists():
        return FileResponse(audio_path)

    # Synthesise the raw audio, then compress it.
    # NOTE(review): "temp.wav" is a shared fixed name — concurrent requests
    # can clobber each other; consider tempfile.NamedTemporaryFile.
    TTSManager.save_to_file(text, "temp.wav")

    # Transcode to low-bitrate Opus. "-y" overwrites stale output; check=True
    # surfaces ffmpeg failures instead of silently serving a missing file.
    subprocess.run([
        "ffmpeg", "-y", "-i", "temp.wav",
        "-c:a", "libopus",
        "-b:a", "12k",  # 12 kbps bitrate
        "-vbr", "on",
        audio_path
    ], check=True)

    return FileResponse(audio_path)


# Interleaved voice-in / voice-out endpoint.
@app.post("/voice_to_voice")
async def voice_to_voice(file: UploadFile = File(...)):
    """Recognise the uploaded speech, answer it, and stream back TTS audio."""
    async def _stream():
        try:
            # Speech recognition.
            payload = await file.read()
            question = await async_speech_to_text(payload)

            # Produce the full textual answer.
            answer = await process_text(question)

            # Synthesise the reply audio.
            audio = TTSManager.text_to_stream(answer)

            # Emit the audio in fixed-size chunks.
            step = 4096
            offset = 0
            while offset < len(audio):
                yield audio[offset:offset + step]
                offset += step

        except Exception as e:
            print(f"语音交互错误: {str(e)}")
            yield b"ERROR"

    return StreamingResponse(
        _stream(),
        media_type="audio/wav"  # adjust to the actual audio format
    )


# Async generator variant of the answer pipeline.
async def generate_answer(question):
    """Asynchronously stream the answer for *question*, one word at a time."""
    if len(question) < 3:
        # Too short to be a real question — apologise and stop.
        yield "对不起，没有听见您的声音。"
        return

    answer = MockLLMService.get_answer(question)
    for token in answer.split():
        await asyncio.sleep(0.1)  # simulated generation latency
        yield token


async def process_text(text: str) -> str:
    """Core text-handling logic: return a complete answer string for *text*.

    Bug fix: the original returned the *async generator object* produced by
    ``generate_answer(text)`` (shipped, uselessly, through run_in_executor)
    instead of the answer text, so callers serialised
    ``<async_generator ...>`` rather than the answer. The generator is now
    fully consumed and joined into a single string.

    Raises:
        HTTPException 500: on any processing failure.
    """
    try:
        if len(text) == 0:
            return "您好，请您提问。"
        # Consume the streaming generator into one answer string.
        words = [word async for word in generate_answer(text)]
        return " ".join(words)
    except Exception:
        ERROR_COUNT.labels(500).inc()
        raise HTTPException(500, "文本处理失败")


# def generate_answer(question):
#     if len(question) >= 3:
#         answer = MockLLMService.get_answer(question)
#         if answer == "对不起，没有找到答案。":
#             openai_serv = OpenAIService()
#             model_id = "qwen2.5/qwen7b"
#             answer = openai_serv.call_api(model_id, question)
#         return answer
#     else:
#         return "对不起，没有听见您的声音。"
# 修改生成逻辑为异步生成器
# async def generate_answer(question):
#     """异步流式生成答案"""
#     if len(question) >= 3:
#         # 模拟流式生成
#         words = MockLLMService.get_answer(question).split()
#         for word in words:
#             await asyncio.sleep(0.1)  # 模拟处理延迟
#             yield word
#     else:
#         yield "对不起，没有听见您的声音。"


if __name__ == "__main__":
    # Run the API with uvicorn when executed directly (binds all interfaces).
    uvicorn.run(app, host="0.0.0.0", port=8000)
