"""
Whisper Large V3 Turbo API服务
"""
import os
import logging
from contextlib import asynccontextmanager
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List, Union
import uvicorn
from prometheus_client import Counter, Histogram, generate_latest, CONTENT_TYPE_LATEST
from starlette.responses import Response

from .model_loader import WhisperModelLoader
from .inference import WhisperInference

# Logging configuration (level taken from LOG_LEVEL env var, default INFO)
logging.basicConfig(
    level=os.getenv("LOG_LEVEL", "INFO"),
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Prometheus metrics, labeled per endpoint
request_counter = Counter(
    "whisper_requests_total",
    "Total number of requests",
    ["endpoint", "status"]
)

# NOTE(review): request_duration and audio_duration are declared but never
# observed anywhere in this file — presumably wired up elsewhere or pending;
# confirm they are actually recorded.
request_duration = Histogram(
    "whisper_request_duration_seconds",
    "Request duration in seconds",
    ["endpoint"]
)

audio_duration = Histogram(
    "whisper_audio_duration_seconds",
    "Audio duration in seconds",
    ["endpoint"]
)

# Globals populated by the lifespan handler at startup
model_loader = None
inference_engine = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the Whisper model before serving, log cleanup on shutdown.

    Populates the module-level `model_loader` and `inference_engine` globals;
    re-raises any load failure so the application refuses to start.
    """
    global model_loader, inference_engine

    # --- startup: load model and build the inference engine ---
    logger.info("正在加载模型...")
    try:
        model_loader = WhisperModelLoader()
        model_loader.load_model()
        inference_engine = WhisperInference(
            model_loader.get_model(),
            model_loader.get_processor(),
            model_loader.get_device(),
        )
        logger.info("模型加载完成")
    except Exception as exc:
        logger.error(f"模型加载失败: {str(exc)}")
        raise

    yield

    # --- shutdown ---
    logger.info("正在清理资源...")


# FastAPI application; model loading is deferred to the lifespan handler above.
app = FastAPI(
    title="Whisper Large V3 Turbo API",
    description="基于昇腾910B的Whisper语音识别服务",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware.
# NOTE(review): browsers do not honor allow_origins=["*"] combined with
# allow_credentials=True (a wildcard origin cannot be used for credentialed
# requests) — confirm whether credentialed cross-origin access is needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class HealthResponse(BaseModel):
    """Response schema for GET /health."""
    status: str          # "healthy" when the service is up
    model_loaded: bool   # True once both model_loader and inference_engine are set
    npu_available: bool  # True when torch.npu reports an available NPU
    version: str         # service version string


class TranscriptionRequest(BaseModel):
    """Transcription request schema (OpenAI-style).

    NOTE(review): the /v1/audio/transcriptions endpoint reads these values from
    multipart form fields, not this model — confirm whether this schema is used
    elsewhere before removing it.
    """
    language: Optional[str] = None            # language hint forwarded to the engine
    prompt: Optional[str] = None              # optional text prompt
    response_format: Optional[str] = "json"   # "json" | "text" | "verbose_json"
    temperature: Optional[Union[float, List[float]]] = None  # sampling temperature(s)
    timestamp_granularities: Optional[List[str]] = None      # e.g. ["word"]


class BatchRequest(BaseModel):
    """Batch request schema for POST /v1/audio/batch.

    Each entry of `files` is a dict carrying a base64-encoded "file" payload and
    optional per-file "language"/"task" overrides.
    """
    files: List[dict]                   # [{"file": <base64 audio>, "language": ..., "task": ...}]
    model: Optional[str] = "whisper-large-v3-turbo"  # accepted for API compat; not read by the handler
    language: Optional[str] = None      # default language for entries that omit one
    task: Optional[str] = "transcribe"  # default task for entries that omit one


@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Health probe: report model-load state and NPU availability."""
    import torch

    engine_ready = model_loader is not None and inference_engine is not None
    npu_ok = hasattr(torch, 'npu') and torch.npu.is_available()
    return {
        "status": "healthy",
        "model_loaded": engine_ready,
        "npu_available": npu_ok,
        "version": "1.0.0",
    }


@app.get("/metrics")
async def metrics():
    """Expose Prometheus metrics in the text exposition format."""
    payload = generate_latest()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)


def _parse_temperature(raw: Optional[str]):
    """Parse the `temperature` form field into an engine argument.

    Returns a float when `raw` parses as one, otherwise Whisper's default
    fallback schedule. Fixes two defects of the original: an explicit "0" is
    kept as 0.0 (the old `temp_value or (...)` treated it as unset), and the
    bare `except:` is narrowed to ValueError.
    """
    default_schedule = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0)
    if raw is None or raw == "":
        return default_schedule
    try:
        return float(raw)
    except ValueError:
        return default_schedule


def _format_transcription(result: dict, response_format: str, language: Optional[str]):
    """Shape the inference result according to the requested response_format."""
    text = result.get("text", "")
    detected_language = result.get("language", language or "unknown")
    if response_format == "text":
        return text
    if response_format == "verbose_json":
        return {
            "task": "transcribe",
            "language": detected_language,
            "duration": result.get("duration", 0),
            "text": text,
            "words": result.get("chunks", []),
        }
    # Default: plain "json" format.
    return {"text": text, "language": detected_language}


@app.post("/v1/audio/transcriptions")
async def transcribe_audio(
    file: UploadFile = File(...),
    model: str = Form(default="whisper-large-v3-turbo"),
    language: Optional[str] = Form(default=None),
    prompt: Optional[str] = Form(default=None),
    response_format: str = Form(default="json"),
    temperature: Optional[str] = Form(default=None),
    timestamp_granularities: Optional[str] = Form(default=None)
):
    """
    Speech transcription endpoint (OpenAI Whisper API compatible).

    Persists the upload to a temporary file, runs the inference engine on it,
    and returns the result shaped per `response_format` ("json", "text" or
    "verbose_json"). Raises HTTP 500 on any failure.
    """
    request_counter.labels(endpoint="/v1/audio/transcriptions", status="received").inc()

    temp_path = None
    try:
        import tempfile

        # Persist the upload: the inference engine consumes a filesystem path.
        # `file.filename or ""` guards against a missing filename (splitext(None) raises).
        suffix = os.path.splitext(file.filename or "")[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
            tmp_file.write(await file.read())
            temp_path = tmp_file.name

        result = inference_engine.transcribe(
            audio=temp_path,
            language=language,
            task="transcribe",
            return_timestamps=timestamp_granularities is not None,
            temperature=_parse_temperature(temperature)
        )

        response = _format_transcription(result, response_format, language)
        # Count success only after a response was produced — the original
        # incremented "success" in `finally`, i.e. on error paths too.
        request_counter.labels(endpoint="/v1/audio/transcriptions", status="success").inc()
        return response

    except Exception as e:
        logger.error(f"转录失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/transcriptions", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e)) from e
    finally:
        # Always remove the temp file — the original leaked it when transcription failed.
        if temp_path:
            try:
                os.unlink(temp_path)
            except OSError:
                pass


@app.post("/v1/audio/translations")
async def translate_audio(
    file: UploadFile = File(...),
    model: str = Form(default="whisper-large-v3-turbo")
):
    """
    Speech translation endpoint (translates to English).
    Follows the OpenAI Whisper API protocol; raises HTTP 500 on failure.
    """
    request_counter.labels(endpoint="/v1/audio/translations", status="received").inc()

    temp_path = None
    try:
        import tempfile

        # Persist the upload: the inference engine consumes a filesystem path.
        # `file.filename or ""` guards against a missing filename (splitext(None) raises).
        suffix = os.path.splitext(file.filename or "")[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
            tmp_file.write(await file.read())
            temp_path = tmp_file.name

        result = inference_engine.transcribe(
            audio=temp_path,
            task="translate"
        )

        # Count success only on the success path — the original incremented
        # the "success" counter in `finally`, i.e. on error paths too.
        request_counter.labels(endpoint="/v1/audio/translations", status="success").inc()
        return {
            "text": result.get("text", "")
        }

    except Exception as e:
        logger.error(f"翻译失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/translations", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e)) from e
    finally:
        # Always remove the temp file — the original leaked it when translation failed.
        if temp_path:
            try:
                os.unlink(temp_path)
            except OSError:
                pass


@app.post("/v1/audio/batch")
async def batch_process(request: BatchRequest):
    """
    Batch processing endpoint.

    Each entry of `request.files` must carry a base64-encoded "file" payload
    and may override "language"/"task". Per-file failures are reported inside
    the results list and the batch continues; only a failure of the batch
    machinery itself yields HTTP 500.
    """
    request_counter.labels(endpoint="/v1/audio/batch", status="received").inc()

    import base64
    import tempfile

    results = []
    temp_files = []
    try:
        for idx, file_data in enumerate(request.files):
            try:
                # Decode the base64 audio payload.
                audio_bytes = base64.b64decode(file_data.get("file", ""))

                # Persist to a temp file: the inference engine consumes a path.
                with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
                    tmp_file.write(audio_bytes)
                    temp_path = tmp_file.name
                    temp_files.append(temp_path)

                result = inference_engine.transcribe(
                    audio=temp_path,
                    language=file_data.get("language", request.language),
                    task=file_data.get("task", request.task or "transcribe")
                )

                results.append({
                    "index": idx,
                    "text": result.get("text", ""),
                    "language": result.get("language", "unknown")
                })

            except Exception as e:
                # Per-file failure: record it and continue with the rest of the batch.
                logger.error(f"处理文件 {idx} 失败: {str(e)}")
                results.append({
                    "index": idx,
                    "error": str(e)
                })

        # Count success only on the success path — the original incremented
        # the "success" counter in `finally`, i.e. on error paths too.
        request_counter.labels(endpoint="/v1/audio/batch", status="success").inc()
        return {"results": results}

    except Exception as e:
        logger.error(f"批量处理失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/batch", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e)) from e
    finally:
        # Best-effort cleanup on success and failure alike (the original only
        # cleaned up on success); bare `except:` narrowed to OSError so real
        # bugs are not silently swallowed.
        for tmp_path in temp_files:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass


if __name__ == "__main__":
    # Entry point when running the service directly; port/worker count come
    # from the environment.
    port = int(os.getenv("PORT", 8000))
    workers = int(os.getenv("WORKERS", 1))
    
    # NOTE(review): this module uses package-relative imports ("from .model_loader
    # import ..."), which fail when the file is executed as a top-level script, and
    # the "main:app" import string assumes the module is importable as "main" —
    # confirm the intended deployment entry point (e.g. "package.main:app").
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=port,
        workers=workers,
        log_level=os.getenv("LOG_LEVEL", "info").lower()
    )


