"""
AudioFly API服务
"""
import os
import logging
from contextlib import asynccontextmanager
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List, Union
import uvicorn
from prometheus_client import Counter, Histogram, generate_latest, CONTENT_TYPE_LATEST
from starlette.responses import Response

from .model_loader import AudioFlyModelLoader
from .inference import AudioFlyInference

# Logging configuration — level comes from the LOG_LEVEL env var (default INFO).
logging.basicConfig(
    level=os.getenv("LOG_LEVEL", "INFO"),
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Prometheus metrics: per-endpoint request counts (labelled by outcome and
# task) and request latency histogram.
request_counter = Counter(
    "audiofly_requests_total",
    "Total number of requests",
    ["endpoint", "status", "task"]
)

request_duration = Histogram(
    "audiofly_request_duration_seconds",
    "Request duration in seconds",
    ["endpoint"]
)

# Module-level singletons, populated by the lifespan handler at startup.
# They stay None if the app is imported without running the lifespan.
model_loader: Optional["AudioFlyModelLoader"] = None
inference_engine: Optional["AudioFlyInference"] = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the model at startup, clean up at shutdown.

    Populates the module-level ``model_loader`` and ``inference_engine``
    globals before the server starts accepting requests. Re-raises on load
    failure so the process fails fast instead of serving 500s.
    """
    global model_loader, inference_engine
    
    # Load model weights once at startup; handlers share the same engine.
    logger.info("正在加载模型...")
    try:
        model_loader = AudioFlyModelLoader()
        model_loader.load_model()
        inference_engine = AudioFlyInference(
            model_loader.get_model(),
            model_loader.get_processor(),
            model_loader.get_device()
        )
        logger.info("模型加载完成")
    except Exception as e:
        logger.error(f"模型加载失败: {str(e)}")
        raise
    
    yield
    
    # Shutdown: currently only logs — resources are process-scoped and freed
    # when the interpreter exits.
    logger.info("正在清理资源...")


# FastAPI application; the lifespan handler loads the model before serving.
app = FastAPI(
    title="AudioFly API",
    description="基于昇腾910B的AudioFly音频处理服务",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware — wide open ("*") for all origins/methods/headers.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers for credentialed requests; tighten for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class HealthResponse(BaseModel):
    """Response schema for the /health endpoint."""
    status: str                 # always "healthy" when the handler runs
    model_loaded: bool          # True once the lifespan finished loading
    npu_available: bool         # Ascend NPU availability via torch.npu
    version: str                # service version string
    supported_tasks: List[str]  # task names accepted by the endpoints


class BatchRequest(BaseModel):
    """Request schema for /v1/audio/batch.

    Each entry of ``files`` is a dict with a base64-encoded "file" value and
    optional per-item "task" / "language" keys.
    """
    files: List[dict]
    task: Optional[str] = "transcription"  # default task for items without one


@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Report service health, model-load state and NPU availability."""
    import torch

    # torch.npu only exists on Ascend builds of torch; probe defensively.
    if hasattr(torch, 'npu'):
        npu_ok = torch.npu.is_available()
    else:
        npu_ok = False

    loaded = model_loader is not None and inference_engine is not None

    return {
        "status": "healthy",
        "model_loaded": loaded,
        "npu_available": npu_ok,
        "version": "1.0.0",
        "supported_tasks": ["transcription", "translation", "classification", "enhancement"]
    }


@app.get("/metrics")
async def metrics():
    """Expose Prometheus metrics in the text exposition format."""
    payload = generate_latest()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)


@app.post("/v1/audio/generate")
async def generate_audio(
    text: str = Form(...),
    name: Optional[str] = Form(default=None),
    savedir: Optional[str] = Form(default=None),
    cfg: float = Form(default=3.5),
    ddim_steps: int = Form(default=200)
):
    """
    Text-to-audio generation endpoint (AudioFly's primary task).

    Reference: https://ai.gitcode.com/ifly_opensource/AudioFly/blob/main/README.md

    Args mirror the AudioFly CLI: classifier-free-guidance scale ``cfg`` and
    DDIM sampling steps ``ddim_steps``. Returns the raw engine result.
    Raises 503 if the model is not loaded, 500 on generation failure.
    """
    request_counter.labels(endpoint="/v1/audio/generate", status="received", task="generation").inc()

    try:
        if inference_engine is None:
            raise HTTPException(status_code=503, detail="Model not loaded")

        result = inference_engine.process(
            text=text,
            task="generation",
            name=name,
            savedir=savedir or "/app/result",
            cfg=cfg,
            ddim_steps=ddim_steps
        )
    except HTTPException:
        request_counter.labels(endpoint="/v1/audio/generate", status="error", task="generation").inc()
        raise
    except Exception as e:
        logger.error(f"音频生成失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/generate", status="error", task="generation").inc()
        raise HTTPException(status_code=500, detail=str(e))
    else:
        # Count success only on the success path — the previous `finally`
        # incremented "success" even for failed requests, double-counting.
        request_counter.labels(endpoint="/v1/audio/generate", status="success", task="generation").inc()
        return result


@app.post("/v1/audio/process")
async def process_audio(
    file: Optional[UploadFile] = File(default=None),
    text: Optional[str] = Form(default=None),
    task: str = Form(...),
    language: Optional[str] = Form(default=None),
    output_format: str = Form(default="json")
):
    """
    Generic audio processing endpoint.

    Supports text-to-audio generation (``text``) and audio processing
    (``file``). Returns the raw engine result, or only its "text" field when
    ``output_format`` is "text". Raises 400 when neither ``file`` nor
    ``text`` is supplied, 503 when the model is not loaded, 500 otherwise.
    """
    request_counter.labels(endpoint="/v1/audio/process", status="received", task=task).inc()

    temp_path = None
    try:
        if inference_engine is None:
            raise HTTPException(status_code=503, detail="Model not loaded")

        # Text-to-audio generation branch.
        if text and (task == "generation" or not file):
            result = inference_engine.process(
                text=text,
                task="generation"
            )
            request_counter.labels(endpoint="/v1/audio/process", status="success", task=task).inc()
            return result

        if not file:
            raise HTTPException(status_code=400, detail="需要提供file或text参数")

        import tempfile
        import aiofiles

        # Persist the upload to a temp file; cleanup lives in `finally` so a
        # processing failure can no longer leak the file.
        suffix = os.path.splitext(file.filename or "")[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
            temp_path = tmp_file.name
        async with aiofiles.open(temp_path, 'wb') as f:
            await f.write(await file.read())

        result = inference_engine.process(
            audio=temp_path,
            task=task,
            language=language
        )

        request_counter.labels(endpoint="/v1/audio/process", status="success", task=task).inc()

        if output_format == "text" and "text" in result:
            return result["text"]
        return result

    except HTTPException:
        # Preserve deliberate HTTP errors (e.g. the 400 above) instead of
        # letting the generic handler convert them into 500s.
        request_counter.labels(endpoint="/v1/audio/process", status="error", task=task).inc()
        raise
    except Exception as e:
        logger.error(f"处理失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/process", status="error", task=task).inc()
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        if temp_path is not None:
            try:
                os.unlink(temp_path)
            except OSError:
                pass


@app.post("/v1/audio/transcribe")
async def transcribe_audio(
    file: UploadFile = File(...),
    language: Optional[str] = Form(default=None),
    return_timestamps: bool = Form(default=False)
):
    """
    Speech-to-text endpoint.

    Saves the upload to a temp file, runs the "transcription" task, and
    returns the engine result. Raises 503 when the model is not loaded,
    500 on failure. The temp file is always removed, even on error.
    """
    request_counter.labels(endpoint="/v1/audio/transcribe", status="received", task="transcription").inc()

    temp_path = None
    try:
        if inference_engine is None:
            raise HTTPException(status_code=503, detail="Model not loaded")

        import tempfile
        import aiofiles

        suffix = os.path.splitext(file.filename or "")[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
            temp_path = tmp_file.name
        async with aiofiles.open(temp_path, 'wb') as f:
            await f.write(await file.read())

        result = inference_engine.process(
            audio=temp_path,
            task="transcription",
            language=language,
            return_timestamps=return_timestamps
        )
    except HTTPException:
        request_counter.labels(endpoint="/v1/audio/transcribe", status="error", task="transcription").inc()
        raise
    except Exception as e:
        logger.error(f"转录失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/transcribe", status="error", task="transcription").inc()
        raise HTTPException(status_code=500, detail=str(e))
    else:
        # Success is counted only here — previously `finally` also bumped
        # "success" for failed requests.
        request_counter.labels(endpoint="/v1/audio/transcribe", status="success", task="transcription").inc()
        return result
    finally:
        # Temp-file cleanup now happens on every path (the original leaked
        # the file when process() raised).
        if temp_path is not None:
            try:
                os.unlink(temp_path)
            except OSError:
                pass


@app.post("/v1/audio/translate")
async def translate_audio(
    file: UploadFile = File(...),
    source_language: Optional[str] = Form(default=None),
    target_language: str = Form(default="en")
):
    """
    Speech translation endpoint.

    Saves the upload to a temp file, runs the "translation" task, and
    returns the engine result. Raises 503 when the model is not loaded,
    500 on failure. The temp file is always removed, even on error.

    NOTE(review): ``target_language`` is accepted but not forwarded to the
    engine — confirm whether inference_engine.process supports it.
    """
    request_counter.labels(endpoint="/v1/audio/translate", status="received", task="translation").inc()

    temp_path = None
    try:
        if inference_engine is None:
            raise HTTPException(status_code=503, detail="Model not loaded")

        import tempfile
        import aiofiles

        suffix = os.path.splitext(file.filename or "")[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
            temp_path = tmp_file.name
        async with aiofiles.open(temp_path, 'wb') as f:
            await f.write(await file.read())

        result = inference_engine.process(
            audio=temp_path,
            task="translation",
            language=source_language
        )
    except HTTPException:
        request_counter.labels(endpoint="/v1/audio/translate", status="error", task="translation").inc()
        raise
    except Exception as e:
        logger.error(f"翻译失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/translate", status="error", task="translation").inc()
        raise HTTPException(status_code=500, detail=str(e))
    else:
        # Success counted only on the success path (the original `finally`
        # also incremented it for errors).
        request_counter.labels(endpoint="/v1/audio/translate", status="success", task="translation").inc()
        return result
    finally:
        # Cleanup on every path — the original leaked the temp file when
        # process() raised.
        if temp_path is not None:
            try:
                os.unlink(temp_path)
            except OSError:
                pass


@app.post("/v1/audio/classify")
async def classify_audio(
    file: UploadFile = File(...)
):
    """
    Audio classification endpoint.

    Saves the upload to a temp file, runs the "classification" task, and
    returns the engine result. Raises 503 when the model is not loaded,
    500 on failure. The temp file is always removed, even on error.
    """
    request_counter.labels(endpoint="/v1/audio/classify", status="received", task="classification").inc()

    temp_path = None
    try:
        if inference_engine is None:
            raise HTTPException(status_code=503, detail="Model not loaded")

        import tempfile
        import aiofiles

        suffix = os.path.splitext(file.filename or "")[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
            temp_path = tmp_file.name
        async with aiofiles.open(temp_path, 'wb') as f:
            await f.write(await file.read())

        result = inference_engine.process(
            audio=temp_path,
            task="classification"
        )
    except HTTPException:
        request_counter.labels(endpoint="/v1/audio/classify", status="error", task="classification").inc()
        raise
    except Exception as e:
        logger.error(f"分类失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/classify", status="error", task="classification").inc()
        raise HTTPException(status_code=500, detail=str(e))
    else:
        # Success counted only on the success path (the original `finally`
        # also incremented it for errors).
        request_counter.labels(endpoint="/v1/audio/classify", status="success", task="classification").inc()
        return result
    finally:
        # Cleanup on every path — the original leaked the temp file when
        # process() raised.
        if temp_path is not None:
            try:
                os.unlink(temp_path)
            except OSError:
                pass


@app.post("/v1/audio/batch")
async def batch_process(request: BatchRequest):
    """
    Batch processing endpoint.

    Each entry in ``request.files`` carries a base64-encoded "file" plus
    optional "task" / "language" keys. Per-item failures are reported in the
    corresponding result entry without aborting the batch. Raises 503 when
    the model is not loaded, 500 on a whole-batch failure. All temp files
    are removed even when the batch aborts.
    """
    default_task = request.task or "transcription"
    request_counter.labels(endpoint="/v1/audio/batch", status="received", task=default_task).inc()

    try:
        if inference_engine is None:
            raise HTTPException(status_code=503, detail="Model not loaded")

        import base64
        import tempfile

        results = []
        temp_files = []

        try:
            for idx, file_data in enumerate(request.files):
                try:
                    # Decode base64 audio payload.
                    audio_bytes = base64.b64decode(file_data.get("file", ""))

                    # Save to a temp file for the engine.
                    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
                        tmp_file.write(audio_bytes)
                        temp_path = tmp_file.name
                        temp_files.append(temp_path)

                    # Per-item task overrides the batch default.
                    task = file_data.get("task", default_task)
                    result = inference_engine.process(
                        audio=temp_path,
                        task=task,
                        language=file_data.get("language")
                    )

                    results.append({
                        "index": idx,
                        "task": task,
                        "result": result
                    })

                except Exception as e:
                    # Per-item failures do not abort the batch.
                    logger.error(f"处理文件 {idx} 失败: {str(e)}")
                    results.append({
                        "index": idx,
                        "error": str(e)
                    })
        finally:
            # Cleanup runs even if the loop itself fails, so temp files
            # cannot leak on a whole-batch error.
            for tmp_path in temp_files:
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass

        request_counter.labels(endpoint="/v1/audio/batch", status="success", task=default_task).inc()
        return {"results": results}

    except HTTPException:
        request_counter.labels(endpoint="/v1/audio/batch", status="error", task=default_task).inc()
        raise
    except Exception as e:
        logger.error(f"批量处理失败: {str(e)}")
        request_counter.labels(endpoint="/v1/audio/batch", status="error", task=default_task).inc()
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    # Direct-run entrypoint; port and worker count come from the environment.
    port = int(os.getenv("PORT", 8000))
    workers = int(os.getenv("WORKERS", 1))
    
    uvicorn.run(
        # NOTE(review): this module uses package-relative imports (`from
        # .model_loader import ...`), so the bare "main:app" import string
        # likely needs the package prefix when run this way — verify.
        "main:app",
        host="0.0.0.0",
        port=port,
        workers=workers,
        log_level=os.getenv("LOG_LEVEL", "info").lower()
    )


