"""
DeepSpeed MOE + Qwen3-Coder Web API服务器
提供OpenAI兼容的API接口
"""

from fastapi import FastAPI, HTTPException, Depends, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel, Field
from typing import List, Optional, Dict, Any, Generator
import logging
import time
import uuid
import json
from contextlib import asynccontextmanager

from config import APIConfig, AuthConfig, ModelConfig, ENVIRONMENT, DEBUG
from engine import get_engine

# Logging setup: level is taken from APIConfig.LOG_LEVEL when the config
# class defines it, otherwise falls back to INFO.
logging.basicConfig(
    level=getattr(logging, APIConfig.LOG_LEVEL if hasattr(APIConfig, 'LOG_LEVEL') else 'INFO'),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Pydantic model definitions
class ChatMessage(BaseModel):
    """A single chat turn: OpenAI-style role/content pair."""
    role: str = Field(..., description="消息角色: system, user, assistant")
    content: str = Field(..., description="消息内容")

class ChatCompletionRequest(BaseModel):
    """Request body for /v1/chat/completions (OpenAI-compatible).

    Optional sampling fields default to None and are forwarded to the
    inference engine unchanged; the `system` field seeds the system prompt
    but is overridden by any "system" message in `messages`.
    """
    model: str = Field(..., description="模型名称")
    messages: List[ChatMessage] = Field(..., description="消息列表")
    max_tokens: Optional[int] = Field(None, description="最大生成token数")
    temperature: Optional[float] = Field(None, description="采样温度")
    top_p: Optional[float] = Field(None, description="nucleus sampling参数")
    top_k: Optional[int] = Field(None, description="top-k sampling参数")
    repetition_penalty: Optional[float] = Field(None, description="重复惩罚")
    stream: Optional[bool] = Field(False, description="是否流式输出")
    stop: Optional[List[str]] = Field(None, description="停止序列")
    system: Optional[str] = Field(None, description="系统提示")

class ChatCompletionChoice(BaseModel):
    """One completion candidate inside a ChatCompletionResponse."""
    index: int
    message: ChatMessage
    finish_reason: str = "stop"

class Usage(BaseModel):
    """Token accounting for a completion.

    Extends the standard OpenAI usage object with two extra timing fields
    (latency_seconds, tokens_per_second) reported by the local engine.
    """
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
    latency_seconds: float
    tokens_per_second: float

class ChatCompletionResponse(BaseModel):
    """Top-level non-streaming response for /v1/chat/completions."""
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[ChatCompletionChoice]
    usage: Usage

class ErrorResponse(BaseModel):
    """Generic error envelope: a single `error` dict payload."""
    error: Dict[str, Any]

class ModelInfo(BaseModel):
    """Metadata for one served model (OpenAI /models entry shape)."""
    id: str
    object: str = "model"
    created: int
    owned_by: str
    # Mutable default is safe here: pydantic copies field defaults per instance.
    permission: List[Any] = []
    root: str
    parent: Optional[str] = None

class ModelsResponse(BaseModel):
    """List wrapper returned by the /models endpoints."""
    object: str = "list"
    data: List[ModelInfo]

# Module-level inference engine singleton; the model itself is loaded
# later, inside the lifespan startup hook.
engine = get_engine()

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook: load the model at startup, release it at shutdown.

    In production a failed model load aborts startup; in other environments
    the error is logged and the server is allowed to come up anyway.
    """
    logger.info("启动DeepSpeed MOE + Qwen3-Coder API服务器...")

    try:
        if engine.load_model():
            logger.info("模型初始化成功")
        else:
            logger.error("模型初始化失败")
            raise RuntimeError("模型初始化失败")
    except Exception as e:
        logger.error(f"应用启动失败: {e}")
        # Only a production deployment treats a failed load as fatal.
        if ENVIRONMENT == "production":
            raise

    yield

    # Shutdown: free engine resources.
    logger.info("关闭API服务器...")
    engine.cleanup()

# Create the FastAPI application; interactive docs are served at /docs.
app = FastAPI(
    title="DeepSpeed MOE + Qwen3-Coder API",
    description="基于DeepSpeed MOE技术的Qwen3-Coder推理API，提供OpenAI兼容的接口",
    version="1.0.0",
    lifespan=lifespan
)

# Middleware: CORS is only installed when APIConfig declares ALLOWED_ORIGINS.
if hasattr(APIConfig, 'ALLOWED_ORIGINS'):
    app.add_middleware(
        CORSMiddleware,
        allow_origins=APIConfig.ALLOWED_ORIGINS,
        allow_credentials=True,
        allow_methods=APIConfig.ALLOWED_METHODS,
        allow_headers=APIConfig.ALLOWED_HEADERS,
    )

# Host allow-list: any host in debug mode, localhost-only otherwise.
app.add_middleware(
    TrustedHostMiddleware,
    allowed_hosts=["*"] if DEBUG else ["localhost", "127.0.0.1"]
)

# API key authentication dependency
async def verify_api_key(request: Request) -> str:
    """FastAPI dependency that validates the caller's API key.

    The key may arrive as an ``Authorization: Bearer <key>`` header or as
    an ``api_key`` query parameter; it is accepted when it matches one of
    AuthConfig.API_KEYS' values.

    Returns:
        The validated API key.

    Raises:
        HTTPException: 401 with a WWW-Authenticate hint when no valid key
            is supplied.
    """
    auth_header = request.headers.get("Authorization", "")
    # Strip only a leading "Bearer " prefix. The previous
    # str.replace("Bearer ", "") removed the substring anywhere in the
    # header, mangling keys that happened to contain it.
    if auth_header.startswith("Bearer "):
        api_key = auth_header[len("Bearer "):]
    else:
        api_key = auth_header

    # NOTE(review): plain membership tests are not constant-time; use
    # secrets.compare_digest per key if timing attacks are a concern.
    if api_key and api_key in AuthConfig.API_KEYS.values():
        return api_key

    # Fallback: key supplied as a query parameter.
    api_key = request.query_params.get("api_key", "")
    if api_key and api_key in AuthConfig.API_KEYS.values():
        return api_key

    raise HTTPException(
        status_code=401,
        detail="Invalid API key",
        headers={"WWW-Authenticate": "Bearer"}
    )

# API routes
@app.get("/", tags=["根目录"])
async def root():
    """Service banner: name, version, served model and docs location."""
    banner = {
        "service": "DeepSpeed MOE + Qwen3-Coder API",
        "version": "1.0.0",
        "model": ModelConfig.MODEL_NAME,
        "docs": "/docs",
    }
    return banner

@app.get("/health", tags=["健康检查"])
async def health_check():
    """健康检查"""
    return {
        "status": "healthy",
        "model_loaded": engine.is_initialized,
        "stats": engine.get_stats()
    }

@app.get("/models", response_model=ModelsResponse, tags=["模型"])
async def list_models(api_key: str = Depends(verify_api_key)):
    """列出可用模型"""
    model_info = ModelInfo(
        id=ModelConfig.MODEL_NAME,
        object="model",
        created=int(time.time()),
        owned_by="DeepSpeed-MOE"
    )
    
    return ModelsResponse(data=[model_info])

@app.post("/v1/chat/completions", tags=["Chat"])
async def create_chat_completion(
    request: ChatCompletionRequest,
    api_key: str = Depends(verify_api_key)
):
    """创建聊天完成（OpenAI兼容接口）"""
    
    try:
        # 验证模型名称
        if request.model != ModelConfig.MODEL_NAME:
            raise HTTPException(
                status_code=400,
                detail=f"Model {request.model} not available. Available model: {ModelConfig.MODEL_NAME}"
            )
        
        # 构建提示
        system_prompt = request.system or ModelConfig.SYSTEM_PROMPT
        user_prompt = ""
        
        for message in request.messages:
            if message.role == "system":
                system_prompt = message.content
            elif message.role == "user":
                user_prompt = message.content
            elif message.role == "assistant":
                # 在这里可以处理助手回复历史
                pass
        
        if not user_prompt:
            raise HTTPException(status_code=400, detail="No user message provided")
        
        # 执行推理
        logger.info(f"处理请求: {user_prompt[:100]}...")
        
        if request.stream:
            # 流式响应
            return StreamingResponse(
                stream_chat_completion(request, system_prompt),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                }
            )
        else:
            # 非流式响应
            result = engine.generate(
                prompt=user_prompt,
                system_prompt=system_prompt,
                max_new_tokens=request.max_tokens,
                temperature=request.temperature,
                top_p=request.top_p,
                top_k=request.top_k,
                repetition_penalty=request.repetition_penalty,
                stop_sequences=request.stop
            )
            
            # 构建响应
            response_id = f"chatcmpl-{uuid.uuid4().hex}"
            created = int(time.time())
            
            choice = ChatCompletionChoice(
                index=0,
                message=ChatMessage(role="assistant", content=result["response"]),
                finish_reason="stop"
            )
            
            usage = Usage(**result["usage"])
            
            response = ChatCompletionResponse(
                id=response_id,
                created=created,
                model=ModelConfig.MODEL_NAME,
                choices=[choice],
                usage=usage
            )
            
            return response
            
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"处理聊天完成请求失败: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))

async def stream_chat_completion(request: ChatCompletionRequest, system_prompt: str):
    """Yield SSE-formatted chat completion chunks, ending with ``[DONE]``.

    On engine failure an error event is emitted instead of raising, so the
    open HTTP stream always terminates cleanly for the client.
    """
    try:
        # Prompt is the last *user* message, matching the non-streaming
        # path; previously messages[-1] was used even when that final turn
        # had role "assistant" or "system".
        user_contents = [m.content for m in request.messages if m.role == "user"]
        prompt = user_contents[-1] if user_contents else request.messages[-1].content

        # One completion id/timestamp shared by every chunk, as the OpenAI
        # stream format specifies; a fresh uuid per chunk broke clients
        # that correlate chunks by id.
        response_id = f"chatcmpl-{uuid.uuid4().hex}"
        created = int(time.time())

        # NOTE(review): engine.stream_generate is iterated synchronously,
        # which blocks the event loop between chunks — confirm the engine
        # yields promptly or offload to a thread.
        for chunk in engine.stream_generate(
            prompt=prompt,
            system_prompt=system_prompt,
            max_new_tokens=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            repetition_penalty=request.repetition_penalty
        ):
            response_data = {
                "id": response_id,
                "object": "chat.completion.chunk",
                "created": created,
                "model": ModelConfig.MODEL_NAME,
                "choices": chunk["choices"],
                # Usage is attached only to the final (non-partial) chunk.
                "usage": chunk["usage"] if not chunk.get("partial", True) else None
            }
            yield f"data: {json.dumps(response_data, ensure_ascii=False)}\n\n"

        # Stream terminator.
        yield "data: [DONE]\n\n"

    except Exception as e:
        logger.error(f"流式生成失败: {e}", exc_info=True)
        error_data = {
            "error": {
                "code": "stream_error",
                "message": str(e)
            }
        }
        yield f"data: {json.dumps(error_data)}\n\n"

# OpenAI-compatible legacy routes
@app.post("/v1/completions")
async def create_completion(request: dict, api_key: str = Depends(verify_api_key)):
    """Legacy text-completion endpoint, delegated to the chat endpoint."""
    # Wrap the raw prompt as a single user message and reuse the chat path.
    prompt_message = ChatMessage(role="user", content=request.get("prompt", ""))
    chat_request = ChatCompletionRequest(
        model=request.get("model", ModelConfig.MODEL_NAME),
        messages=[prompt_message],
        max_tokens=request.get("max_tokens"),
        temperature=request.get("temperature"),
        top_p=request.get("top_p"),
        stream=request.get("stream", False),
    )
    return await create_chat_completion(chat_request, api_key)

@app.get("/v1/models", response_model=ModelsResponse)
async def list_models_v1(api_key: str = Depends(verify_api_key)):
    """列出模型（OpenAI兼容路径）"""
    return await list_models(api_key)

@app.get("/stats")
async def get_stats(api_key: str = Depends(verify_api_key)):
    """获取性能统计"""
    return engine.get_stats()

if __name__ == "__main__":
    import uvicorn
    
    uvicorn.run(
        "api_server:app",
        host=APIConfig.HOST,
        port=APIConfig.PORT,
        workers=APIConfig.WORKERS if not DEBUG else 1,
        log_level="info",
        access_log=True
    )