"""
Gemma 3 270M API服务
"""
import json
import logging
import os
import time
from contextlib import asynccontextmanager
from typing import Dict, List, Optional, Union

import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from prometheus_client import CONTENT_TYPE_LATEST, Counter, Histogram, generate_latest
from pydantic import BaseModel
from starlette.responses import Response

from .inference import Gemma3Inference
from .model_loader import Gemma3ModelLoader

# 配置日志
# Logging configuration; the level is taken from the LOG_LEVEL env var.
logging.basicConfig(
    level=os.getenv("LOG_LEVEL", "INFO"),
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Prometheus metrics.
# Request count per endpoint, labelled by processing status
# ("received" / "success" / "error").
request_counter = Counter(
    "gemma3_requests_total",
    "Total number of requests",
    ["endpoint", "status"]
)

# Wall-clock request latency per endpoint.
# NOTE(review): not observed anywhere in this file — confirm it is used
# elsewhere (e.g. middleware) or remove.
request_duration = Histogram(
    "gemma3_request_duration_seconds",
    "Request duration in seconds",
    ["endpoint"]
)

# Distribution of generated-token counts per endpoint.
# NOTE(review): not observed anywhere in this file — confirm usage.
generation_tokens = Histogram(
    "gemma3_generation_tokens",
    "Number of generated tokens",
    ["endpoint"]
)

# Globals populated by the lifespan handler at startup; None until the
# model has finished loading.
model_loader: Optional[Gemma3ModelLoader] = None
inference_engine: Optional[Gemma3Inference] = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the model at startup, log at shutdown.

    Populates the module-level ``model_loader`` and ``inference_engine``
    globals. Re-raises on load failure so the server fails fast instead of
    serving requests without a model.
    """
    global model_loader, inference_engine

    # Startup: load the model once, before the app accepts traffic.
    logger.info("正在加载模型...")
    try:
        model_loader = Gemma3ModelLoader()
        model_loader.load_model()
        inference_engine = Gemma3Inference(
            model_loader.get_model(),
            model_loader.get_tokenizer(),
            model_loader.get_device()
        )
        logger.info("模型加载完成")
    except Exception as e:
        logger.error(f"模型加载失败: {str(e)}")
        raise

    yield

    # Shutdown: nothing is actually released here beyond the log line.
    logger.info("正在清理资源...")


# FastAPI application; model loading/teardown is delegated to `lifespan`.
app = FastAPI(
    title="Gemma 3 270M API",
    description="基于 PyTorch + torch.compile 优化的 Gemma 3 270M 文本生成服务",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# fully open — tighten the origin list before exposing this publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class HealthResponse(BaseModel):
    """Response schema for the /health endpoint."""
    status: str  # service status string, e.g. "healthy"
    model_loaded: bool  # True once both loader and inference engine exist
    npu_available: bool  # kept for compatibility; this build targets CPU only
    version: str  # service version string
    model_info: dict  # static metadata about the loaded model


class ChatMessage(BaseModel):
    """A single chat message in OpenAI message format."""
    role: str  # "system", "user", "assistant"
    content: str  # message text


class ChatCompletionRequest(BaseModel):
    """Request body for /v1/chat/completions (OpenAI-compatible)."""
    messages: List[ChatMessage]  # conversation history, oldest first
    model: Optional[str] = "gemma-3-270m"  # echoed back in the response
    max_tokens: Optional[int] = 512  # max new tokens to generate
    temperature: Optional[float] = 0.7  # sampling temperature
    top_p: Optional[float] = 0.9  # nucleus sampling threshold
    top_k: Optional[int] = 50  # top-k sampling cutoff
    repetition_penalty: Optional[float] = 1.1  # values > 1 discourage repetition
    stop: Optional[Union[str, List[str]]] = None  # stop sequence(s)
    stream: Optional[bool] = False  # True -> SSE streaming response


class CompletionRequest(BaseModel):
    """Request body for /v1/completions and /v1/generate (OpenAI-compatible)."""
    prompt: Union[str, List[str]]  # single prompt or batch of prompts
    model: Optional[str] = "gemma-3-270m"  # echoed back in the response
    max_tokens: Optional[int] = 100  # max new tokens to generate
    temperature: Optional[float] = 0.7  # sampling temperature
    top_p: Optional[float] = 0.9  # nucleus sampling threshold
    top_k: Optional[int] = 50  # top-k sampling cutoff
    repetition_penalty: Optional[float] = 1.1  # values > 1 discourage repetition
    stop: Optional[Union[str, List[str]]] = None  # stop sequence(s)
    stream: Optional[bool] = False  # True -> SSE streaming (str prompt only)


@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Health probe: report readiness plus static model metadata."""
    import torch

    loaded = model_loader is not None and inference_engine is not None
    device = model_loader.get_device() if model_loader else "cpu"
    model_info = {
        "name": "gemma-3-270m",
        "type": "text-generation",
        "max_context_length": 2048,
        "framework": "PyTorch + torch.compile",
        "pytorch_version": torch.__version__,
        "device": device,
        "optimization": "torch.compile (类似 LiteRT)",
    }
    return {
        "status": "healthy",
        "model_loaded": loaded,
        "npu_available": False,  # CPU build — NPU is not supported
        "version": "1.0.0",
        "model_info": model_info,
    }


@app.get("/metrics")
async def metrics():
    """Expose Prometheus metrics in the text exposition format."""
    payload = generate_latest()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest):
    """
    Create a chat completion (OpenAI Chat Completions API compatible).

    Supports regular responses and SSE streaming (``stream=True``).

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    request_counter.labels(endpoint="/v1/chat/completions", status="received").inc()

    try:
        # Convert pydantic messages to plain dicts for the inference engine.
        messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

        # Normalize the stop parameter to a list (or None).
        stop_sequences = None
        if request.stop:
            stop_sequences = [request.stop] if isinstance(request.stop, str) else request.stop

        # BUGFIX: use "is not None" so an explicit temperature=0.0 (greedy
        # decoding) is honored instead of silently falling back to 0.7.
        temperature = request.temperature if request.temperature is not None else 0.7
        top_p = request.top_p if request.top_p is not None else 0.9
        max_tokens = request.max_tokens or 512
        # Optional sampling knobs are forwarded only when truthy, matching the
        # original behavior (0 / None means "let the engine decide").
        extra_kwargs = {}
        if request.top_k:
            extra_kwargs["top_k"] = request.top_k
        if request.repetition_penalty:
            extra_kwargs["repetition_penalty"] = request.repetition_penalty

        # Streaming output (SSE).
        if request.stream:
            async def generate_chat_stream_response():
                chat_id = f"chatcmpl-{os.urandom(16).hex()}"
                # BUGFIX: "created" must be the request's Unix timestamp, not
                # this source file's modification time.
                created = int(time.time())

                # Rebuild the prompt only to count prompt tokens for usage stats.
                if hasattr(inference_engine.tokenizer, 'apply_chat_template'):
                    prompt = inference_engine.tokenizer.apply_chat_template(
                        messages,
                        tokenize=False,
                        add_generation_prompt=True
                    )
                else:
                    # Fallback: simple role-prefixed transcript.
                    prompt_parts = []
                    for msg in messages:
                        role = msg.get("role", "user")
                        content = msg.get("content", "")
                        if role == "user":
                            prompt_parts.append(f"User: {content}")
                        elif role == "assistant":
                            prompt_parts.append(f"Assistant: {content}")
                        elif role == "system":
                            prompt_parts.append(f"System: {content}")
                    prompt_parts.append("Assistant:")
                    prompt = "\n".join(prompt_parts)

                prompt_tokens = len(inference_engine.tokenizer.encode(prompt))
                completion_tokens = 0

                try:
                    # Initial chunk carrying the assistant role.
                    first = {'id': chat_id, 'object': 'chat.completion.chunk', 'created': created, 'model': request.model, 'choices': [{'index': 0, 'delta': {'role': 'assistant', 'content': ''}, 'finish_reason': None}]}
                    yield f"data: {json.dumps(first)}\n\n"

                    # One chunk per generated token.
                    for token_text in inference_engine.chat_stream(
                        messages=messages,
                        max_new_tokens=max_tokens,
                        temperature=temperature,
                        top_p=top_p,
                        stop_sequences=stop_sequences,
                        **extra_kwargs
                    ):
                        completion_tokens += 1
                        chunk = {'id': chat_id, 'object': 'chat.completion.chunk', 'created': created, 'model': request.model, 'choices': [{'index': 0, 'delta': {'content': token_text}, 'finish_reason': None}]}
                        yield f"data: {json.dumps(chunk)}\n\n"

                    # Final chunk with finish_reason and usage, then [DONE].
                    final = {'id': chat_id, 'object': 'chat.completion.chunk', 'created': created, 'model': request.model, 'choices': [{'index': 0, 'delta': {}, 'finish_reason': 'stop'}], 'usage': {'prompt_tokens': prompt_tokens, 'completion_tokens': completion_tokens, 'total_tokens': prompt_tokens + completion_tokens}}
                    yield f"data: {json.dumps(final)}\n\n"
                    yield "data: [DONE]\n\n"
                except Exception as e:
                    logger.error(f"流式聊天生成失败: {str(e)}")
                    yield f"data: {json.dumps({'error': str(e)})}\n\n"

            request_counter.labels(endpoint="/v1/chat/completions", status="success").inc()
            return StreamingResponse(
                generate_chat_stream_response(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )

        # Non-streaming output.
        result = inference_engine.chat(
            messages=messages,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stop_sequences=stop_sequences,
            **extra_kwargs
        )

        response_message = result.get("message", {})
        usage = result.get("usage", {})

        request_counter.labels(endpoint="/v1/chat/completions", status="success").inc()
        return {
            "id": f"chatcmpl-{os.urandom(16).hex()}",
            "object": "chat.completion",
            "created": int(time.time()),  # BUGFIX: was file mtime
            "model": request.model,
            "choices": [{
                "index": 0,
                "message": {
                    "role": response_message.get("role", "assistant"),
                    "content": response_message.get("content", "")
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": usage.get("prompt_tokens", 0),
                "completion_tokens": usage.get("completion_tokens", 0),
                "total_tokens": usage.get("total_tokens", 0)
            }
        }

    except Exception as e:
        # BUGFIX: the old "finally" incremented the success counter even on
        # errors; success is now counted only on the successful return paths.
        logger.error(f"聊天完成失败: {str(e)}")
        request_counter.labels(endpoint="/v1/chat/completions", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest):
    """
    Create a text completion (OpenAI Completions API compatible).

    Supports a single string prompt (optionally streamed via SSE with
    ``stream=True``) or a batch of prompts (non-streaming only).

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    request_counter.labels(endpoint="/v1/completions", status="received").inc()

    try:
        # Normalize the stop parameter to a list (or None).
        stop_sequences = None
        if request.stop:
            stop_sequences = [request.stop] if isinstance(request.stop, str) else request.stop

        # BUGFIX: use "is not None" so an explicit temperature=0.0 (greedy
        # decoding) is honored instead of silently falling back to 0.7.
        temperature = request.temperature if request.temperature is not None else 0.7
        top_p = request.top_p if request.top_p is not None else 0.9
        max_tokens = request.max_tokens or 100
        # Optional sampling knobs are forwarded only when truthy, matching the
        # original behavior (0 / None means "let the engine decide").
        extra_kwargs = {}
        if request.top_k:
            extra_kwargs["top_k"] = request.top_k
        if request.repetition_penalty:
            extra_kwargs["repetition_penalty"] = request.repetition_penalty

        # Streaming output (only supported for a single string prompt).
        if request.stream and isinstance(request.prompt, str):
            async def generate_stream_response():
                completion_id = f"cmpl-{os.urandom(16).hex()}"
                # BUGFIX: "created" must be the request's Unix timestamp, not
                # this source file's modification time.
                created = int(time.time())
                prompt_text = request.prompt
                prompt_tokens = len(inference_engine.tokenizer.encode(prompt_text))
                completion_tokens = 0

                try:
                    # Initial (empty) chunk.
                    first = {'id': completion_id, 'object': 'text_completion.chunk', 'created': created, 'model': request.model, 'choices': [{'index': 0, 'delta': {'text': ''}, 'finish_reason': None}]}
                    yield f"data: {json.dumps(first)}\n\n"

                    # One chunk per generated token.
                    for token_text in inference_engine.generate_stream(
                        prompt=prompt_text,
                        max_new_tokens=max_tokens,
                        temperature=temperature,
                        top_p=top_p,
                        stop_sequences=stop_sequences,
                        **extra_kwargs
                    ):
                        completion_tokens += 1
                        chunk = {'id': completion_id, 'object': 'text_completion.chunk', 'created': created, 'model': request.model, 'choices': [{'index': 0, 'delta': {'text': token_text}, 'finish_reason': None}]}
                        yield f"data: {json.dumps(chunk)}\n\n"

                    # Final chunk with finish_reason and usage, then [DONE].
                    final = {'id': completion_id, 'object': 'text_completion.chunk', 'created': created, 'model': request.model, 'choices': [{'index': 0, 'delta': {}, 'finish_reason': 'stop'}], 'usage': {'prompt_tokens': prompt_tokens, 'completion_tokens': completion_tokens, 'total_tokens': prompt_tokens + completion_tokens}}
                    yield f"data: {json.dumps(final)}\n\n"
                    yield "data: [DONE]\n\n"
                except Exception as e:
                    logger.error(f"流式生成失败: {str(e)}")
                    yield f"data: {json.dumps({'error': str(e)})}\n\n"

            request_counter.labels(endpoint="/v1/completions", status="success").inc()
            return StreamingResponse(
                generate_stream_response(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )

        # Non-streaming, single prompt.
        if isinstance(request.prompt, str):
            result = inference_engine.generate(
                prompt=request.prompt,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                stop_sequences=stop_sequences,
                **extra_kwargs
            )

            generated_text = result.get("generated_text", "")

            # Token counts re-derived from the tokenizer for the usage block.
            prompt_tokens = len(inference_engine.tokenizer.encode(request.prompt))
            completion_tokens = len(inference_engine.tokenizer.encode(generated_text))

            request_counter.labels(endpoint="/v1/completions", status="success").inc()
            return {
                "id": f"cmpl-{os.urandom(16).hex()}",
                "object": "text_completion",
                "created": int(time.time()),  # BUGFIX: was file mtime
                "model": request.model,
                "choices": [{
                    "text": generated_text,
                    "index": 0,
                    "logprobs": None,
                    "finish_reason": "stop"
                }],
                "usage": {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": prompt_tokens + completion_tokens
                }
            }
        else:
            # Batch of prompts: one choice per prompt, usage summed across all.
            results = inference_engine.generate(
                prompt=request.prompt,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                stop_sequences=stop_sequences,
                **extra_kwargs
            )

            choices = []
            total_prompt_tokens = 0
            total_completion_tokens = 0

            # assumes a batch call returns {"results": [{"generated_text", "prompt"}, ...]}
            # — TODO confirm against Gemma3Inference.generate
            if isinstance(results, dict) and "results" in results:
                for idx, item in enumerate(results["results"]):
                    generated_text = item.get("generated_text", "")
                    prompt_text = item.get("prompt", "")

                    prompt_tokens = len(inference_engine.tokenizer.encode(prompt_text))
                    completion_tokens = len(inference_engine.tokenizer.encode(generated_text))

                    total_prompt_tokens += prompt_tokens
                    total_completion_tokens += completion_tokens

                    choices.append({
                        "text": generated_text,
                        "index": idx,
                        "logprobs": None,
                        "finish_reason": "stop"
                    })

            request_counter.labels(endpoint="/v1/completions", status="success").inc()
            return {
                "id": f"cmpl-{os.urandom(16).hex()}",
                "object": "text_completion",
                "created": int(time.time()),  # BUGFIX: was file mtime
                "model": request.model,
                "choices": choices,
                "usage": {
                    "prompt_tokens": total_prompt_tokens,
                    "completion_tokens": total_completion_tokens,
                    "total_tokens": total_prompt_tokens + total_completion_tokens
                }
            }

    except Exception as e:
        # BUGFIX: the old "finally" incremented the success counter even on
        # errors; success is now counted only on the successful return paths.
        logger.error(f"文本补全失败: {str(e)}")
        request_counter.labels(endpoint="/v1/completions", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/v1/generate")
async def generate_text(request: CompletionRequest):
    """
    Simplified text generation endpoint.

    Accepts the same body as /v1/completions and returns the raw inference
    result. For a list prompt, only the first entry is generated.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    request_counter.labels(endpoint="/v1/generate", status="received").inc()

    try:
        # Normalize the stop parameter to a list (or None).
        stop_sequences = None
        if request.stop:
            stop_sequences = [request.stop] if isinstance(request.stop, str) else request.stop

        # BUGFIX: use "is not None" so an explicit temperature=0.0 (greedy
        # decoding) is honored instead of silently falling back to 0.7.
        temperature = request.temperature if request.temperature is not None else 0.7
        top_p = request.top_p if request.top_p is not None else 0.9
        # Optional sampling knobs are forwarded only when truthy, matching the
        # original behavior (0 / None means "let the engine decide").
        extra_kwargs = {}
        if request.top_k:
            extra_kwargs["top_k"] = request.top_k
        if request.repetition_penalty:
            extra_kwargs["repetition_penalty"] = request.repetition_penalty

        # Generate; batch prompts are truncated to their first element here.
        result = inference_engine.generate(
            prompt=request.prompt if isinstance(request.prompt, str) else request.prompt[0],
            max_new_tokens=request.max_tokens or 100,
            temperature=temperature,
            top_p=top_p,
            stop_sequences=stop_sequences,
            **extra_kwargs
        )

        request_counter.labels(endpoint="/v1/generate", status="success").inc()
        return {
            "result": result
        }

    except Exception as e:
        # BUGFIX: the old "finally" incremented the success counter even on
        # errors; success is now counted only on the successful return path.
        logger.error(f"文本生成失败: {str(e)}")
        request_counter.labels(endpoint="/v1/generate", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    # Run the service with uvicorn; all settings come from the environment.
    serve_port = int(os.getenv("PORT", 8000))
    worker_count = int(os.getenv("WORKERS", 1))

    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=serve_port,
        workers=worker_count,
        log_level=os.getenv("LOG_LEVEL", "info").lower(),
    )

