from fastapi import FastAPI
from typing import List
from fastapi import Request, HTTPException
from fastapi.concurrency import asynccontextmanager
from pydantic import BaseModel
from embedding_engine import EmbeddingEngine
from embedding_onnx_provider import ONNXExecutionProvider
from log_config import logger,LOGGING_CONFIG
import os
import uvicorn

# Absolute path of this file; used below to locate the sibling "models" directory.
filepath = os.path.abspath(__file__)

def download_models():
    """Ensure the BCE embedding model exists locally and return its directory.

    On first run the model snapshot and tokenizer files are fetched from
    ModelScope; once the target directory exists, later calls return
    immediately without importing modelscope.

    NOTE(review): the snapshot and the tokenizer files are pulled from two
    different ModelScope repos ('netease-youdao/...' vs 'maidalun/...') —
    presumably the first repo lacks the tokenizer files; confirm this is
    intentional.
    """
    project_root = os.path.dirname(os.path.dirname(filepath))
    models_dir = os.path.join(project_root, "models")
    os.makedirs(models_dir, exist_ok=True)
    embedding_dir = os.path.join(models_dir, "bce_embedding_base")

    # Already fetched — skip the (expensive) download path entirely.
    if os.path.exists(embedding_dir):
        return embedding_dir

    # Lazy import: modelscope is only required the very first time.
    from modelscope import snapshot_download, model_file_download

    snapshot_download(
        model_id='netease-youdao/bce-embedding-base_v1',
        local_dir=embedding_dir,
    )
    for tokenizer_file in ('tokenizer_config.json', 'tokenizer.json'):
        model_file_download(
            model_id='maidalun/bce-embedding-base_v1',
            file_path=tokenizer_file,
            local_dir=embedding_dir,
        )
    return embedding_dir

def create_app(model_dir, max_texts: int = 2000, engine_workers: int = 8) -> FastAPI:
    """Build the FastAPI application that serves the embedding endpoint.

    Args:
        model_dir: Directory containing the embedding model files.
        max_texts: Maximum number of texts accepted per request
            (previously hard-coded to 2000; default preserves old behavior).
        engine_workers: Second positional argument passed to EmbeddingEngine
            (previously hard-coded to 8) — presumably a worker/batch count;
            TODO confirm against EmbeddingEngine.

    Returns:
        A configured FastAPI app. The embedding engine is created/started on
        startup and stopped on shutdown via the lifespan context manager.
    """
    @asynccontextmanager
    async def lifespan(app: FastAPI):
        """Start the embedding engine before serving; stop it on shutdown."""
        app.state.embed_engine = EmbeddingEngine(model_dir, engine_workers)
        app.state.embed_engine.start()
        yield
        app.state.embed_engine.stop()

    app = FastAPI(
            title="Embedding Inference Server",
            lifespan=lifespan,
            root_path="/api/v1",
            version="1.0",
            license_info={"name": "Apache License 2.0", "identifier": "Apache"}
        )

    class EmbeddingRequest(BaseModel):
        # Batch of input texts to embed; must be non-empty and at most max_texts long.
        texts: List[str]

    class EmbeddingResponse(BaseModel):
        # One embedding vector per input text, plus total tokens consumed.
        embeddings: List[List[float]]
        tokens: int

    @app.post("/embedding", status_code=200, response_model=EmbeddingResponse)
    def text_embedding(request: Request, embed_request: EmbeddingRequest):
        """Compute embeddings for a batch of texts.

        Raises:
            HTTPException: 400 if the batch is empty, 413 if it exceeds max_texts.
        """
        if not embed_request.texts:
            raise HTTPException(status_code=400, detail="texts 不能为空")
        if len(embed_request.texts) > max_texts:
            # Message reflects the configured limit; default renders the
            # original "texts 最大2000条" byte-for-byte.
            raise HTTPException(status_code=413, detail=f"texts 最大{max_texts}条")

        engine = request.app.state.embed_engine
        embeddings, tokens = engine.execute(embed_request.texts)
        return {"tokens": tokens, "embeddings": embeddings}

    return app

if __name__ == "__main__":
    # 下载模型
    embedding_dir = download_models()
    # 创建应用
    app = create_app(embedding_dir)
    # 启动应用
    uvicorn.run(app, host="0.0.0.0", port=8077,log_config=LOGGING_CONFIG)