#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json, time, logging
from fastapi import APIRouter, FastAPI, HTTPException, Request, status, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse
from llmapi.base import *
from llmapi.context import LLMAPIContext
from llmapi.utils import torch_gc
# Module-level router collecting every endpoint below; mounted onto the app in create_app().
router = APIRouter()
def create_app(ctx: LLMAPIContext) -> FastAPI:
    """Build and configure the FastAPI application.

    Stores *ctx* on ``app.state`` so request handlers can reach the shared
    LLM context, enables fully permissive CORS, and mounts the module router.
    """
    application = FastAPI(
        title="llm openai like API",
        version="0.0.1",
    )
    # Handlers retrieve this via `request.app.state.ctx`.
    application.state.ctx = ctx

    # Permissive CORS: any origin, method, and header; credentials allowed.
    application.add_middleware(
        CORSMiddleware,
        allow_origins=['*'],
        allow_credentials=True,
        allow_methods=['*'],
        allow_headers=['*'],
    )
    application.include_router(router)
    return application

@router.get("/")
def api_root():
    return {'message': 'started', 'success': True}

@router.get("/v1/models", response_model=GetModelResponse)
def get_models(req: Request) -> ModelList:
    if req.app.state.ctx is None:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"App context is None",
        )
    ctx: LLMAPIContext = req.app.state.ctx
    mds = ModelList(object="list", data=[])
    for m in ctx.models.values():
        mds["data"].append({
            "id": m.name,
            "object": "model",
            "created": int(time.time()),
            "owned_by": "yunlu",
            "permission": get_default_model_permission(),
            "root": m.name,
            "parent": None,
        })
    return mds

@router.post(
    "/v1/embeddings",
    response_model=CreateEmbeddingResponse,
)
def create_embedding(
    req: Request,
    request: CreateEmbeddingRequest,
    background_tasks: BackgroundTasks):
    background_tasks.add_task(torch_gc)
    if req.app.state.ctx is None:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"App context is None",
        )
    ctx: LLMAPIContext = req.app.state.ctx
    model_name =  request.model
    if model_name not in ctx.models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Model {model_name} not found",
        )
    model = ctx.models[model_name]
    return model.embedding(**request.dict(exclude={"model", "user"}))

@router.post(
    "/v1/chat/completions",
    response_model=CreateChatCompletionResponse,
)
async def create_chat_completion(
    req: Request,
    request: CreateChatCompletionRequest,
    background_tasks: BackgroundTasks
) -> Union[ChatCompletion, EventSourceResponse]:
    background_tasks.add_task(torch_gc)
    if req.app.state.ctx is None:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"App context is None",
        )
    ctx: LLMAPIContext = req.app.state.ctx
    model_name = request.model
    if model_name not in ctx.models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Model {model_name} not found",
        )
    model = ctx.models[model_name]
    completion_or_chunks = model.chat_completion(**request.dict(exclude={"model", "user"}))
    if request.stream:
        chunks: Iterator[ChatCompletionChunk] = completion_or_chunks
        async def eval_llm():
            for chunk in chunks:
                yield json.dumps(chunk, ensure_ascii=False)
            yield "[DONE]"
        return EventSourceResponse(eval_llm(), ping=10000)
    if isinstance(completion_or_chunks, dict):
        return completion_or_chunks
    completion: Completion = next(completion_or_chunks)  # type: ignore
    return completion

@router.post(
    "/v1/completions",
    response_model=CreateCompletionResponse,
)
def create_completion(
    req: Request,
    request: CreateCompletionRequest,
    background_tasks: BackgroundTasks)-> Union[Completion, EventSourceResponse]:
    background_tasks.add_task(torch_gc)
    if req.app.state.ctx is None:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"App context is None",
        )
    ctx: LLMAPIContext = req.app.state.ctx
    model_name = request.model
    if model_name not in ctx.models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Model {model_name} not found",
        )
    model = ctx.models[model_name]
    completion_or_chunks = model.completion(**request.dict(exclude={"model", "user"}))
    if request.stream:
        chunks: Iterator[CompletionChunk] = completion_or_chunks
        async def eval_llm():
            for chunk in chunks:
                yield json.dumps(chunk, ensure_ascii=False)
            yield "[DONE]"
        return EventSourceResponse(eval_llm(), ping=10000)
    if isinstance(completion_or_chunks, dict):
        return completion_or_chunks
    completion: Completion = next(completion_or_chunks)
    return completion