import json
import logging
import sys

from fastapi import (
    FastAPI,
    HTTPException,
    status,
)
from starlette.requests import Request

from llm_service.env import GLOBAL_LOG_LEVEL
from llm_service.routers import ollama
from llm_service.utils.chat import (
    generate_chat_completion as chat_completion_handler
)
from llm_service.utils.middleware import process_chat_response
from llm_service.utils.payload import convert_payload_openai_to_ollama

# Configure root logging to stream to stdout.
# NOTE: the original passed filemode='w', but that argument is only honored
# together with filename= — with stream= it is silently ignored, so it has
# been removed. Logs go to stdout, not to a file.
logging.basicConfig(level=GLOBAL_LOG_LEVEL, stream=sys.stdout,
                    format='%(asctime)s - %(levelname)s - line %(lineno)d - %(name)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')

log = logging.getLogger(__name__)
app = FastAPI()

########################################
#
# OLLAMA
#
########################################

# App-wide model registry, starts empty — presumably populated by the
# ollama router at runtime; verify against llm_service.routers.ollama.
app.state.OLLAMA_MODELS = {}

# Mount all Ollama endpoints under the /ollama URL prefix.
app.include_router(ollama.router, prefix="/ollama", tags=["ollama"])


@app.post("/api/chat/completions")
async def generate_ollama_chat_completion(request: Request, form_data: dict):
    """Proxy an OpenAI-style chat-completion request to the Ollama backend.

    The incoming payload is converted from the OpenAI format to the Ollama
    format, dispatched to the chat-completion handler, and the raw response
    is post-processed before being returned to the caller.

    Args:
        request: The incoming Starlette request, forwarded to the handlers.
        form_data: OpenAI-format chat-completion payload from the frontend.

    Raises:
        HTTPException: Re-raised unchanged if a downstream handler raised
            one (preserving its status code); any other failure is wrapped
            in a 400 with the error message as detail.
    """
    # Use the module logger instead of print so the payload dump respects
    # the configured log level and format.
    log.debug("chat completion payload from frontend: %s",
              json.dumps(form_data, indent=4, ensure_ascii=False))
    try:
        ollama_payload = convert_payload_openai_to_ollama(form_data)
        response = await chat_completion_handler(request, ollama_payload)
        return await process_chat_response(request, response, ollama_payload)
    except HTTPException:
        # Downstream handlers may raise HTTPExceptions with meaningful
        # status codes (401, 404, 500, ...); don't collapse them into 400.
        raise
    except Exception as e:
        # Log the full traceback before translating to a client error.
        log.exception("chat completion request failed")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e),
        ) from e
