from fastapi import Request
from starlette.responses import StreamingResponse

from llm_service.routers.ollama import generate_chat_completion as generate_ollama_chat_completion
from llm_service.utils.response import convert_streaming_response_ollama_to_openai, convert_response_ollama_to_openai


async def generate_chat_completion(
        request: Request,
        form_data: dict
):
    """Proxy an OpenAI-style chat completion through the Ollama backend.

    Args:
        request: Incoming FastAPI request, forwarded to the Ollama router.
        form_data: Parsed request payload; the truthiness of its "stream"
            key selects streaming (SSE) vs. single-response mode.

    Returns:
        A StreamingResponse emitting OpenAI-format SSE chunks when
        streaming, otherwise the upstream Ollama response object as-is.
    """
    # Delegate to the /ollama/api/chat endpoint.
    response = await generate_ollama_chat_completion(
        request=request,
        form_data=form_data
    )

    if form_data.get("stream"):
        # Re-emit the upstream headers, but:
        #  - advertise the converted body as an SSE stream, and
        #  - drop content-length: the Ollama->OpenAI conversion changes the
        #    body size, so the upstream length would be wrong and could
        #    truncate the client's read of the stream.
        headers = dict(response.headers)
        headers["content-type"] = "text/event-stream"
        headers.pop("content-length", None)
        return StreamingResponse(
            convert_streaming_response_ollama_to_openai(response),
            headers=headers,
            background=response.background,
        )
    else:
        # NOTE(review): non-streaming responses are returned in raw Ollama
        # format; convert_response_ollama_to_openai is imported but the call
        # below was left disabled -- confirm whether callers expect the
        # OpenAI shape before enabling it.
        # return convert_response_ollama_to_openai(response)
        return response