import contextlib
import anyio
import typing
import json
from anyio.streams.memory import MemoryObjectSendStream

from threading import Lock
from functools import partial
from typing import Iterator, List, Optional, Union, Dict

from starlette_context.middleware import RawContextMiddleware
from starlette_context.plugins import RequestIdPlugin
from starlette.concurrency import run_in_threadpool, iterate_in_threadpool
from fastapi import Depends, FastAPI, APIRouter, Request, HTTPException, status, Body
from fastapi.middleware import Middleware
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import HTTPBearer
from sse_starlette.sse import EventSourceResponse

from llm_server.server.settings import ModelSettings, ServerSettings, Settings
from llm_server.server.model import LLM
from llm_server.server.types import CreateCompletionRequest, CreateChatCompletionRequest

from llm_server.types import CreateCompletionResponse

import llm_server

# Shared router; route handlers below attach to it and create_app() mounts it.
router = APIRouter()

# Process-wide server settings, installed by set_server_settings() and read
# back through the get_server_settings() dependency.
_server_settings: Optional[ServerSettings] = None

def set_server_settings(server_settings: ServerSettings):
    """Install *server_settings* as the process-wide server configuration."""
    global _server_settings
    _server_settings = server_settings


def get_server_settings():
    """FastAPI dependency that yields the current process-wide settings."""
    settings = _server_settings
    yield settings

# Process-wide model wrapper, installed by set_llm() and handed out by the
# get_llm() dependency.
_llm: Optional[LLM] = None

# Lock pair used by get_llm() for a lock handoff: the outer lock serializes
# entry (and doubles as a "another request is waiting" signal checked in
# get_event_publisher); the inner lock stays held while a request uses _llm.
llm_outer_lock = Lock()
llm_inner_lock = Lock()

def set_llm(model_settings: List[ModelSettings]):
    """Build the process-wide LLM instance from the given model settings."""
    global _llm
    _llm = LLM(models=model_settings)

def get_llm():
    # Dependency that yields the shared LLM using a lock handoff:
    #   1. acquire the outer lock (serializes entry; while held, other
    #      requests see llm_outer_lock.locked() == True),
    #   2. acquire the inner lock (held for the whole request),
    #   3. release the outer lock before yielding, so the next request can
    #      queue up while this one streams.
    llm_outer_lock.acquire()
    release_outer_lock = True
    try:
        llm_inner_lock.acquire()
        try:
            # Handoff complete: the inner lock now protects _llm, so the
            # outer lock can be released early.
            llm_outer_lock.release()
            release_outer_lock = False
            yield _llm
        finally:
            llm_inner_lock.release()
    finally:
        # Only still True if an exception fired before the handoff finished.
        if release_outer_lock:
            llm_outer_lock.release()

_ping_message_factory: typing.Optional[typing.Callable[[], bytes]] = None

def create_app(
    server_settings: ServerSettings | None = None,
    model_settings: List[ModelSettings] | None = None,
):
    """Build and configure the FastAPI application.

    Installs the process-wide settings and LLM, mounts the shared router,
    and wires up CORS plus request-id context middleware.

    Raises:
        ValueError: if *server_settings* is None (the previous behavior was
            an opaque AttributeError on ``root_path``).
    """
    if server_settings is None:
        raise ValueError("server_settings must be provided")

    set_server_settings(server_settings)

    # Request-id context for logging/tracing on every request.
    middleware = [Middleware(RawContextMiddleware, plugins=(RequestIdPlugin(),))]

    app = FastAPI(
        # BUG FIX: the middleware list was built but never passed to FastAPI,
        # so RawContextMiddleware/RequestIdPlugin were never installed.
        middleware=middleware,
        title="LLM Server API",
        version=llm_server.__version__,
        root_path=server_settings.root_path,
    )

    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    app.include_router(router)

    # NOTE(review): model_settings=None is passed straight through to
    # LLM(models=None) as before — confirm LLM tolerates it.
    set_llm(model_settings=model_settings)

    return app


# Setup Bearer authentication scheme; auto_error=False so missing credentials
# fall through to authenticate() instead of an automatic error response.
bearer_scheme = HTTPBearer(auto_error=False)


async def authenticate(
    settings: ServerSettings = Depends(get_server_settings),
    authorization: Optional[str] = Depends(bearer_scheme),
):
    """Bearer-token auth dependency for the API routes.

    Returns True when no api_key is configured (auth disabled), or the
    presented credentials when they match settings.api_key.

    Raises:
        HTTPException: 401 with a WWW-Authenticate challenge otherwise.
    """
    # Skip API key check if it's not set in settings
    if settings.api_key is None:
        return True

    # check bearer credentials against the api_key
    if authorization and authorization.credentials == settings.api_key:
        # api key is valid
        return authorization.credentials

    # BUG FIX: RFC 7235 requires 401 responses to carry a WWW-Authenticate
    # challenge so clients know which scheme to use.
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API key",
        headers={"WWW-Authenticate": "Bearer"},
    )


async def get_event_publisher(
    request: Request,
    inner_send_chan: MemoryObjectSendStream[typing.Any],
    iterator: Iterator[typing.Any],
    on_complete: typing.Optional[typing.Callable[[], None]] = None,
):
    """Forward chunks from `iterator` to `inner_send_chan` as SSE events.

    Used as the EventSourceResponse data sender: each chunk is JSON-encoded
    and sent as an SSE `data` payload, followed by a final "[DONE]" marker.
    `on_complete` (if given) always runs, even on cancellation.
    """
    # get_server_settings() is a generator dependency; next() pulls the
    # current settings object out of it.
    server_settings = next(get_server_settings())
    interrupt_requests = (
        server_settings.interrupt_requests if server_settings else False
    )
    async with inner_send_chan:
        try:
            async for chunk in iterate_in_threadpool(iterator):
                await inner_send_chan.send(dict(data=json.dumps(chunk)))
                # Stop streaming once the client has gone away.
                if await request.is_disconnected():
                    raise anyio.get_cancelled_exc_class()()
                # llm_outer_lock.locked() means another request is queued in
                # get_llm(); optionally abort this stream to let it through.
                if interrupt_requests and llm_outer_lock.locked():
                    await inner_send_chan.send(dict(data="[DONE]"))
                    raise anyio.get_cancelled_exc_class()()
            await inner_send_chan.send(dict(data="[DONE]"))
        except anyio.get_cancelled_exc_class() as e:
            print("disconnected")
            # Shielded scope so the log line and re-raise survive the
            # surrounding cancellation.
            with anyio.move_on_after(1, shield=True):
                print(f"Disconnected from client (via refresh/close) {request.client}")
                raise e
        finally:
            if on_complete:
                on_complete()


@router.post(
    "/v1/completions",
    summary="Completion",
    dependencies=[Depends(authenticate)]
)
async def create_completion(
    request: Request,
    body: CreateCompletionRequest,
):
    """Stream a completion as server-sent events.

    Acquires the shared LLM (get_llm keeps its inner lock held for the life
    of the request via the ExitStack), runs the model in a threadpool, and
    streams JSON chunks back through an EventSourceResponse.
    """
    exit_stack = contextlib.ExitStack()
    # get_llm is a plain generator; wrap it as a context manager so closing
    # exit_stack releases the lock pair it manages.
    llm = await run_in_threadpool(
        lambda: exit_stack.enter_context(contextlib.contextmanager(get_llm)())
    )

    try:
        if llm is None:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="Service is not available",
            )

        if isinstance(body.prompt, list):
            # Only a single prompt per request is supported. Reject batches
            # explicitly (the previous `assert` vanishes under `python -O`).
            if len(body.prompt) > 1:
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail="Only a single prompt is supported",
                )
            body.prompt = body.prompt[0] if body.prompt else ""

        model = llm(body.model)

        # Request fields the underlying model call does not accept.
        exclude = {
            "n",
            "best_of",
            "logit_bias_type",
            "user",
            "min_tokens",
        }
        kwargs = body.model_dump(exclude=exclude)

        iterator_or_completion: Iterator[CreateCompletionResponse] = (
            await run_in_threadpool(model, **kwargs)
        )

        # Pull the first chunk eagerly so model errors surface as an HTTP
        # error instead of a broken event stream.
        first_response = await run_in_threadpool(next, iterator_or_completion)
    except Exception:
        # BUG FIX: on any early failure (503 above, a model error, or an
        # empty iterator) the ExitStack — and with it get_llm's inner lock —
        # was never closed, deadlocking every subsequent request.
        exit_stack.close()
        raise

    def iterator() -> Iterator[CreateCompletionResponse]:
        yield first_response
        yield from iterator_or_completion
        # Closing an already-closed ExitStack is a no-op, so this is safe
        # alongside the on_complete callback below.
        exit_stack.close()

    send_chan, recv_chan = anyio.create_memory_object_stream(10)
    return EventSourceResponse(
        recv_chan,
        data_sender_callable=partial(  # type: ignore
            get_event_publisher,
            request=request,
            inner_send_chan=send_chan,
            iterator=iterator(),
            on_complete=exit_stack.close,
        ),
        sep="\n",
        ping_message_factory=_ping_message_factory,
    )

@router.post(
    "/v1/chat/completions",
    summary="Chat Completion",
    dependencies=[Depends(authenticate)]
)
async def create_chat_completion(
    request: Request,
    body: CreateChatCompletionRequest
):
    """Stream a chat completion as server-sent events.

    Acquires the shared LLM (get_llm keeps its inner lock held for the life
    of the request via the ExitStack), runs the model in a threadpool, and
    streams JSON chunks back through an EventSourceResponse.
    """
    exit_stack = contextlib.ExitStack()
    # get_llm is a plain generator; wrap it as a context manager so closing
    # exit_stack releases the lock pair it manages.
    llm = await run_in_threadpool(
        lambda: exit_stack.enter_context(contextlib.contextmanager(get_llm)())
    )

    try:
        if llm is None:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="Service is not available",
            )

        model = llm(body.model)

        # Request fields the underlying model call does not accept.
        exclude = {
            "n",
            "best_of",
            "logit_bias_type",
            "user",
            "min_tokens",
        }
        kwargs = body.model_dump(exclude=exclude)

        iterator_or_completion: Iterator[CreateCompletionResponse] = (
            await run_in_threadpool(model.create_chat_completion, **kwargs)
        )

        # Pull the first chunk eagerly so model errors surface as an HTTP
        # error instead of a broken event stream.
        first_response = await run_in_threadpool(next, iterator_or_completion)
    except Exception:
        # BUG FIX: on any early failure (503 above, a model error, or an
        # empty iterator) the ExitStack — and with it get_llm's inner lock —
        # was never closed, deadlocking every subsequent request.
        exit_stack.close()
        raise

    def iterator() -> Iterator[CreateCompletionResponse]:
        yield first_response
        yield from iterator_or_completion
        # Closing an already-closed ExitStack is a no-op, so this is safe
        # alongside the on_complete callback below.
        exit_stack.close()

    send_chan, recv_chan = anyio.create_memory_object_stream(10)
    return EventSourceResponse(
        recv_chan,
        data_sender_callable=partial(  # type: ignore
            get_event_publisher,
            request=request,
            inner_send_chan=send_chan,
            iterator=iterator(),
            on_complete=exit_stack.close,
        ),
        sep="\n",
        ping_message_factory=_ping_message_factory,
    )