import uvicorn

from argparse import ArgumentParser
from typing import cast
from functools import lru_cache
from collections.abc import AsyncIterable
from contextlib import asynccontextmanager

from tqdm import tqdm
from fastapi import Query, FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import StreamingResponse

from llm import PromptData, GeneratedData, Llm, Gemma, TinyLLM, InferLLM
from config import settings


@lru_cache
def get_llm(testing=False, model="gemma"):
    """Return a cached LLM backend instance.

    When *testing* is true a stub ``Llm`` is returned; otherwise the
    backend is chosen by *model* (``"tinyllm"`` -> TinyLLM,
    ``"inferllm"`` -> InferLLM, anything else -> Gemma). ``lru_cache``
    ensures one instance per (testing, model) combination.
    """
    if testing:
        return Llm()
    # Dispatch table instead of an if-chain; unknown models fall back to Gemma.
    backends = {"tinyllm": TinyLLM, "inferllm": InferLLM}
    return backends.get(model, Gemma)()


async def chat_llm(prompt_data: PromptData):
    """Asynchronously yield a streamed JSON array of generated chunks.

    Emits an opening bracket, then one serialized ``GeneratedData`` per
    text chunk produced by the model (comma-separated), and finally a
    sentinel element with ``is_last_=True`` before the closing bracket.
    """
    # Fall back to the stub backend when testing or no checkpoint is configured.
    use_stub = settings.testing or (settings.ckpt == "" and settings.tiny_llm_ckpt == "")
    llm = get_llm(use_stub, model=prompt_data.model)
    print(f"chat_llm: {prompt_data}")
    # tinyllm replaces the whole text each step rather than appending deltas
    # (inferred from the flag name — confirm against the frontend consumer).
    replaces_full_text = prompt_data.model == "tinyllm"
    yield "[\n"
    index = 0
    async for chunk in llm.generate(prompt_data):
        item = GeneratedData(
            gen_text=chunk,
            seq_=index,
            is_last_=False,
            full_update=replaces_full_text,
        )
        yield item.json()
        yield ","
        index += 1
    # Terminal sentinel so the client knows the stream is complete.
    sentinel = GeneratedData(
        gen_text="",
        seq_=index,
        is_last_=True,
        full_update=replaces_full_text,
    )
    yield sentinel.json()
    yield "\n]\n"


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook; no startup/shutdown work is currently done.

    Startup code belongs before the ``yield``, shutdown code after it.
    A warm-up pass over ``chat_llm`` was prototyped here and is kept
    disabled for reference:
    """
    # async for word in tqdm(chat_llm(PromptData(prompt="1+1="))):
    #     print(word.strip())
    # print()
    yield


app = FastAPI(lifespan=lifespan)


@app.post("/api/llm/chat", summary="chat")
async def stream_response(
    prompt_data: PromptData,
    max_tokens: int = Query(256, title="max_tokens", description="max_tokens", ge=32),
    temperature: float = Query(
        0.7, title="temperature", description="temperature", gt=0.0, le=1.0
    ),
):
    """Stream generated chunks for *prompt_data* as a chunked response.

    Query parameters act as fallbacks: they apply only when the request
    body leaves ``max_tokens`` / ``temperature`` unset (falsy).
    """
    if not prompt_data.max_tokens:
        prompt_data.max_tokens = max_tokens
    if not prompt_data.temperature:
        prompt_data.temperature = temperature
    # chat_llm is an async generator; the cast only narrows the static type.
    body = cast(AsyncIterable, chat_llm(prompt_data))
    return StreamingResponse(content=body, media_type="text/event-stream")


@app.get("/api/hello")
async def root():
    """Trivial liveness endpoint returning a static greeting."""
    payload = {"message": "Hello World"}
    return payload


app.mount("/", StaticFiles(directory="web-static", html=True), name="web")

def main():
    """CLI entry point: parse server options and launch uvicorn.

    Options: --host (default 0.0.0.0), --port (default 8000),
    --reload (dev auto-reload), --testing (currently unused, see NOTE).
    """
    parser = ArgumentParser()
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", default=8000, type=int)
    parser.add_argument("--reload", action="store_true", default=False)
    # NOTE(review): --testing is parsed but never used — testing mode is
    # driven by `settings.testing` instead. Wire this flag in or remove it.
    parser.add_argument("--testing", action="store_true", default=False)
    args = parser.parse_args()

    # Import string ("main:app") is required for --reload to work.
    uvicorn.run("main:app", reload=args.reload, host=args.host, port=args.port)


# Allow `python main.py` to start the server directly.
if __name__ == "__main__":
    main()
