from fastapi import FastAPI, APIRouter, status, Request, Response, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from sse_starlette.sse import EventSourceResponse
from llmweb.context import Context
from llmweb.models import (
    APIResult, 
    APICode,
    CreateCompletionRequest,
    CreateChatCompletionRequest,
    Completion,
    ChatCompletion,
    CompletionChunk,
    ChatCompletionChunk,
)
from typing import List, AsyncGenerator, Any, Union
from llmweb import logger
import json
import uuid
import time

# Module-level routers: route handlers below attach to these at import time.
# `root_router` carries unprefixed service routes (e.g. /healthcheck);
# `api_router` carries the OpenAI-compatible API under the /v1 prefix.
root_router = APIRouter()
api_router = APIRouter(prefix="/v1")
def create_app(ctx: Context) -> FastAPI:
    """Build and configure the FastAPI application.

    Args:
        ctx: Application context holding the loaded models; stored on
            ``app.state.ctx`` so route handlers can reach it.

    Returns:
        A FastAPI instance with permissive CORS, the service routers, and
        JSON error handlers that wrap errors in the ``APIResult`` envelope.
    """
    app = FastAPI(
        title="llmweb",
        version="0.0.1",
    )
    app.state.ctx = ctx
    app.add_middleware(
        CORSMiddleware,
        allow_origins=['*'],
        allow_credentials=True,
        allow_methods=['*'],
        allow_headers=['*'],
    )
    # Include each module-level router into the app directly. The previous
    # form (`root_router.include_router(api_router)`) mutated the shared
    # module-level `root_router` on every call, so calling create_app()
    # twice registered the /v1 routes twice. The resulting paths are the
    # same for a single call: /healthcheck plus /v1/*.
    app.include_router(root_router)
    app.include_router(api_router)

    @app.exception_handler(StarletteHTTPException)
    async def custom_http_exception_handler(request: Request, exc: StarletteHTTPException):
        # Normalize HTTP errors into the APIResult JSON envelope.
        return JSONResponse(
            status_code=exc.status_code,
            content=APIResult(code=APICode.Failed, msg=str(exc.detail)).dict(
                exclude_none=True, exclude_unset=True
            ),
        )

    @app.exception_handler(RequestValidationError)
    async def validation_exception_handler(request: Request, exc: RequestValidationError):
        # 422 with the validation details in `data` so clients can see
        # which fields failed and what body was submitted.
        return JSONResponse(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            content=APIResult(
                code=APICode.Failed,
                msg="参数验证错误",
                data={"detail": exc.errors(), "body": exc.body},
            ).dict(exclude_none=True, exclude_unset=True),
        )

    return app


@root_router.get("/healthcheck", status_code=status.HTTP_200_OK, response_model=APIResult)
def health_check():
    """Liveness probe: always reports the service as healthy."""
    result = APIResult(code=0, msg="ok")
    return result

@api_router.post("/completions", status_code=status.HTTP_200_OK)
async def create_completion(request: Request, 
                            body: CreateCompletionRequest):
    """OpenAI-compatible text-completion endpoint.

    Streams SSE completion chunks when ``body.stream`` is set, otherwise
    accumulates the generated text and returns a single ``Completion``.

    Raises:
        HTTPException: 501 when ``logprobs`` is requested (unsupported),
            500 when the app context is missing or generation fails,
            400 when the requested model is not loaded.
    """
    if body.logprobs is not None:
        raise HTTPException(status_code=501, detail="Not implemented")

    if request.app.state.ctx is None:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="App context is None",
        )
    ctx: Context = request.app.state.ctx
    model_id = body.model
    if model_id not in ctx.models:
        raise HTTPException(status_code=400, detail=f"model {model_id} not found")

    model = ctx.models[model_id]

    stop = body.stop if body.stop is not None else []
    logprobs_or_none = None
    # prepare response
    # Text completions use the "cmpl-" id prefix; "chatcmpl-" (previously
    # used here, apparently copied from the chat endpoint) is the prefix
    # for chat completions in the OpenAI convention.
    completion_id: str = f"cmpl-{str(uuid.uuid4())}"
    created: int = int(time.time())
    if body.stream:
        async def stream_results() -> AsyncGenerator[CompletionChunk, None]:
            try:
                finish_reason: str = "length"
                async for response in model.generate(body.prompt, stop=stop):
                    # Truncate the chunk at the first stop sequence found.
                    for s in stop:
                        if s in response:
                            response = response.split(s)[0].strip()
                            finish_reason = "stop"
                            break
                    yield json.dumps({
                        "id": completion_id,
                        "object": "text_completion",
                        "created": created,
                        "model": model_id,
                        "choices": [
                            {
                                "text": response,
                                "index": 0,
                                "logprobs": logprobs_or_none,
                                "finish_reason": finish_reason,
                            }
                        ],
                    })
                    if "stop" == finish_reason:
                        break
                # OpenAI-style end-of-stream sentinel.
                yield "[DONE]"
            except Exception as e:
                logger.exception("Completion stream got an error: %s", e)
                # Surface the error as an SSE event so the client sees it.
                yield dict(data=json.dumps({"error": str(e)}))
            return
        return EventSourceResponse(stream_results())
    else:
        try:
            finish_reason: str = "length"
            response = ""
            async for result in model.generate(body.prompt, stop=stop):
                for s in stop:
                    if s in result:
                        result = result.split(s)[0].strip()
                        finish_reason = "stop"
                        break
                response += result
                if finish_reason == "stop":
                    break

            # NOTE(review): Completion is called with a positional dict —
            # assumes these response models are TypedDicts; confirm.
            return Completion( {
                "id": completion_id,
                "object": "text_completion",
                "created": created,
                "model": model_id,
                "choices": [
                    {
                        "text": response,
                        "index": 0,
                        "logprobs": logprobs_or_none,
                        "finish_reason": finish_reason,
                    }
                ],
                # Character counts, not tokenizer counts — approximation.
                "usage": {
                    "prompt_tokens": len(body.prompt),
                    "completion_tokens": len(response),
                    "total_tokens": len(body.prompt) + len(response),
                },
            })

        except Exception as e:
            logger.exception("Completion stream got an error: %s", e)
            # Previously only logged and fell through, returning None (an
            # empty 200 response). Re-raise as 500, consistent with the
            # chat endpoint.
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=str(e),
            )

    
@api_router.post(
    "/chat/completions",
    response_model=ChatCompletion,
)
async def create_chat_completion(
    request: Request,
    body: CreateChatCompletionRequest,
) -> Union[ChatCompletion, EventSourceResponse]:
    """OpenAI-compatible chat-completion endpoint.

    Renders the message history into a ``###role:content`` prompt, then
    streams SSE chat chunks when ``body.stream`` is set, otherwise returns
    a single ``ChatCompletion``. (The previous return annotation claimed
    an AsyncGenerator, but the function returns one of these two values.)

    Raises:
        HTTPException: 500 when the app context is missing or generation
            fails, 404 when the requested model is not loaded.
    """
    if request.app.state.ctx is None:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="App context is None",
        )
    ctx: Context = request.app.state.ctx
    model_id = body.model
    if model_id not in ctx.models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Model {model_id} not found",
        )
    model = ctx.models[model_id]
    stop = body.stop if body.stop is not None else []
    # NOTE(review): every non-"user" role (including "system") is rendered
    # as "assistant" — confirm this is intended.
    prompt = "".join(
        f'###{"user" if message.role == "user" else "assistant"}:{message.content}'
        for message in body.messages
    )
    prompt = prompt  + "###assistant:"
    # The role markers themselves act as stop sequences so the model does
    # not generate further turns.
    PROMPT_STOP = ["###assistant:", "###user:"]
    stop = PROMPT_STOP + stop

    # prepare response
    completion_id: str = f"chatcmpl-{str(uuid.uuid4())}"
    created: int = int(time.time())
    if body.stream:
        async def stream_results() -> AsyncGenerator[ChatCompletionChunk, None]:
            try:
                finish_reason: str = "length"
                # First chunk carries only the assistant role delta.
                yield json.dumps({
                        "id": completion_id,
                        "model": model_id,
                        "created": created,
                        "object": "chat.completion.chunk",
                        "choices": [
                            {
                                "index": 0,
                                "delta": {
                                    "role": "assistant",
                                },
                                "finish_reason": None,
                            }
                        ],
                    })
                async for resp in model.generate(prompt, stop=stop):
                    # Truncate the chunk at the first stop sequence found.
                    for s in stop:
                        if s in resp:
                            resp = resp.split(s)[0].strip()
                            finish_reason = "stop"
                            break

                    yield json.dumps({
                        "id": completion_id,
                        "model": model_id,
                        "created": created,
                        "object": "chat.completion.chunk",
                        "choices": [
                            {
                                "index": 0,
                                "delta": {
                                    "content": str(resp),
                                },
                                "finish_reason": finish_reason,
                            }
                        ],
                    })
                    if "stop" == finish_reason:
                        break
                # Final empty delta carrying the terminal finish_reason.
                yield json.dumps({
                    "id": completion_id,
                    "model": model_id,
                    "created": created,
                    "object": "chat.completion.chunk",
                    "choices": [
                        {
                            "index": 0,
                            "delta": {
                                "content": '',
                            },
                            "finish_reason": finish_reason,
                        }
                    ],
                })
                # End-of-stream sentinel, consistent with the /completions
                # endpoint and the OpenAI SSE protocol; previously missing,
                # leaving spec-compliant clients waiting indefinitely.
                yield "[DONE]"
            except Exception as e:
                logger.exception("Completion stream got an error: %s", e)
                # Surface the error as an SSE event so the client sees it.
                yield dict(data=json.dumps({"error": str(e)}))
            return
        return EventSourceResponse(stream_results())
    else:
        try:
            finish_reason: str = "length"
            response = ""
            async for result in model.generate(prompt, stop=stop):
                for s in stop:
                    if s in result:
                        result = result.split(s)[0].strip()
                        finish_reason = "stop"
                        break
                response += result
                if finish_reason == "stop":
                    break
            # NOTE(review): ChatCompletion is called with a positional dict
            # — assumes these response models are TypedDicts; confirm.
            return ChatCompletion( {
                "id": completion_id,
                "object": "chat.completion",
                "created": created,
                "model": model_id,
                "choices": [
                    {
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": str(response),
                        },
                        "finish_reason": finish_reason,
                    }
                ],
                # Character counts, not tokenizer counts — approximation.
                "usage": {
                    "prompt_tokens": len(prompt),
                    "completion_tokens": len(response),
                    "total_tokens": len(prompt) + len(response),
                },
            })

        except Exception as e:
            logger.exception("Completion stream got an error: %s", e)
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=str(e),
            )