import enum
import json

from fastapi import Request
from loguru import logger

from app.api.database.models import LLMModel, LLMServer
from app.api.database.models.llm import LLMDao, LLMModelType, LLMModelCategory
from app.api.exception.errcode import ServerExistError, BaseErrorCode, ServerError
from app.api.schemas import LLMServerCreateReq, LLMModelCreateReq, LLMServerInfo


class LLMService:
    """Service layer for registering LLM servers and models via LLMDao."""

    @classmethod
    def add_server(cls, request: Request, server: LLMServerCreateReq) -> LLMServerInfo:
        """Register a new LLM server.

        Args:
            request: the incoming FastAPI request (currently unused here).
            server: creation payload; its name must be unique.

        Returns:
            The persisted server wrapped as an LLMServerInfo.

        Raises:
            HTTPException: via ServerExistError when a server with the
                same name already exists.
        """
        # Enforce unique server names before inserting.
        exist_server = LLMDao.get_server_by_name(server.name)
        if exist_server:
            raise ServerExistError.http_exception()
        logger.info(f"add server: {server}")
        # Drop any client-supplied id so the database assigns its own.
        # Use model_dump() consistently with the pydantic-v2 API already
        # used on the ORM object below (the original mixed .dict()/.model_dump()).
        db_server = LLMServer(**server.model_dump(exclude={'id'}))
        db_server = LLMDao.add_server(db_server)
        return LLMServerInfo(**db_server.model_dump())

    @classmethod
    def is_valid_enum(cls, enum_instance: type[enum.Enum], value: str) -> bool:
        """Return True if *value* equals the value of any member of the enum class.

        Note: the original annotated the parameter as ``enum`` (the module);
        the correct annotation is ``type[enum.Enum]``.
        """
        return value in {member.value for member in enum_instance}

    @classmethod
    def add_model(cls, request: Request, model: LLMModelCreateReq) -> LLMModel:
        """Register a new LLM model after validating its type and categories.

        Args:
            request: the incoming FastAPI request (currently unused here).
            model: creation payload; model_type must be an LLMModelType value
                and every entry of model_category an LLMModelCategory value.

        Returns:
            The persisted LLMModel.

        Raises:
            HTTPException: via BaseErrorCode for an unsupported model type
                or category.
        """
        model_type = model.model_type
        model_category = model.model_category
        if not cls.is_valid_enum(LLMModelType, model_type):
            # Reject model types outside the LLMModelType enum.
            raise BaseErrorCode.http_exception(f"不支持的模型类型:{model_type}")
        for category in model_category:
            logger.debug(f"model_category:{category}")
            if not cls.is_valid_enum(LLMModelCategory, category):
                raise BaseErrorCode.http_exception(f"不支持的模型分类:{category}")

        # Drop any client-supplied id so the database assigns its own.
        db_model = LLMModel(**model.model_dump(exclude={'id'}))
        return LLMDao.add_model(db_model)
