from pydantic import BaseModel, Field
from pydantic_yaml import YamlStrEnum, YamlModel
from typing import List, Optional, Dict, Union, Annotated
from llmapi.base import *
from llmapi.utils import init_glm,load_embeddings_model
from llmapi.models import GLMModel, LLamaCPPModel, Text2vecModel
from llama_cpp import Llama
class ModelLoadType(YamlStrEnum):
    """Supported model-loading backends (serialized as plain strings in YAML)."""
    GLM = "glm"            # ChatGLM checkpoint loaded via init_glm()
    TEXT2VEC = "text2vec"  # embedding model loaded via load_embeddings_model()
    LLAMA = "llamacpp"     # model loaded through llama_cpp.Llama
    FAKE = "fake"          # placeholder; not handled by LLMAPIConfig.load_models()

class ModelInfo(YamlModel):
    """Configuration entry describing a single model to load.

    Fix: corrected the description typo "name of the mode" -> "name of the
    model", and updated the stale mtype description, which listed only two of
    the four ModelLoadType values.
    """

    # Human-readable identifier; becomes the key in LLMAPIConfig.load_models().
    name: str = Field(default=None, description="name of the model")
    # Filesystem path to the model weights/checkpoint passed to the loader.
    check_point_path: Optional[str] = Field(None, description="path to the model checkpoint")
    # Backend selector; compared against ModelLoadType values in load_models().
    mtype: str = Field(
        default=ModelLoadType.GLM,
        description="model loading type: glm, text2vec, llamacpp or fake",
    )

class LLAMAConfig(BaseModel):
    """Settings forwarded to llama.cpp when a LLAMA-type model is instantiated."""

    # Context window size handed to Llama as n_ctx.
    max_context_size: Optional[int] = Field(
        default=2048,
        description="Maximum context size",
    )
    # Layer count handed to Llama as n_gpu_layers; 0 keeps everything on CPU.
    gpu_layers: Optional[int] = Field(
        default=0,
        description="num layers for gpu to load",
    )
    
class LLMAPIConfig(YamlModel):
    """Top-level YAML configuration: which models to load and shared loader options."""

    # Models to instantiate; each entry selects its backend via mtype.
    models: Optional[List[ModelInfo]] = Field(default_factory=list)
    # When True, progress messages are printed during loading.
    verbose: Optional[bool] = False
    # Shared llama.cpp settings applied to every LLAMA-type model.
    llama: Optional[LLAMAConfig] = Field(default_factory=LLAMAConfig)

    def load_models(self) -> Dict[Annotated[str, "model name"], Union[ChatModel, EmbeddingModel]]:
        """
        Loads all the models from the config and returns them as a dictionary.

        Returns:
            Mapping from model name to the instantiated model wrapper
            (GLMModel, LLamaCPPModel or Text2vecModel).

        Notes:
            Entries whose mtype is not one of the handled backends (e.g.
            ModelLoadType.FAKE) are skipped; a message is printed when
            verbose is enabled so misconfigurations are visible.
        """
        if self.verbose:
            print("[+] Load models")
        result = {}
        for info in self.models:
            # mtype values are mutually exclusive, so chain with elif
            # rather than re-testing every branch after a match.
            if info.mtype == ModelLoadType.GLM:
                tokenizer, model = init_glm(info.check_point_path, verbose=self.verbose)
                result[info.name] = GLMModel(model=model, tokenizer=tokenizer, model_name=info.name)
            elif info.mtype == ModelLoadType.LLAMA:
                llm = Llama(
                    model_path=info.check_point_path,
                    n_ctx=self.llama.max_context_size,
                    n_gpu_layers=self.llama.gpu_layers,
                )
                result[info.name] = LLamaCPPModel(llama=llm, model_name=info.name)
            elif info.mtype == ModelLoadType.TEXT2VEC:
                model = load_embeddings_model(info.check_point_path)
                result[info.name] = Text2vecModel(model_name=info.name, model=model)
            elif self.verbose:
                # Previously unhandled types were dropped silently; keep the
                # best-effort behavior but surface the skip when verbose.
                print(f"[!] Skipping '{info.name}': unhandled mtype '{info.mtype}'")
        return result
