import logging
from typing import Optional

from gpt.configs.model_config import get_device
from gpt.model.adapter.llm_adapter import LLMModelAdapter
from gpt.model.base import ModelType
from gpt.model.parameters import ModelParameters
from gpt.util.parameter_utils import genenv_ignoring_key_case

logger = logging.getLogger(__name__)


def get_model_real_path(model_name, default_model_path):
    """Resolve the on-disk path for *model_name* from the environment.

    Lookup order:
    1. A model-specific variable, ``<MODEL_NAME>_MODEL_PATH`` (case-insensitive;
       dashes in the model name are mapped to underscores).
    2. The generic ``MODEL_PATH`` variable, falling back to
       *default_model_path* when unset.
    """
    prefix = f"{model_name}_".replace("-", "_")
    model_specific_path = genenv_ignoring_key_case("model_path", env_prefix=prefix)
    if model_specific_path:
        return model_specific_path
    return genenv_ignoring_key_case("model_path", default_value=default_model_path)


class ModelLoader:
    """Loads an LLM through a model-type-specific adapter.

    Holds the target device, model path/name, and the prompt template
    resolved from the model parameters at load time.
    """

    def __init__(self, model_path: str, model_name: Optional[str] = None) -> None:
        self.device = get_device()
        self.model_path = model_path
        self.model_name = model_name
        # Populated from ModelParameters in loader_with_params().
        self.prompt_template: Optional[str] = None

    def loader_with_params(
            self, model_params: ModelParameters, llm_adapter: LLMModelAdapter
    ):
        """Load the model described by *model_params* via *llm_adapter*.

        Args:
            model_params: Parameters for the load (including prompt_template).
            llm_adapter: Adapter that knows how to load this model type.

        Returns:
            Whatever ``llm_adapter.load_from_params`` returns (model handle).

        Raises:
            ValueError: If the adapter reports a model type other than PROXY.
        """
        model_type = llm_adapter.model_type()
        self.prompt_template = model_params.prompt_template
        if model_type == ModelType.PROXY:
            return llm_adapter.load_from_params(model_params)
        # ValueError subclasses Exception, so existing callers that catch
        # Exception still work; message typo "Unkown" fixed.
        raise ValueError(f"Unknown model type {model_type}")
