# AppRegistry: process-wide storage for shared global state (models, config constants)
import torch
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer

class AppRegistry(object):
    """Process-wide registry holding shared model state and configuration.

    All real state lives in *class* attributes; the class is used statically
    via ``AppRegistry.initialize()`` / ``AppRegistry.is_initialized()``.
    Instantiating it is not required.
    """

    def __init__(self):
        # Kept for backward compatibility with callers that instantiate the
        # registry; all meaningful state is class-level.
        self.name = 'common.app_registry.AppRegistry'

    @staticmethod
    def is_initialized() -> bool:
        """Return True once ``initialize()`` has loaded the main model."""
        return AppRegistry.model is not None

    @staticmethod
    def initialize() -> None:
        """Load tokenizer, causal-LM and embedding model once (idempotent).

        Subsequent calls are no-ops while ``model`` is non-None.
        """
        if AppRegistry.is_initialized():
            return
        AppRegistry.tokenizer = AutoTokenizer.from_pretrained(AppRegistry.path)
        # NOTE(review): trust_remote_code=True executes code shipped with the
        # model repo — only use with trusted model sources.
        AppRegistry.model = AutoModelForCausalLM.from_pretrained(
            AppRegistry.path,
            torch_dtype=torch.bfloat16,
            device_map=AppRegistry.device,
            trust_remote_code=True,
        )
        AppRegistry.emb_model = AutoModel.from_pretrained(
            AppRegistry.DEFAULT_EMBEDDING_MODEL,
            trust_remote_code=True,
        )
        # AppRegistry.emb_model.eval()

    # Matplotlib figure/axes handles — populated elsewhere (presumably by
    # plotting code; verify against callers).
    fig, ax, axs = None, None, None

    # VectorDB settings
    VDB_FN = './work/vdb/rag001'       # on-disk vector DB location
    COLLECTION_NAME = 't_rag'          # collection used for RAG documents
    RAG_DIM = 1024                     # embedding dimensionality

    # Model / generation constants
    DEFAULT_MODEL = "openbmb/MiniCPM4-8B"
    DEFAULT_EMBEDDING_MODEL = "openbmb/MiniCPM-Embedding-Light"
    DEFAULT_TEMPERATURE = 0.7
    DEFAULT_MAX_TOKENS = 500
    DEFAULT_CHUNK_SIZE = 1000
    DEFAULT_CHUNK_OVERLAP = 200
    DEFAULT_TOP_K = 3

    # Active model path/device; ``path`` aliases DEFAULT_MODEL to avoid
    # duplicating the literal (same value as before).
    path = DEFAULT_MODEL
    device = "cuda"

    # Lazily-populated handles; None until ``initialize()`` runs.
    tokenizer = None
    model = None
    emb_model = None