import torch
import threading
from plugins.common import success_print, error_print
# Module-level handles for the loaded model and tokenizer.
# NOTE(review): nothing in this file ever assigns these — presumably
# tools.glm.glm3 (or its load_model()) populates them; confirm against
# that module before relying on these names.
model = None
tokenizer = None

def load_LLM():
    """Import and return the LLM backend module (``tools.glm.glm3``).

    Returns:
        The imported module on success, or ``None`` if the import fails.
        Failure is reported via ``error_print`` instead of being raised,
        so callers must handle a ``None`` result.
    """
    try:
        from importlib import import_module

        success_print("开始加载LLM模型")
        LLM = import_module('tools.glm.glm3')
        return LLM
    except Exception:
        # Print the full traceback, not just str(e): errors raised while
        # importing the backend module are otherwise hard to diagnose.
        import traceback
        traceback.print_exc()
        error_print("LLM模型加载失败")
        return None  # explicit: downstream code must check for a failed load

# Resolve the backend module once at import time; this is None if the
# import inside load_LLM() failed (the except branch returns nothing).
LLM = load_LLM()
def load_model():
    """Load the model via the LLM backend, then release cached GPU memory.

    Runs in a background thread started at module import (see below), so
    any unhandled exception here only kills that thread with a stderr
    traceback — hence the explicit guard on a failed backend import.
    """
    if LLM is None:
        # load_LLM() failed; calling LLM.load_model() on None would raise
        # AttributeError inside the background thread. Report and bail out.
        error_print("LLM模型加载失败")
        return
    LLM.load_model()
    # Release unused cached allocator memory after loading
    # (a no-op when CUDA has not been initialized).
    torch.cuda.empty_cache()
    success_print("模型加载完成")

# Start the model load asynchronously in a background thread so that
# importing this module returns immediately instead of blocking on the load.
thread_load_model = threading.Thread(target=load_model)
thread_load_model.start()