from transformers import AutoTokenizer, AutoModel
import config

# Load the VisualGLM tokenizer and model once at import time.
# trust_remote_code=True executes model code shipped with the checkpoint —
# only safe because config.visual_glm_path points at a trusted local/known repo.
tokenizer = AutoTokenizer.from_pretrained(config.visual_glm_path, trust_remote_code=True)
# .half() casts weights to fp16 to halve GPU memory; .cuda() requires a CUDA
# device to be available — NOTE(review): this import will fail on CPU-only hosts.
model = AutoModel.from_pretrained(config.visual_glm_path, trust_remote_code=True).half().cuda()


def getText(image_path: str) -> str:
    """Run the VisualGLM model on an image and return its text response.

    Args:
        image_path: Path to the image file handed to ``model.chat``.

    Returns:
        The model's generated text for ``config.img_prompt`` applied to the
        image. Conversation history is discarded — each call starts fresh.
    """
    # No `global` statement needed: `model` and `tokenizer` are only read,
    # never reassigned, so normal module-scope lookup applies.
    response, _history = model.chat(tokenizer, image_path, config.img_prompt, history=[])
    return response
