import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer


# Model location and target device for inference.
MODEL_PATH = "/data/llm/models/glm-4v-9b"
device = "cuda"

# Load the GLM-4V tokenizer and model weights (bfloat16) onto the GPU,
# then switch the model to eval mode for inference.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
model = (
    AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True,
    )
    .to(device)
    .eval()
)


def glm4v_9B(jpg_name, question=''):
    """Run the GLM-4V-9B vision-language model on an image and return its answer.

    Parameters
    ----------
    jpg_name : str
        Path to the image file to analyze.
    question : str, optional
        Prompt sent alongside the image. When empty, a default Chinese
        prompt is used that asks the model to OCR an invoice and emit the
        key fields (invoice number, date, buyer info, amounts, tax, ...)
        as JSON.

    Returns
    -------
    str
        The decoded model response (generated text only; the prompt tokens
        are stripped before decoding).
    """
    # Context manager closes the underlying file handle; convert() returns
    # a new in-memory RGB image, so it remains valid after the file closes.
    with Image.open(jpg_name) as img:
        image = img.convert('RGB')
    if not question:
        # Default prompt (fixed typo: "orc" -> "ocr").
        question = '''这是一张发票，先利用ocr识别出图片内容，再给出如下字段：发票号码，开票日期，购买方名称，购买方纳税人识别号，购买方地址、电话，购买方开户行及账号，货物或应税劳务、服务名称，单价，金额，税率，税额，价税合计小写, 并用json格式输出'''
    # Build chat-formatted, tokenized inputs for the multimodal model.
    inputs = tokenizer.apply_chat_template(
        [{"role": "user", "image": image, "content": question}],
        add_generation_prompt=True,
        tokenize=True,
        return_tensors="pt",
        return_dict=True,
    )
    inputs = inputs.to(device)
    # top_k=1 makes sampling effectively greedy; max_length caps the total
    # sequence (prompt + generation) at 2500 tokens.
    gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
    with torch.no_grad():
        outputs = model.generate(**inputs, **gen_kwargs)
        # Drop the prompt tokens so only newly generated tokens remain.
        outputs = outputs[:, inputs['input_ids'].shape[1]:]
        # skip_special_tokens=True removes markers such as end-of-sequence
        # tokens from the returned text.
        res = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return res
