
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSequenceClassification,BitsAndBytesConfig
import pymupdf
from accelerate import infer_auto_device_map

import os
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128" #, max_cached_engines:10

print(torch.__version__)
print(torch.cuda.is_available())

def dealwithImage(model, tokenizer, image, device):
    """Describe a single image with a GLM-4V style chat model.

    Args:
        model: causal LM exposing ``generate`` (e.g. THUDM/glm-4v-9b).
        tokenizer: matching tokenizer exposing ``apply_chat_template``.
        image: PIL image to describe.
        device: device string the inputs are moved to (e.g. "cuda").

    Returns:
        The decoded model response as a string (also printed).
    """
    gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
    query = '描述这张图片'  # prompt text: "describe this picture"
    inputs = tokenizer.apply_chat_template(
        [{"role": "user", "image": image, "content": query}],
        add_generation_prompt=True,
        tokenize=True,
        return_tensors="pt",
        return_dict=True,
    )  # chat mode
    inputs = inputs.to(device=device)
    with torch.no_grad():
        outputs = model.generate(**inputs, **gen_kwargs)
        # Strip the prompt tokens; keep only the newly generated part.
        outputs = outputs[:, inputs['input_ids'].shape[1]:]
        text = tokenizer.decode(outputs[0])
        print(text)
        return text

# testChatGlm4V2()

def pdf_to_image(pdf_path, model, tokenizer, device, max_side=1120):
    """Render each PDF page to an RGB image and feed it to the model.

    Each page is scaled uniformly so its longer edge is ``max_side`` pixels
    (1120 is the GLM-4V vision input limit), rendered with PyMuPDF, converted
    to a PIL image and passed to ``dealwithImage``.

    Args:
        pdf_path: path to the PDF file.
        model: forwarded to ``dealwithImage``.
        tokenizer: forwarded to ``dealwithImage``.
        device: forwarded to ``dealwithImage``.
        max_side: target length of the page's longer edge, in pixels.
    """
    # Context manager guarantees the document is closed even if a page
    # fails mid-loop (the original only closed it on the success path).
    with pymupdf.open(pdf_path) as docu:
        for page in docu:
            w, h = page.rect.width, page.rect.height
            print(w, h)
            # Uniform zoom so the longer edge becomes max_side.
            zoom = max_side / max(w, h)
            mat = pymupdf.Matrix(zoom, zoom)
            pix = page.get_pixmap(matrix=mat)
            # get_pixmap() defaults to RGB with no alpha, so the raw samples
            # map directly onto a PIL "RGB" image of the same dimensions.
            img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
            dealwithImage(model, tokenizer, img, device)

# pdf_to_image("D:\\xiaojuan.pdf")

def testChatGlm4V2(pdf_path):
    """Load GLM-4V-9B and describe every page of *pdf_path*.

    The model is loaded with ``device_map="auto"`` so accelerate places the
    weights across the available GPUs/CPU automatically.

    Args:
        pdf_path: PDF file whose pages will be rendered and described.
    """
    device = "cuda"
    model_path = "/opt/ai/llm/models/THUDM_glm-4v-9b"

    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # NOTE: with device_map="auto" the weights are already placed by
    # accelerate; calling `.to(device)` afterwards (as the original did)
    # raises an error for models dispatched across several devices, so the
    # explicit move is omitted here.
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        device_map="auto",
        trust_remote_code=True,
    ).eval()

    pdf_to_image(pdf_path, model, tokenizer, device)

testChatGlm4V2("xiaojuan.pdf")
