
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSequenceClassification,BitsAndBytesConfig
import pymupdf
# from accelerate import infer_auto_device_map
# from transformers.generation import GenerationConfig

import os
# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128" #, max_cached_engines:10

# Report the installed torch version and CUDA availability at startup so
# GPU configuration problems are visible immediately.
for _diagnostic in (torch.__version__, torch.cuda.is_available()):
    print(_diagnostic)

def dealwithImage(model, tokenizer, image, device):
    """Run one extraction turn of the Qwen-VL chat model on a single image.

    Builds a multimodal query (image + a Chinese instruction asking the model
    to extract the sample-ID information as JSON) and performs a single
    fresh-history chat turn.

    Parameters:
        model: Qwen-VL-Chat model; must expose ``chat(tokenizer, query, history)``.
        tokenizer: matching tokenizer; must expose ``from_list_format``.
        image: path of the image file to analyze.
        device: unused here; kept for interface compatibility with callers.

    Returns:
        The model's text response (also printed), so callers can collect results.
    """
    # Combine the image and the instruction into one query string in the
    # format the Qwen-VL tokenizer expects.
    query = tokenizer.from_list_format([
        {'image': image},
        {'text': '请帮我提取样本编号的信息，以JSON格式提供。'},
    ])

    # history=None starts a fresh conversation for every image.
    response, _history = model.chat(tokenizer, query=query, history=None)
    print(response)
    return response

def pdf_to_image(pdf_path, model, tokenizer, device):
    """Render each PDF page to a JPEG and run the extraction model on it.

    Pages are scaled so the longer side becomes exactly 1120 px (the vision
    model's expected maximum edge — TODO confirm against the model card),
    saved into an ``images/`` folder next to the PDF, then passed one by one
    to ``dealwithImage``.

    Parameters:
        pdf_path: path of the input PDF file.
        model: forwarded to ``dealwithImage``.
        tokenizer: forwarded to ``dealwithImage``.
        device: forwarded to ``dealwithImage`` (currently unused there).

    Returns:
        List of the image file paths that were written, in page order.
    """
    # Page images go into an "images" subfolder beside the PDF.
    base_path = os.path.dirname(pdf_path)
    image_path = os.path.join(base_path, 'images')
    os.makedirs(image_path, exist_ok=True)

    saved = []
    docu = pymupdf.open(pdf_path)
    try:
        for page_number in range(len(docu)):
            page = docu[page_number]

            # Uniform zoom that maps the longer page side to 1120 px while
            # preserving the aspect ratio.
            w = page.rect.width
            h = page.rect.height
            zoom = 1120 / max(w, h)
            mat = pymupdf.Matrix(zoom, zoom)
            pix = page.get_pixmap(matrix=mat)

            # get_pixmap() without alpha yields raw RGB sample bytes.
            img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)

            image_file_name = os.path.join(image_path, f"{page_number}.jpg")
            img.save(image_file_name)
            saved.append(image_file_name)

            # Run the extraction model on the rendered page.
            dealwithImage(model, tokenizer, image_file_name, device)
    finally:
        # Close the document even if rendering or inference fails.
        docu.close()
    return saved

def test(pdf_path):
    """Load the local Qwen-VL-Chat model and run extraction over a PDF.

    Loads the tokenizer and model from the local checkpoint directory,
    then renders and processes every page via ``pdf_to_image``.

    Parameters:
        pdf_path: path of the PDF whose pages should be analyzed.
    """
    device = "cuda"
    # Fix the seed so any sampled generation is reproducible between runs.
    torch.manual_seed(1234)

    # trust_remote_code is required: Qwen-VL-Chat ships custom model and
    # tokenizer code inside the checkpoint.
    # Note: the default behavior now has injection attack prevention off.
    tokenizer = AutoTokenizer.from_pretrained(
        "/opt/ai/llm/models/Qwen-VL-Chat", trust_remote_code=True
    )

    # Load on the GPU in the checkpoint's default dtype; .eval() disables
    # dropout etc. for inference.
    model = AutoModelForCausalLM.from_pretrained(
        "/opt/ai/llm/models/Qwen-VL-Chat", device_map="cuda", trust_remote_code=True
    ).eval()

    pdf_to_image(pdf_path, model, tokenizer, device)

if __name__ == "__main__":
    # Only run the demo when executed as a script, not when imported —
    # importing this module must not trigger a multi-GB model load.
    test("xiaojuan.pdf")