#coding:utf-8
import os
import sys
import time
from PIL import Image
import torch

# Make the project root importable so that `config.config` resolves when this
# script is executed directly from its own directory.
_parent = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.normpath(os.path.join(_parent, os.pardir))
if project_root not in sys.path:
    sys.path.append(project_root)

from config.config import Config


def image_understand(image_path, config: Config, query='请描述这张图片', device='xpu'):
    """Describe an image with the vision-language model held by *config*.

    Parameters
    ----------
    image_path : str
        Path to the image file to describe.
    config : Config
        Project configuration; ``config.image_model`` yields a
        ``(model, processor)`` pair and ``config.max_new_tokens`` bounds the
        generation length.
    query : str, optional
        User prompt sent with the image. Defaults to the original hard-coded
        Chinese prompt ("please describe this picture").
    device : str, optional
        Device the processed inputs are moved to (default ``'xpu'``).

    Returns
    -------
    str
        The decoded model output (also printed to stdout).
    """
    # Message formatting follows
    # https://huggingface.co/microsoft/Phi-3-vision-128k-instruct#sample-inference-code
    messages = [
        {"role": "user", "content": "<|image_1|>\n{prompt}".format(prompt=query)},
    ]
    model, processor = config.image_model
    prompt = processor.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True)

    # Image.open is lazy and keeps the file handle open until the image is
    # closed — use a context manager so the handle is released promptly.
    with Image.open(image_path) as image:
        # Generate predicted tokens without tracking gradients.
        with torch.inference_mode():
            inputs = processor(prompt, [image], return_tensors="pt").to(device)
            st = time.time()
            # NOTE: do_sample=True with temperature makes output
            # non-deterministic between runs (kept from the original).
            output = model.generate(**inputs,
                                    eos_token_id=processor.tokenizer.eos_token_id,
                                    num_beams=1,
                                    do_sample=True,
                                    max_new_tokens=config.max_new_tokens,
                                    temperature=0.5
                                    )
            end = time.time()
            print(f'Inference time: {end - st} s')
            output_str = processor.decode(output[0],
                                          skip_special_tokens=True,
                                          clean_up_tokenization_spaces=False)
            print('-' * 20, 'Prompt', '-' * 20)
            print(f'Message: {messages}')
            print(output_str)
    return output_str


if __name__ == '__main__':
    # Resolve the sample image relative to the project root (computed at
    # module import time) rather than the current working directory, so the
    # demo works no matter where it is launched from.
    image_understand(os.path.join(project_root, "img", "travel.png"), Config())
