import torch
from transformers import AutoModel
from transformers import AutoTokenizer
import util
from transformers import TextIteratorStreamer
from threading import Thread

# Path to the locally downloaded InternVL2-8B checkpoint.
path = 'hf-models/InternVL2-8B'
# Load the multimodal model in bfloat16 directly onto the GPU.
# trust_remote_code is required because InternVL2 ships custom modeling
# code with the checkpoint; .eval() disables dropout for inference.
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True).eval().cuda()
# use_fast=False: the checkpoint's custom tokenizer — presumably no fast
# (Rust) implementation is available for it; TODO confirm.
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)



def recognize(image_url, target_text):
    """Extract the requested fields from an image using InternVL2.

    Streams the model's answer to stdout chunk-by-chunk while it is being
    generated, then returns the accumulated text (expected to be a JSON
    string mapping each requested key to its extracted value, per the
    prompt's instructions).

    Args:
        image_url: Image path/URL, forwarded to ``util.load_image``.
        target_text: ``|``-separated list of field names to extract.

    Returns:
        The full generated response as a single string.
    """
    # `max_num` caps the number of tiles the image is split into.
    pixel_values = util.load_image(image_url,
                                   max_num=6).to(torch.bfloat16).cuda()
    # The prompt is intentionally in Chinese (task/model language). It asks
    # for a JSON object with one key-value pair per requested field, and
    # fixed fallback values for "not found" / "unreadable".
    question = f'''
    <image>\n
    你现在的任务是从图片中提取相关信息，提取目标包括：{target_text}，各提取目标使用|分隔，请自行拆分，不要随意识别不相关信息。
    要求：
    1. 返回结果使用json格式，每条数据包含一个key-value对，key值为我指定的关键信息，value值为所抽取的结果。
    2. 仅考虑单条结果情况。
    3. 如果认为OCR识别结果中没有关键信息key，则将value赋值为“未找到相关信息”即可。如果图片模糊，请不要随意猜测内容，将value赋值为“无法识别”即可。 
    4. 请只输出json格式的结果，不要包含其它多余文字。.
    '''
    # Streamer yields decoded text chunks as the model produces them;
    # the timeout guards the consumer loop against a hung generation.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=10)
    generation_config = dict(max_new_tokens=1024, do_sample=False, streamer=streamer)
    # Run generation in a background thread so this thread can consume
    # the stream concurrently.
    thread = Thread(target=model.chat, kwargs=dict(
        tokenizer=tokenizer, pixel_values=pixel_values, question=question,
        history=None, return_history=False, generation_config=generation_config,
    ))
    thread.start()

    # Collect chunks in a list and join once at the end (avoids the
    # quadratic cost of repeated string concatenation).
    chunks = []
    for new_text in streamer:
        # The conversation separator marks the end of the assistant turn.
        if new_text == model.conv_template.sep:
            break
        chunks.append(new_text)
        print(new_text, end='', flush=True)  # echo streamed output live
    # Fix: wait for the generation thread to finish before returning.
    # The original never joined it, so the function could return while
    # model.chat was still running and any late error went unnoticed.
    thread.join()
    generated_text = ''.join(chunks)

    print(f'User: {question}')
    print(f'Assistant: {generated_text}')
    return generated_text
