from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch 
import json, glob, os, sys

def get_table_extract_prompt():
    """Return the Chinese instruction prompt that asks the VL model to extract
    every table (description, data unit, markdown body) from a document image,
    using placeholder markers like <表格1的描述> to show the expected layout."""
    return '''
# 请识别文档中所包含的表格，包括表格描述、数据单位和表格内容，其中表格内容输出为md格式。

## 如果文档中不包含表格，输出“无表格“。

##如果文档中包含多个表格，每表格分别输出，如下所示：

描述：<表格1的描述>
单位：<表格1的数据单位>
<表格1的内容>

描述：<表格2的描述>
单位：<表格2的数据单位>
<表格2的内容>

......
    '''

def get_table_extract_prompt2():
    """Return the variant extraction prompt whose example block uses bare
    描述/单位/内容 field labels instead of angle-bracket placeholders.

    This is the prompt actually used by get_vl_outputs()."""
    return '''
# 请识别文档中所包含的表格，包括表格描述、数据单位和表格内容，其中表格内容输出为md格式。

## 如果文档中不包含表格，输出“无表格“。

##如果文档中包含多个表格，每表格分别输出，如下所示：

描述：
单位：
内容：

描述：
单位：
内容：

......
    '''
def get_messages(img_file_path, prompt="Describe the image."):
    """Build a single-turn chat message list pairing one image with a text prompt.

    Args:
        img_file_path: Path (or URI) of the image to attach.
        prompt: Text instruction sent alongside the image.

    Returns:
        A one-element list containing the user message; its content holds the
        image entry followed by the text entry, in the format expected by
        the Qwen-VL chat template.
    """
    image_entry = {"type": "image", "image": str(img_file_path)}
    text_entry = {"type": "text", "text": str(prompt)}
    return [{"role": "user", "content": [image_entry, text_entry]}]

def is_no_table_intent(response):
    """Heuristically decide whether a model response means "no table found".

    A response counts as a no-table answer only when it is short, does not
    look like an actual extraction (no 描述/单位 field labels), and contains
    one of the known refusal phrases.

    Args:
        response: Raw text returned by the VL model.

    Returns:
        True if the response indicates the page contains no table,
        False otherwise.
    """
    # Real extractions are long; a genuine "no table" reply is short.
    if len(response) > 50:
        return False

    # Field labels mean a table was actually emitted.
    # Fix: the original used `response.find("单位") > 0`, which missed a
    # match at position 0; use plain containment instead.
    if "描述" in response or "单位" in response:
        return False

    # Known ways the model phrases "this document has no table".
    patterns = ["无表格", "没有表格", "没有包含", "没有提供", "没有包括"]
    return any(pattern in response for pattern in patterns)

def get_page_id(img_path):
    """Extract the trailing page number from an image path like '.../page_12.png'.

    The page id is whatever follows the last underscore in the filename stem,
    parsed as an int. Raises ValueError if that part is not numeric.
    """
    filename = os.path.basename(img_path)
    stem, _ext = os.path.splitext(filename)
    return int(stem.rsplit('_', 1)[-1])
    
def get_vl_outputs(model, batch, proc=None):
    """Run the VL model on a batch of page images and return the decoded texts.

    Args:
        model: Loaded Qwen2.5-VL model used for generation.
        batch: List of image file paths, one prompt/message per image.
        proc: Optional Hugging Face processor. Defaults to the module-level
            ``processor`` global for backward compatibility (fix: the original
            depended on that global silently, with no way to inject another).

    Returns:
        List of generated strings, one per image in ``batch``, with the
        prompt tokens stripped off.
    """
    if proc is None:
        proc = processor  # module-level processor created in __main__

    messages = [get_messages(img, get_table_extract_prompt2()) for img in batch]
    text = proc.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    image_inputs, video_inputs = process_vision_info(messages)
    inputs = proc(
        text=text,
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # Generate, then drop the echoed prompt so only new tokens are decoded.
    generated_ids = model.generate(**inputs, max_new_tokens=8192)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_texts = proc.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_texts
        
# main
if __name__ == '__main__':
    img_path = "pdf/eec23035376ae0e339a7643402fdbdccd92ad703/"
    extract_path = "data/湖南长远锂科股份有限公司_table.jsonl"
    company = "湖南长远锂科股份有限公司"

    # Qwen2.5-VL-7B
    model_name = "/root/autodl-tmp/models/Qwen/Qwen2.5-VL-7B-Instruct"

    # flash_attention_2 gives better speed and memory use, especially for
    # multi-image and video inputs.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
        device_map="auto",
    )

    # Module-level processor: get_vl_outputs() falls back to this global.
    processor = AutoProcessor.from_pretrained(model_name, use_fast=False)

    # Collect the page images and sort them numerically by page id
    # (a plain string sort would place page_10 before page_2).
    # Fix: os.path.join avoids the double slash of `img_path + "/*.png"`.
    imgs = sorted(glob.glob(os.path.join(img_path, "*.png")), key=get_page_id)

    batch_size = 1
    table_data = []
    # Fix: range-with-step replaces the manual start_idx/end_idx while loop.
    for start_idx in range(0, len(imgs), batch_size):
        batch = imgs[start_idx:start_idx + batch_size]

        # Run the VL model on this batch of page images.
        output_texts = get_vl_outputs(model, batch)

        for img, result in zip(batch, output_texts):
            print(f"processing {img}")
            if is_no_table_intent(result):
                continue  # page has no table; skip it

            item = {
                "company": company,
                "page": get_page_id(img),
                "content": result,
            }
            table_data.append(item)

            print(item['page'])
            print(item['content'])
            print("\n")
            sys.stdout.flush()

    # Fix: explicit UTF-8 encoding so the Chinese JSONL round-trips
    # regardless of the platform's default locale encoding.
    with open(extract_path, 'w', encoding='utf-8') as f:
        for item in table_data:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")