import io
import os
import subprocess

import gradio as gr
import spaces
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

# flash-attn publishes no prebuilt wheel for this environment, so skip its
# CUDA build; extend os.environ rather than replacing it so pip keeps PATH.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
    shell=True,
)

model_ids = [
    'microsoft/Florence-2-large-ft',
    'microsoft/Florence-2-large',
    'microsoft/Florence-2-base-ft',
    'microsoft/Florence-2-base',
]

# Load every checkpoint and its processor once at startup so the model
# dropdown can switch between them without reloading.
models = {
    repo: AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True).to("cuda").eval()
    for repo in model_ids
}

processors = {
    repo: AutoProcessor.from_pretrained(repo, trust_remote_code=True)
    for repo in model_ids
}


DESCRIPTION = "# [Florence-2 OCR Demo](https://huggingface.co/microsoft/Florence-2-large)"

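# Palette for drawing region boxes/polygons; unused on the plain-OCR path but
# kept for the other Florence-2 tasks.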
colormap = ['blue','orange','green','purple','brown','pink','gray','olive','cyan','red',
            'lime','indigo','violet','aqua','magenta','coral','gold','tan','skyblue']

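# Convert a Matplotlib figure to a PIL image, for tasks that render detection
# boxes or segmentation masks (also unused by plain OCR).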
def fig_to_pil(fig):
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    return Image.open(buf)

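# Run a single Florence-2 task on one image. task_prompt is a task token such
# as '<OCR>'; @spaces.GPU requests a GPU for the call on ZeroGPU hardware.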
@spaces.GPU
def run_example(task_prompt, image, text_input=None, model_id='microsoft/Florence-2-large'):
    model = models[model_id]
    processor = processors[model_id]
    # Grounding-style tasks append free-form text after the task token.
    prompt = task_prompt if text_input is None else task_prompt + text_input
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        early_stopping=False,
        do_sample=False,
        num_beams=3,
    )
    # Keep special tokens: post_process_generation relies on them to find the
    # task marker and any region coordinates in the raw output.
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = processor.post_process_generation(
        generated_text,
        task=task_prompt,
        image_size=(image.width, image.height)
    )
    return parsed_answer
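
# A minimal usage sketch (the file name is hypothetical):
#   img = Image.open('receipt.png').convert('RGB')
#   run_example('<OCR>', img)  # -> {'<OCR>': 'text read from the image'}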


def process_image(image, task_prompt, text_input=None, model_id='microsoft/Florence-2-large'):
    # Gradio delivers the upload as a NumPy array; the processor expects PIL.
    image = Image.fromarray(image)

    if task_prompt == 'OCR':
        results = run_example('<OCR>', image, model_id=model_id)
        # post_process_generation returns a dict keyed by the task token.
        return results['<OCR>']
    # Only plain OCR is implemented in this demo; other prompts yield no output.
    return ""

css = """
  #output {
    height: 500px; 
    overflow: auto; 
    border: 1px solid #ccc; 
  }
"""


single_task_list = [
    'Caption', 'Detailed Caption', 'More Detailed Caption', 'Object Detection',
    'Dense Region Caption', 'Region Proposal', 'Caption to Phrase Grounding',
    'Referring Expression Segmentation', 'Region to Segmentation',
    'Open Vocabulary Detection', 'Region to Category', 'Region to Description',
    'OCR', 'OCR with Region'
]

cascaded_task_list = [
    'Caption + Grounding', 'Detailed Caption + Grounding', 'More Detailed Caption + Grounding'
]


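# Swap the task dropdown's choices when the user toggles between single-task
# and cascaded-task modes.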
def update_task_dropdown(choice):
    if choice == 'Cascaded task':
        return gr.Dropdown(choices=cascaded_task_list, value='Caption + Grounding')
    else:
        return gr.Dropdown(choices=single_task_list, value='Caption')



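# UI: image, model selector, and task controls on the left; recognized text on the right.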
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Tab(label="Florence-2 OCR"):
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="Input Picture")
                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value='microsoft/Florence-2-large')
                task_type = gr.Radio(choices=['Single task', 'Cascaded task'], label='Task type selector', value='Single task')
                task_prompt = gr.Dropdown(choices=single_task_list, label="Task Prompt", value="Caption")
                task_type.change(fn=update_task_dropdown, inputs=task_type, outputs=task_prompt)
                text_input = gr.Textbox(label="Text Input (optional)")
                submit_btn = gr.Button(value="Submit")
            with gr.Column():
                output_text = gr.Textbox(label="Output Text", elem_id="output")
               

        # Only OCR is implemented in process_image, so both cached examples use it.
        gr.Examples(
            examples=[
                ["image1.jpg", 'OCR'],
                ["image2.jpg", 'OCR']
            ],
            inputs=[input_img, task_prompt],
            outputs=[output_text],
            fn=process_image,
            cache_examples=True,
            label='Try examples'
        )

        submit_btn.click(process_image, [input_img, task_prompt, text_input, model_selector], [output_text])

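# debug=True surfaces tracebacks in the console, which helps while iterating on a Space.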
demo.launch(debug=True)