ivelin committed
Commit 3798862
2 Parent(s): c04ab4e 94fd60d

fix: gradio update


Signed-off-by: ivelin <ivelin.eth@gmail.com>

Files changed (3):
  1. .gitignore +1 -0
  2. app.py +51 -38
  3. start-ec2-service.sh +10 -0
.gitignore CHANGED
@@ -1 +1,2 @@
 __pycache__/
+flagged/
app.py CHANGED
@@ -7,18 +7,26 @@ import html
 from transformers import DonutProcessor, VisionEncoderDecoderModel
 
 
-global model, processor, device
+global model, loaded_revision, processor, device
+model = None
+previous_revision = None
+processor = None
+device = None
+loaded_revision = None
 
 
 def load_model(pretrained_revision: str = 'main'):
-    global model, processor, device
+    global model, loaded_revision, processor, device
     pretrained_repo_name = 'ivelin/donut-refexp-click'
     # revision can be git commit hash, branch or tag
     # use 'main' for latest revision
     print(
         f"Loading model checkpoint from repo: {pretrained_repo_name}, revision: {pretrained_revision}")
+    if processor is None or loaded_revision is None or loaded_revision != pretrained_revision:
+        loaded_revision = pretrained_revision
+
     processor = DonutProcessor.from_pretrained(
-        pretrained_repo_name, revision=pretrained_revision, use_auth_token="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")
+        pretrained_repo_name, revision=pretrained_revision) # , use_auth_token="...")
     processor.image_processor.do_align_long_axis = False
     # do not manipulate image size and position
     processor.image_processor.do_resize = False
@@ -28,8 +36,8 @@ def load_model(pretrained_revision: str = 'main'):
     processor.image_processor.do_normalize = True
     print(f'processor image size: {processor.image_processor.size}')
     model = VisionEncoderDecoderModel.from_pretrained(
-        pretrained_repo_name, use_auth_token="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", revision=pretrained_revision)
-
+        pretrained_repo_name, revision=pretrained_revision) # use_auth_token="...",
+    print(f'model checkpoint loaded')
     device = "cuda" if torch.cuda.is_available() else "cpu"
     model.to(device)
 
@@ -90,7 +98,7 @@ def translate_point_coords_from_out_to_in(point=None, input_image_size=None, out
         f"translated point={point}, resized_image_size: {resized_width, resized_height}")
 
 
-def process_refexp(image, prompt: str, model_revision: str = 'main'):
+def process_refexp(image, prompt: str, model_revision: str = 'main', return_annotated_image: bool = True):
 
     print(f"(image, prompt): {image}, {prompt}")
 
@@ -183,13 +191,16 @@ def process_refexp(image, prompt: str, model_revision: str = 'main'):
     print(
         f"to image pixel values: x, y: {x, y}")
 
-    # draw center point circle
-    img1 = ImageDraw.Draw(image)
-
-    r = 30
-    shape = [(x-r, y-r), (x+r, y+r)]
-    img1.ellipse(shape, outline="green", width=20)
-    img1.ellipse(shape, outline="white", width=10)
+    if return_annotated_image:
+        # draw center point circle
+        img1 = ImageDraw.Draw(image)
+        r = 30
+        shape = [(x-r, y-r), (x+r, y+r)]
+        img1.ellipse(shape, outline="green", width=20)
+        img1.ellipse(shape, outline="white", width=10)
+    else:
+        # do not return image if its an API call to save bandwidth
+        image = None
 
     return image, center_point
 
@@ -197,33 +208,34 @@ def process_refexp(image, prompt: str, model_revision: str = 'main'):
 title = "Demo: GuardianUI RefExp Click"
 description = "Gradio Demo for Donut RefExp task, an instance of `VisionEncoderDecoderModel` fine-tuned on [UIBert RefExp](https://huggingface.co/datasets/ivelin/ui_refexp_saved) Dataset (UI Referring Expression). To use it, simply upload your image and type a prompt and click 'submit', or click one of the examples to load them. Optionally enter value for model git revision; latest checkpoint will be used by default."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.15664' target='_blank'>Donut: OCR-free Document Understanding Transformer</a> | <a href='https://github.com/clovaai/donut' target='_blank'>Github Repo</a></p>"
-examples = [["example_1.jpg", "select the setting icon from top right corner", ""],
-            ["example_1.jpg", "click on down arrow beside the entertainment", ""],
-            ["example_1.jpg", "select the down arrow button beside lifestyle", ""],
-            ["example_1.jpg", "click on the image beside the option traffic", ""],
-            ["example_3.jpg", "select the third row first image", ""],
-            ["example_3.jpg", "click the tick mark on the first image", ""],
-            ["example_3.jpg", "select the ninth image", ""],
-            ["example_3.jpg", "select the add icon", ""],
-            ["example_3.jpg", "click the first image", ""],
-            ["val-image-4.jpg", 'select 4153365454', ""],
-            ['val-image-4.jpg', 'go to cell', ""],
-            ['val-image-4.jpg', 'select number above cell', ""],
-            ["val-image-1.jpg", "select calendar option", ""],
-            ["val-image-1.jpg", "select photos&videos option", ""],
-            ["val-image-2.jpg", "click on change store", ""],
-            ["val-image-2.jpg", "click on shop menu at the bottom", ""],
-            ["val-image-3.jpg", "click on image above short meow", ""],
-            ["val-image-3.jpg", "go to cat sounds", ""],
-            ["example_2.jpg", "click on green color button", ""],
-            ["example_2.jpg", "click on text which is beside call now", ""],
-            ["example_2.jpg", "click on more button", ""],
-            ["example_2.jpg", "enter the text field next to the name", ""],
+examples = [["example_1.jpg", "select the setting icon from top right corner", "", True],
+            ["example_1.jpg", "click on down arrow beside the entertainment", "", True],
+            ["example_1.jpg", "select the down arrow button beside lifestyle", "", True],
+            ["example_1.jpg", "click on the image beside the option traffic", "", True],
+            ["example_3.jpg", "select the third row first image", "", True],
+            ["example_3.jpg", "click the tick mark on the first image", "", True],
+            ["example_3.jpg", "select the ninth image", "", True],
+            ["example_3.jpg", "select the add icon", "", True],
+            ["example_3.jpg", "click the first image", "", True],
+            ["val-image-4.jpg", 'select 4153365454', "", True],
+            ['val-image-4.jpg', 'go to cell', "", True],
+            ['val-image-4.jpg', 'select number above cell', "", True],
+            ["val-image-1.jpg", "select calendar option", "", True],
+            ["val-image-1.jpg", "select photos&videos option", "", True],
+            ["val-image-2.jpg", "click on change store", "", True],
+            ["val-image-2.jpg", "click on shop menu at the bottom", "", True],
+            ["val-image-3.jpg", "click on image above short meow", "", True],
+            ["val-image-3.jpg", "go to cat sounds", "", True],
+            ["example_2.jpg", "click on green color button", "", True],
+            ["example_2.jpg", "click on text which is beside call now", "", True],
+            ["example_2.jpg", "click on more button", "", True],
+            ["example_2.jpg", "enter the text field next to the name", "", True],
             ]
 
 demo = gr.Interface(fn=process_refexp,
-                    inputs=[gr.Image(type="pil"), "text", "text"],
-                    outputs=[gr.Image(type="pil", shape=(820)), "json"],
+                    inputs=[gr.Image(type="pil"), "text", "text", gr.Checkbox(
+                        value=True, label="Return Annotated Image", visible=False)],
+                    outputs=[gr.Image(type="pil"), "json"],
                     title=title,
                     description=description,
                     article=article,
@@ -232,4 +244,5 @@ demo = gr.Interface(fn=process_refexp,
                     cache_examples=False
                     )
 
-demo.launch() # share=True when running in a Jupyter Notebook
+# share=True when running in a Jupyter Notebook
+demo.launch(server_name="0.0.0.0")
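The hunks above cache the checkpoint at module level so a Hub download only happens when the requested revision changes. A minimal sketch of that revision-guarded reload pattern, reusing the commit's names (a sketch, not the committed code verbatim; here the heavyweight from_pretrained calls sit inside the guard):

import torch
from transformers import DonutProcessor, VisionEncoderDecoderModel

model = processor = device = loaded_revision = None

def load_model(pretrained_revision: str = "main"):
    # Reload only when the requested git revision differs from the cached one.
    global model, processor, device, loaded_revision
    repo = "ivelin/donut-refexp-click"
    if processor is None or loaded_revision != pretrained_revision:
        loaded_revision = pretrained_revision
        # Hub download and device transfer happen only on a cache miss.
        processor = DonutProcessor.from_pretrained(repo, revision=pretrained_revision)
        model = VisionEncoderDecoderModel.from_pretrained(repo, revision=pretrained_revision)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model.to(device)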
 
 
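The new return_annotated_image flag is wired to a hidden checkbox so API callers can skip the image payload and save bandwidth. A hypothetical call via the gradio_client package (the package choice, Space id, api_name, and example file below are assumptions, not part of this commit):

from gradio_client import Client

client = Client("ivelin/ui-refexp-click")  # placeholder Space id
result = client.predict(
    "example_1.jpg",         # image: one of the bundled example screenshots
    "click on more button",  # referring-expression prompt
    "",                      # model revision; empty falls back to the default
    False,                   # return_annotated_image=False -> image slot is None
    api_name="/predict",
)
print(result)

Depending on the gradio_client version, image inputs may need wrapping (e.g. handle_file); treat the exact call shape as version-dependent.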
start-ec2-service.sh ADDED
@@ -0,0 +1,10 @@
+echo "Starting a service on a aws ec2 instance"
+echo "Start this script within tmux session in order to keep it running after the EC2 console closes"
+source activate pytorch
+cd ui-refexp-click/
+pip3 install -r requirements.txt
+python3 -m app
+echo "\n\n\n-----\n\n\n"
+echo "Press CTRL+B then D to detach from terminal without killing the app"
+echo "Then use 'tmux attach' to reconnect to the virtual terminal"
+
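As the script's own echo hints say, it is meant to run inside tmux so the app outlives the SSH session. One way to launch it (the session name is arbitrary):

tmux new -s refexp
bash start-ec2-service.sh
# CTRL+B then D detaches without killing the app; 'tmux attach -t refexp' reconnects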