anas-awadalla committed on
Commit
f569b1b
β€’
1 Parent(s): 3d4e892

added stuff

Browse files
Files changed (1) hide show
  1. app.py +13 -11
app.py CHANGED
@@ -8,7 +8,7 @@ login(token=os.environ["HUGGINGFACE_TOKEN"])
8
 
9
  demo_imgs = [
10
  ["images/chinchilla_web-1024x683.jpg", "images/shiba-inu-dog-in-the-snow.jpg"],
11
- ["images/900.jpeg", "images/hummus.jpg", "images/london-underground-sign.jpg", "images/COCO_train2014_000000194806.jpg"],
12
  ["images/COCO_train2014_000000572279.jpg", "images/COCO_train2014_000000194806.jpg"],
13
  [
14
  "images/bcee7a-20190225-a-london-underground-sign.jpg",
@@ -167,7 +167,7 @@ def generate(
167
  with gr.Blocks() as demo:
168
  # As a consequence, you should treat this model as a research prototype and not as a production-ready model. Before using this demo please familiarize yourself with our [model card](https://github.com/mlfoundations/open_flamingo/blob/main/MODEL_CARD.md) and [terms and conditions](https://github.com/mlfoundations/open_flamingo/blob/main/TERMS_AND_CONDITIONS.md)
169
  gr.Markdown(
170
- """
171
  # 🦩 OpenFlamingo Demo
172
 
173
  Blog posts: #1 [An open-source framework for training vision-language models with in-context learning](https://laion.ai/blog/open-flamingo/) // #2 [OpenFlamingo v2: New Models and Enhanced Training Setup]()\n
@@ -206,10 +206,11 @@ RedPajama-INCITE-Base-3B-v1](https://huggingface.co/togethercomputer/RedPajama-I
206
  query_image = gr.Image(type="pil")
207
  text_output = gr.Textbox(value="Output:", label="Model output")
208
 
 
209
  run_btn = gr.Button("Run model")
210
 
211
- def on_click_fn(img): return generate(1, img, "", tc=read_tc)
212
- run_btn.click(on_click_fn, inputs=[query_image], outputs=[text_output])
213
 
214
  with gr.Tab("πŸ¦“ Animal recognition"):
215
  with gr.Row():
@@ -233,8 +234,8 @@ RedPajama-INCITE-Base-3B-v1](https://huggingface.co/togethercomputer/RedPajama-I
233
 
234
  run_btn = gr.Button("Run model")
235
 
236
- def on_click_fn(img): return generate(0, img, "", tc=read_tc)
237
- run_btn.click(on_click_fn, inputs=[query_image], outputs=[text_output])
238
 
239
  with gr.Tab("πŸ”’ Counting objects"):
240
  with gr.Row():
@@ -258,8 +259,8 @@ RedPajama-INCITE-Base-3B-v1](https://huggingface.co/togethercomputer/RedPajama-I
258
 
259
  run_btn = gr.Button("Run model")
260
 
261
- def on_click_fn(img): return generate(4, img, "", tc=read_tc)
262
- run_btn.click(on_click_fn, inputs=[query_image], outputs=[text_output])
263
 
264
  with gr.Tab("πŸ•΅οΈ Visual Question Answering"):
265
  with gr.Row():
@@ -285,9 +286,9 @@ RedPajama-INCITE-Base-3B-v1](https://huggingface.co/togethercomputer/RedPajama-I
285
  text_output = gr.Textbox(value="", label="Model output")
286
 
287
  run_btn = gr.Button("Run model")
288
- def on_click_fn(img, txt): return generate(2, img, txt, tc=read_tc)
289
  run_btn.click(
290
- on_click_fn, inputs=[query_image, question], outputs=[text_output]
291
  )
292
 
293
  with gr.Tab("🌎 Custom"):
@@ -310,13 +311,14 @@ RedPajama-INCITE-Base-3B-v1](https://huggingface.co/togethercomputer/RedPajama-I
310
 
311
  run_btn = gr.Button("Run model")
312
 
313
- def on_click_fn(img, example_img_1, example_txt_1, example_img_2, example_txt_2): return generate(
314
  -1, img, "", example_img_1, example_txt_1, example_img_2, example_txt_2, tc=read_tc
315
  )
316
  run_btn.click(
317
  on_click_fn,
318
  inputs=[
319
  query_image,
 
320
  demo_image_one,
321
  demo_text_one,
322
  demo_image_two,
 
8
 
9
  demo_imgs = [
10
  ["images/chinchilla_web-1024x683.jpg", "images/shiba-inu-dog-in-the-snow.jpg"],
11
+ ["images/900.jpeg", "images/hummus.jpg"],
12
  ["images/COCO_train2014_000000572279.jpg", "images/COCO_train2014_000000194806.jpg"],
13
  [
14
  "images/bcee7a-20190225-a-london-underground-sign.jpg",
 
167
  with gr.Blocks() as demo:
168
  # As a consequence, you should treat this model as a research prototype and not as a production-ready model. Before using this demo please familiarize yourself with our [model card](https://github.com/mlfoundations/open_flamingo/blob/main/MODEL_CARD.md) and [terms and conditions](https://github.com/mlfoundations/open_flamingo/blob/main/TERMS_AND_CONDITIONS.md)
169
  gr.Markdown(
170
+ """
171
  # 🦩 OpenFlamingo Demo
172
 
173
  Blog posts: #1 [An open-source framework for training vision-language models with in-context learning](https://laion.ai/blog/open-flamingo/) // #2 [OpenFlamingo v2: New Models and Enhanced Training Setup]()\n
 
206
  query_image = gr.Image(type="pil")
207
  text_output = gr.Textbox(value="Output:", label="Model output")
208
 
209
+
210
  run_btn = gr.Button("Run model")
211
 
212
+ def on_click_fn(img, read_tc): return generate(1, img, "", tc=read_tc)
213
+ run_btn.click(on_click_fn, inputs=[query_image, read_tc], outputs=[text_output])
214
 
215
  with gr.Tab("πŸ¦“ Animal recognition"):
216
  with gr.Row():
 
234
 
235
  run_btn = gr.Button("Run model")
236
 
237
+ def on_click_fn(img, read_tc): return generate(0, img, "", tc=read_tc)
238
+ run_btn.click(on_click_fn, inputs=[query_image, read_tc], outputs=[text_output])
239
 
240
  with gr.Tab("πŸ”’ Counting objects"):
241
  with gr.Row():
 
259
 
260
  run_btn = gr.Button("Run model")
261
 
262
+ def on_click_fn(img, read_tc): return generate(4, img, "", tc=read_tc)
263
+ run_btn.click(on_click_fn, inputs=[query_image, read_tc], outputs=[text_output])
264
 
265
  with gr.Tab("πŸ•΅οΈ Visual Question Answering"):
266
  with gr.Row():
 
286
  text_output = gr.Textbox(value="", label="Model output")
287
 
288
  run_btn = gr.Button("Run model")
289
+ def on_click_fn(img, txt, read_tc): return generate(2, img, txt, tc=read_tc)
290
  run_btn.click(
291
+ on_click_fn, inputs=[query_image, question, read_tc], outputs=[text_output]
292
  )
293
 
294
  with gr.Tab("🌎 Custom"):
 
311
 
312
  run_btn = gr.Button("Run model")
313
 
314
+ on_click_fn = lambda img, read_tc, example_img_1, example_txt_1, example_img_2, example_txt_2: generate(
315
  -1, img, "", example_img_1, example_txt_1, example_img_2, example_txt_2, tc=read_tc
316
  )
317
  run_btn.click(
318
  on_click_fn,
319
  inputs=[
320
  query_image,
321
+ read_tc,
322
  demo_image_one,
323
  demo_text_one,
324
  demo_image_two,