ryaalbr committed on
Commit
0671f1e
1 Parent(s): 9f201d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -45,6 +45,7 @@ clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
45
 
46
  # Load the Unsplash dataset
47
  dataset = load_dataset("jamescalam/unsplash-25k-photos", split="train") # all 25K images are in train split
 
48
 
49
  # Load gpt and modified weights for captions
50
  gpt = GPT2LMHeadModel.from_pretrained('gpt2')
@@ -229,7 +230,7 @@ with gr.Blocks() as demo:
229
  1. Enter list of labels separated by commas (or select one of the examples below)
230
  2. Click **Get Random Image** to grab a random image from dataset
231
  3. Click **Classify Image** to analyze current image against the labels (including after changing labels)
232
- 4. The dataset (<a href="https://github.com/unsplash/datasets" target="_blank">Unsplash Lite</a>) contains 25,000 nature-focused images"""
233
  gr.Markdown(instructions)
234
  with gr.Row(variant="compact"):
235
  label_text = gr.Textbox(show_label=False, placeholder="Enter classification labels").style(container=False)
@@ -253,15 +254,17 @@ with gr.Blocks() as demo:
253
  get_btn.click(fn=rand_image, outputs=im)
254
  #im.change(predict, inputs=[im, labels], outputs=cf)
255
  class_btn.click(predict, inputs=[im, labels], outputs=cf)
 
 
256
 
257
  with gr.Tab("Captioning"):
258
  instructions = """## Instructions:
259
  1. Click **Get Random Image** to grab a random image from dataset
260
  1. Click **Create Caption** to generate a caption for the image
261
  1. Different models can be selected:
262
- * **COCO** generally produces more straight-forward captions, but it is a smaller dataset and therefore struggles to recognize certain objects
263
- * **Conceptual Captions** is a much larger dataset but sometimes produces overly ``poetic'' results
264
- 1. The dataset (<a href="https://github.com/unsplash/datasets" target="_blank">Unsplash Lite</a>) contains 25,000 nature-focused images"""
265
  gr.Markdown(instructions)
266
  with gr.Row():
267
  with gr.Column(variant="panel"):
@@ -274,12 +277,13 @@ with gr.Blocks() as demo:
274
  get_btn_cap.click(fn=rand_image, outputs=im_cap)
275
  #im_cap.change(generate_text, inputs=im_cap, outputs=caption)
276
  caption_btn.click(get_caption, inputs=[im_cap, model_name], outputs=caption)
 
277
 
278
  with gr.Tab("Search"):
279
  instructions = """## Instructions:
280
  1. Enter a search query (or select one of the examples below)
281
  2. Click **Find Images** to find images that match the query (top 5 are shown in order from left to right)
282
- 3. The dataset (<a href="https://github.com/unsplash/datasets" target="_blank">Unsplash Lite</a>) contains 25,000 nature-focused images"""
283
  gr.Markdown(instructions)
284
  with gr.Column(variant="panel"):
285
  desc = gr.Textbox(show_label=False, placeholder="Enter description").style(container=False)
@@ -292,5 +296,6 @@ with gr.Blocks() as demo:
292
  search_btn = gr.Button("Find Images").style(full_width=False)
293
  gallery = gr.Gallery(show_label=False).style(grid=(2,2,3,5))
294
  search_btn.click(search,inputs=desc, outputs=gallery, postprocess=False)
 
295
 
296
  demo.launch()
 
45
 
46
  # Load the Unsplash dataset
47
  dataset = load_dataset("jamescalam/unsplash-25k-photos", split="train") # all 25K images are in train split
48
+ dataset_size = len(dataset)
49
 
50
  # Load gpt and modifed weights for captions
51
  gpt = GPT2LMHeadModel.from_pretrained('gpt2')
 
230
  1. Enter list of labels separated by commas (or select one of the examples below)
231
  2. Click **Get Random Image** to grab a random image from dataset
232
  3. Click **Classify Image** to analyze current image against the labels (including after changing labels)
233
+ """
234
  gr.Markdown(instructions)
235
  with gr.Row(variant="compact"):
236
  label_text = gr.Textbox(show_label=False, placeholder="Enter classification labels").style(container=False)
 
254
  get_btn.click(fn=rand_image, outputs=im)
255
  #im.change(predict, inputs=[im, labels], outputs=cf)
256
  class_btn.click(predict, inputs=[im, labels], outputs=cf)
257
+ gr.HTML(f"Dataset: <a href='https://github.com/unsplash/datasets' target='_blank'>Unsplash Lite</a>; Number of Images: {dataset_size}")
258
+
259
 
260
  with gr.Tab("Captioning"):
261
  instructions = """## Instructions:
262
  1. Click **Get Random Image** to grab a random image from dataset
263
  1. Click **Create Caption** to generate a caption for the image
264
  1. Different models can be selected:
265
+ * **COCO** generally produces more straight-forward captions, but it is a smaller dataset and therefore struggles to recognize certain objects
266
+ * **Conceptual Captions** is a much larger dataset but sometimes produces results that resemble social media posts
267
+ """
268
  gr.Markdown(instructions)
269
  with gr.Row():
270
  with gr.Column(variant="panel"):
 
277
  get_btn_cap.click(fn=rand_image, outputs=im_cap)
278
  #im_cap.change(generate_text, inputs=im_cap, outputs=caption)
279
  caption_btn.click(get_caption, inputs=[im_cap, model_name], outputs=caption)
280
+ gr.HTML(f"Dataset: <a href='https://github.com/unsplash/datasets' target='_blank'>Unsplash Lite</a>; Number of Images: {dataset_size}")
281
 
282
  with gr.Tab("Search"):
283
  instructions = """## Instructions:
284
  1. Enter a search query (or select one of the examples below)
285
  2. Click **Find Images** to find images that match the query (top 5 are shown in order from left to right)
286
+ 3. Keep in mind that the dataset contains mostly nature-focused images"""
287
  gr.Markdown(instructions)
288
  with gr.Column(variant="panel"):
289
  desc = gr.Textbox(show_label=False, placeholder="Enter description").style(container=False)
 
296
  search_btn = gr.Button("Find Images").style(full_width=False)
297
  gallery = gr.Gallery(show_label=False).style(grid=(2,2,3,5))
298
  search_btn.click(search,inputs=desc, outputs=gallery, postprocess=False)
299
+ gr.HTML(f"Dataset: <a href='https://github.com/unsplash/datasets' target='_blank'>Unsplash Lite</a>; Number of Images: {dataset_size}")
300
 
301
  demo.launch()