zhiqiulin committed on
Commit 3e68751
1 Parent(s): 748aa2b

Update app.py

Files changed (1)
  1. app.py +20 -16
app.py CHANGED
@@ -114,22 +114,6 @@ def load_example(model_name, images, prompt):
     return model_name, images, prompt
 
 
-demo_vqascore_ranking = gr.Interface(
-    fn=rank_images,  # function to call
-    inputs=[
-        gr.Dropdown(["clip-flant5-xxl", "clip-flant5-xl"], label="Model Name"),
-        gr.Gallery(label="Generated Images", elem_id="input-gallery", columns=4, allow_preview=True),
-        gr.Textbox(label="Prompt")
-    ],  # define the types of inputs
-    examples=[
-        ["clip-flant5-xxl", example_imgs, example_prompt0],
-        ["clip-flant5-xxl", example_imgs, example_prompt1],
-    ],
-    outputs=gr.Gallery(label="Ranked Images with Scores", elem_id="ranked-gallery", columns=4, allow_preview=True),  # define the type of output
-    title="VQAScore Ranking",  # title of the app
-    description="This model ranks a gallery of images based on their similarity to a text prompt."
-)
-
 # demo_vqascore = gr.Interface(
 #     fn=generate,  # function to call
 #     inputs=[
@@ -146,6 +130,26 @@ demo_vqascore_ranking = gr.Interface(
 #     description="This model evaluates the similarity between an image and a text prompt."
 # )
 
+# Create the second demo: VQAScore Ranking
+with gr.Blocks() as demo_vqascore_ranking:
+    gr.Markdown("# VQAScore Ranking\nThis model ranks a gallery of images based on their similarity to a text prompt.")
+
+    with gr.Row():
+        with gr.Column():
+            model_dropdown = gr.Dropdown(["clip-flant5-xxl", "clip-flant5-xl"], value="clip-flant5-xxl", label="Model Name")
+            prompt = gr.Textbox(label="Prompt")
+            gallery = gr.Gallery(label="Generated Images", elem_id="input-gallery", columns=4, allow_preview=True)
+            rank_button = gr.Button("Rank Images")
+
+        with gr.Column():
+            example1_button = gr.Button("Load Example 1")
+            example2_button = gr.Button("Load Example 2")
+            example1_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt0), inputs=[], outputs=[model_dropdown, gallery, prompt])
+            example2_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt1), inputs=[], outputs=[model_dropdown, gallery, prompt])
+
+    ranked_gallery = gr.Gallery(label="Ranked Images with Scores", elem_id="ranked-gallery", columns=4, allow_preview=True)
+    rank_button.click(fn=rank_images, inputs=[model_dropdown, gallery, prompt], outputs=ranked_gallery)  # wired after ranked_gallery is defined so the output reference resolves
+
 # # Create the second demo
 # with gr.Blocks() as demo_vqascore_ranking:
 #     gr.Markdown("# VQAScore Ranking\nThis model ranks a gallery of images based on their similarity to a text prompt.")
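
Note: rank_images and load_example are defined earlier in app.py and are not touched by this commit, so their bodies are not visible in this diff. The sketch below is a hypothetical stand-in (not the code in app.py) illustrating the contract the new rank_button.click(...) wiring assumes: the handler receives the dropdown value, the gallery contents, and the prompt, and returns a list of (image, caption) pairs that the output gr.Gallery can render. stub_vqascore and rank_images_sketch are made-up names; the real app scores image-prompt similarity with the selected model.

# Hypothetical sketch of the click-handler contract assumed by the new Blocks wiring.
from typing import List, Tuple, Union

def stub_vqascore(model_name: str, image_path: str, prompt: str) -> float:
    """Placeholder scorer; the real app computes VQAScore with the chosen model."""
    return 0.5  # dummy constant, for illustration only

def rank_images_sketch(model_name: str,
                       images: List[Union[str, Tuple[str, str]]],
                       prompt: str) -> List[Tuple[str, str]]:
    # gr.Gallery may pass items as bare file paths or as (path, caption) tuples
    # depending on the Gradio version, so normalize to paths first.
    paths = [item[0] if isinstance(item, (list, tuple)) else item for item in images]
    # Score each image against the prompt and sort from highest to lowest score.
    scored = sorted(((stub_vqascore(model_name, p, prompt), p) for p in paths), reverse=True)
    # Returning (image, caption) pairs lets the output gr.Gallery display each score.
    return [(p, f"VQAScore: {score:.3f}") for score, p in scored]

The launch call is also outside this hunk; presumably the app ends with something like demo_vqascore_ranking.queue().launch(), where queueing is optional but helps when scoring is slow.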