merve HF staff committed on
Commit
1e300d8
1 Parent(s): 55dc83f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -82
app.py CHANGED
@@ -89,93 +89,94 @@ with gr.Blocks(fill_height=True) as demo:
89
  output = gr.Textbox(label="Output")
90
 
91
  with gr.Accordion(label="Example Inputs and Advanced Generation Parameters"):
92
- examples=[["./example_images/docvqa_example.png", "How many items are sold?", "Greedy", 0.4, 512, 1.2, 0.8],
93
  ["./example_images/s2w_example.png", "What is this UI about?", "Greedy", 0.4, 512, 1.2, 0.8],
94
  ["./example_images/example_images_travel_tips.jpg", "I want to go somewhere similar to the one in the photo. Give me destinations and travel tips.", 0.4, 512, 1.2, 0.8],
95
  ["./example_images/chicken_on_money.png", "Can you tell me a very short story based on this image?", 0.4, 512, 1.2, 0.8],
96
  ["./example_images/baklava.png", "Where is this pastry from?", 0.4, 512, 1.2, 0.8],
97
  ["./example_images/dummy_pdf.png", "How much percent is the order status?", 0.4, 512, 1.2, 0.8],
98
  ["./example_images/art_critic.png", "As an art critic AI assistant, could you describe this painting in details and make a thorough critic?.", 0.4, 512, 1.2, 0.8]]
99
- gr.Examples(
100
- examples = examples,
101
- inputs=[image_input, query_input, decoding_strategy, temperature,
102
- max_new_tokens, repetition_penalty, top_p],
103
- outputs=output,
104
- fn=model_inference
105
- )
106
- # Hyper-parameters for generation
107
- max_new_tokens = gr.Slider(
108
- minimum=8,
109
- maximum=1024,
110
- value=512,
111
- step=1,
112
- interactive=True,
113
- label="Maximum number of new tokens to generate",
114
- )
115
- repetition_penalty = gr.Slider(
116
- minimum=0.01,
117
- maximum=5.0,
118
- value=1.2,
119
- step=0.01,
120
- interactive=True,
121
- label="Repetition penalty",
122
- info="1.0 is equivalent to no penalty",
123
- )
124
- temperature = gr.Slider(
125
- minimum=0.0,
126
- maximum=5.0,
127
- value=0.4,
128
- step=0.1,
129
- interactive=True,
130
- label="Sampling temperature",
131
- info="Higher values will produce more diverse outputs.",
132
- )
133
- top_p = gr.Slider(
134
- minimum=0.01,
135
- maximum=0.99,
136
- value=0.8,
137
- step=0.01,
138
- interactive=True,
139
- label="Top P",
140
- info="Higher values is equivalent to sampling more low-probability tokens.",
141
- )
142
- decoding_strategy = gr.Radio(
143
- [
144
- "Greedy",
145
- "Top P Sampling",
146
- ],
147
- value="Greedy",
148
- label="Decoding strategy",
149
- interactive=True,
150
- info="Higher values is equivalent to sampling more low-probability tokens.",
151
- )
152
- decoding_strategy.change(
153
- fn=lambda selection: gr.Slider(
154
- visible=(
155
- selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
156
- )
157
- ),
158
- inputs=decoding_strategy,
159
- outputs=temperature,
160
- )
161
-
162
- decoding_strategy.change(
163
- fn=lambda selection: gr.Slider(
164
- visible=(
165
- selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
166
- )
167
- ),
168
- inputs=decoding_strategy,
169
- outputs=repetition_penalty,
170
- )
171
- decoding_strategy.change(
172
- fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
173
- inputs=decoding_strategy,
174
- outputs=top_p,
175
- )
176
-
177
- submit_btn.click(model_inference, inputs = [image_input, query_input, decoding_strategy, temperature,
178
- max_new_tokens, repetition_penalty, top_p], outputs=output)
179
-
 
180
 
181
  demo.launch(debug=True)
 
89
  output = gr.Textbox(label="Output")
90
 
91
  with gr.Accordion(label="Example Inputs and Advanced Generation Parameters"):
92
+ examples=[["./example_images/docvqa_example.png", "How many items are sold?", "Greedy", 0.4, 512, 1.2, 0.8],
93
  ["./example_images/s2w_example.png", "What is this UI about?", "Greedy", 0.4, 512, 1.2, 0.8],
94
  ["./example_images/example_images_travel_tips.jpg", "I want to go somewhere similar to the one in the photo. Give me destinations and travel tips.", 0.4, 512, 1.2, 0.8],
95
  ["./example_images/chicken_on_money.png", "Can you tell me a very short story based on this image?", 0.4, 512, 1.2, 0.8],
96
  ["./example_images/baklava.png", "Where is this pastry from?", 0.4, 512, 1.2, 0.8],
97
  ["./example_images/dummy_pdf.png", "How much percent is the order status?", 0.4, 512, 1.2, 0.8],
98
  ["./example_images/art_critic.png", "As an art critic AI assistant, could you describe this painting in details and make a thorough critic?.", 0.4, 512, 1.2, 0.8]]
99
+
100
+ # Hyper-parameters for generation
101
+ max_new_tokens = gr.Slider(
102
+ minimum=8,
103
+ maximum=1024,
104
+ value=512,
105
+ step=1,
106
+ interactive=True,
107
+ label="Maximum number of new tokens to generate",
108
+ )
109
+ repetition_penalty = gr.Slider(
110
+ minimum=0.01,
111
+ maximum=5.0,
112
+ value=1.2,
113
+ step=0.01,
114
+ interactive=True,
115
+ label="Repetition penalty",
116
+ info="1.0 is equivalent to no penalty",
117
+ )
118
+ temperature = gr.Slider(
119
+ minimum=0.0,
120
+ maximum=5.0,
121
+ value=0.4,
122
+ step=0.1,
123
+ interactive=True,
124
+ label="Sampling temperature",
125
+ info="Higher values will produce more diverse outputs.",
126
+ )
127
+ top_p = gr.Slider(
128
+ minimum=0.01,
129
+ maximum=0.99,
130
+ value=0.8,
131
+ step=0.01,
132
+ interactive=True,
133
+ label="Top P",
134
+ info="Higher values is equivalent to sampling more low-probability tokens.",
135
+ )
136
+ decoding_strategy = gr.Radio(
137
+ [
138
+ "Greedy",
139
+ "Top P Sampling",
140
+ ],
141
+ value="Greedy",
142
+ label="Decoding strategy",
143
+ interactive=True,
144
+ info="Higher values is equivalent to sampling more low-probability tokens.",
145
+ )
146
+ decoding_strategy.change(
147
+ fn=lambda selection: gr.Slider(
148
+ visible=(
149
+ selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
150
+ )
151
+ ),
152
+ inputs=decoding_strategy,
153
+ outputs=temperature,
154
+ )
155
+
156
+ decoding_strategy.change(
157
+ fn=lambda selection: gr.Slider(
158
+ visible=(
159
+ selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
160
+ )
161
+ ),
162
+ inputs=decoding_strategy,
163
+ outputs=repetition_penalty,
164
+ )
165
+ decoding_strategy.change(
166
+ fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
167
+ inputs=decoding_strategy,
168
+ outputs=top_p,
169
+ )
170
+ gr.Examples(
171
+ examples = examples,
172
+ inputs=[image_input, query_input, decoding_strategy, temperature,
173
+ max_new_tokens, repetition_penalty, top_p],
174
+ outputs=output,
175
+ fn=model_inference
176
+ )
177
+
178
+ submit_btn.click(model_inference, inputs = [image_input, query_input, decoding_strategy, temperature,
179
+ max_new_tokens, repetition_penalty, top_p], outputs=output)
180
+
181
 
182
  demo.launch(debug=True)