Added examples
Browse files
app.py
CHANGED
@@ -25,6 +25,11 @@ model_name = "microsoft/phi-2"
|
|
25 |
phi2_text = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
|
26 |
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
27 |
|
|
|
|
|
|
|
|
|
|
|
28 |
|
29 |
def textMode(text, count):
|
30 |
count = int(count)
|
@@ -76,4 +81,13 @@ with gr.Blocks() as demo:
|
|
76 |
image_button.click(imageMode, inputs=[image_input,image_text_input], outputs=image_text_output)
|
77 |
audio_button.click(audioMode, inputs=audio_input, outputs=audio_text_output)
|
78 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
79 |
demo.launch()
|
|
|
25 |
phi2_text = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
|
26 |
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
27 |
|
28 |
+
def example_inference(input_text, count, image, img_qn, audio):
    """Driver for a gr.Examples row: runs only the text pipeline.

    Accepts all five example fields (text prompt, token count, image,
    image question, audio) so the signature matches the examples row,
    but only the text inputs are used; the image and audio slots are
    filled with placeholder strings.
    """
    generated = textMode(input_text, count)
    # Image/audio inference is not wired up for examples yet.
    return generated, "in progress", "in progress"
|
31 |
+
|
32 |
+
|
33 |
|
34 |
def textMode(text, count):
|
35 |
count = int(count)
|
|
|
81 |
image_button.click(imageMode, inputs=[image_input,image_text_input], outputs=image_text_output)
|
82 |
audio_button.click(audioMode, inputs=audio_input, outputs=audio_text_output)
|
83 |
|
# Register the cached example row. The example row has FIVE values
# (text prompt, token count, image, image question, audio), so `inputs`
# must list the five matching input components — the original
# [input_image, n_top_classes, require_gradcam] named components that do
# not exist in this app (copy-paste from a GradCAM classifier demo) and
# would raise NameError at startup.
gr.Examples(
    examples=[
        ["What is a large language model?", "50", "", "Describe the image", ""]
    ],
    # NOTE(review): image_input / image_text_input / audio_input are taken
    # from the .click() wiring above; text_input / text_input_count are the
    # assumed names of the text-tab components — confirm against the Blocks
    # layout earlier in the file.
    inputs=[text_input, text_input_count, image_input, image_text_input, audio_input],
    outputs=[text_output, image_text_output, audio_text_output],
    fn=example_inference,
)
|
93 |
demo.launch()
|