sneha
committed on
Commit
•
def57e6
1
Parent(s):
5161efd
turn off sharing
Browse files
app.py
CHANGED
@@ -105,8 +105,8 @@ css = ".output-image, .input-image, .image-preview {height: 600px !important}"
|
|
105 |
|
106 |
markdown ="This is a demo for the Visual Cortex models. When passed an image input, it displays the attention of the last layer of the transformer.\n \
|
107 |
The user can decide how the attention heads will be combined. \
|
108 |
-
Along with the attention heatmap, it also displays the embedding values reshaped to a 16x48 or 16x64 grid."
|
109 |
demo = gr.Interface(fn=run_attn, title="Visual Cortex Base Model", description=markdown,
|
110 |
examples=[[os.path.join('./imgs',x),None,None]for x in os.listdir(os.path.join(os.getcwd(),'imgs')) if 'jpg' in x],
|
111 |
inputs=[input_img,model_type,input_button],outputs=[output_img,output_plot],css=css)
|
112 |
-
demo.launch(
|
|
|
105 |
|
106 |
markdown ="This is a demo for the Visual Cortex models. When passed an image input, it displays the attention of the last layer of the transformer.\n \
|
107 |
The user can decide how the attention heads will be combined. \
|
108 |
+
Along with the attention heatmap, it also displays the embedding values reshaped to a 16x48 for VC1-Base or 16x64 grid for VC1-Large."
|
109 |
demo = gr.Interface(fn=run_attn, title="Visual Cortex Base Model", description=markdown,
|
110 |
examples=[[os.path.join('./imgs',x),None,None]for x in os.listdir(os.path.join(os.getcwd(),'imgs')) if 'jpg' in x],
|
111 |
inputs=[input_img,model_type,input_button],outputs=[output_img,output_plot],css=css)
|
112 |
+
demo.launch()
|