sneha committed on
Commit
1df99f6
1 Parent(s): 267367c

more images, add description

Browse files
Files changed (3) hide show
  1. app.py +4 -1
  2. imgs/ego4d_2.jpg +0 -0
  3. imgs/ego4d_3.jpg +0 -0
app.py CHANGED
@@ -73,7 +73,10 @@ input_button = gr.Radio(["min", "max", "mean"], value="min",label="Attention Hea
73
  output_img = gr.Image(shape=(250,250))
74
  output_plot = gr.Plot()
75
 
76
- demo = gr.Interface(fn=run_attn, title="Visual Cortex Base Model",
 
 
 
77
  examples=[[os.path.join('./imgs',x),None]for x in os.listdir(os.path.join(os.getcwd(),'imgs')) if 'jpg' in x],
78
  inputs=[input_img,input_button],outputs=[output_img,output_plot])
79
  demo.launch()
73
  output_img = gr.Image(shape=(250,250))
74
  output_plot = gr.Plot()
75
 
76
+ markdown ="This is a demo for the Visual Cortex (Base) model. When passed an image input, it displays the attention of the last layer of the transformer.\n \
77
+ The user can decide how the attention heads will be combined. \
78
+ Along with the attention heatmap, it also displays the embedding values reshaped to a 16x48 grid."
79
+ demo = gr.Interface(fn=run_attn, title="Visual Cortex Base Model", description=markdown,
80
  examples=[[os.path.join('./imgs',x),None]for x in os.listdir(os.path.join(os.getcwd(),'imgs')) if 'jpg' in x],
81
  inputs=[input_img,input_button],outputs=[output_img,output_plot])
82
  demo.launch()
imgs/ego4d_2.jpg ADDED
imgs/ego4d_3.jpg ADDED