Sujit Pal committed
Commit fcc5cad
Parent: 63b4da4

fix: include image for thumbnail

Files changed (1)
  1. app.py +7 -4
app.py CHANGED
@@ -9,14 +9,17 @@ PAGES = {
     "Retrieve Images given Image": dashboard_image2image,
     "Find Feature in Image": dashboard_featurefinder,
 }
-st.sidebar.title("CLIP-RSICD")
 
+st.sidebar.title("CLIP-RSICD")
+st.sidebar.image("thumbnail.jpg")
 st.sidebar.markdown("""
+We have fine-tuned the CLIP model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2))
+using remote sensing images from the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal).
+Click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd).
+
 The CLIP model from OpenAI is trained in a self-supervised manner using
 contrastive learning to project images and caption text onto a common
-embedding space. We have fine-tuned the model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2))
-using the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal).
-Click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd).
+embedding space.
 """)
 selection = st.sidebar.radio("Go to", list(PAGES.keys()))
 page = PAGES[selection]
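
For context, below is a minimal runnable sketch of how the sidebar wiring in app.py reads after this commit. Only the PAGES entries, the sidebar calls, and the markdown text are taken from the diff; the import, the stub page bodies, and the final page() call are assumptions added so the sketch is self-contained (the real dashboard functions live in their own modules, and the diff ends before the selected page is invoked).

import streamlit as st

# Hypothetical stubs standing in for the real dashboard modules,
# which the diff references but does not show.
def dashboard_image2image():
    st.write("Retrieve Images given Image page")

def dashboard_featurefinder():
    st.write("Find Feature in Image page")

# Map sidebar labels to the callables that render each page.
# (Entries above line 9 of app.py are not visible in this diff
# and are omitted here.)
PAGES = {
    "Retrieve Images given Image": dashboard_image2image,
    "Find Feature in Image": dashboard_featurefinder,
}

st.sidebar.title("CLIP-RSICD")
st.sidebar.image("thumbnail.jpg")  # new in this commit; expects thumbnail.jpg next to app.py
st.sidebar.markdown("""
We have fine-tuned the CLIP model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2))
using remote sensing images from the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal).
Click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd).

The CLIP model from OpenAI is trained in a self-supervised manner using
contrastive learning to project images and caption text onto a common
embedding space.
""")

# Route to the selected page: the radio returns the chosen label,
# and the dict lookup yields the function that draws that page.
selection = st.sidebar.radio("Go to", list(PAGES.keys()))
page = PAGES[selection]
page()  # assumed call; not shown in the diff

Run with "streamlit run app.py": the thumbnail renders at the top of the sidebar, above the project description and the navigation radio.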