File size: 902 Bytes
357b0b8
 
a78bf29
357b0b8
 
 
 
6d88167
 
 
357b0b8
6d88167
357b0b8
6d88167
 
 
 
 
 
 
357b0b8
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
import dashboard_text2image
import dashboard_image2image
import dashboard_featurefinder

import streamlit as st

# Registry of sidebar pages: human-readable label -> module exposing app().
_page_entries = (
    ("Retrieve Images given Text", dashboard_text2image),
    ("Retrieve Images given Image", dashboard_image2image),
    ("Find Feature in Image", dashboard_featurefinder),
)
PAGES = dict(_page_entries)

st.sidebar.title("CLIP-RSICD")

# Project blurb shown under the title in the sidebar.
st.sidebar.markdown("""
    The CLIP model from OpenAI is trained in a self-supervised manner using 
    contrastive learning to project images and caption text onto a common 
    embedding space. We have fine-tuned the model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2)) 
    using the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal). 
    Click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd).
""")

# Let the user pick a page, then delegate rendering to that page module.
selection = st.sidebar.radio("Go to", list(PAGES.keys()))
page = PAGES[selection]
page.app()