"""Streamlit entry point for the CLIP-RSICD demo dashboard.

Presents a sidebar with project information and a radio selector that
dispatches to one of three sub-dashboards (text-to-image retrieval,
image-to-image retrieval, feature finding). Each sub-dashboard module
exposes an ``app()`` function that renders its page.

NOTE: Streamlit executes this script top-to-bottom on every interaction,
so the module-level statements below are the intended entry point — do
not wrap them in a ``__main__`` guard.
"""

import streamlit as st

import dashboard_featurefinder
import dashboard_image2image
import dashboard_text2image

# Maps the human-readable page label shown in the sidebar to the module
# that renders that page. Each module must provide an app() function.
PAGES = {
    "Retrieve Images given Text": dashboard_text2image,
    "Retrieve Images given Image": dashboard_image2image,
    "Find Feature in Image": dashboard_featurefinder,
}

st.sidebar.title("CLIP-RSICD")
st.sidebar.markdown("""
The CLIP model from OpenAI is trained in a self-supervised manner using
contrastive learning to project images and caption text onto a common 
embedding space. We have fine-tuned the model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2)) 
using the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal).

Click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd).
""")

# Radio selection drives the page dispatch; list(...) preserves the
# insertion order of PAGES so the sidebar matches the dict above.
selection = st.sidebar.radio("Go to", list(PAGES.keys()))
page = PAGES[selection]
page.app()