import streamlit as st


def app():
    """Render the "Examples & Applications" page of the demo."""
    st.markdown("<h1 style='text-align: center; color: #CD212A;'> Examples & Applications </h1>", unsafe_allow_html=True)
    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Complex Queries - Image Retrieval </h2>", unsafe_allow_html=True)
    
    st.write(
        """
        Even though we trained the Italian CLIP model on far fewer examples (~1.4M) than
        OpenAI's original CLIP (~400M), our training choices and high-quality datasets led to impressive results!
        Here, we present some of **the most impressive text-image associations** learned by our model.

        Remember, you can head to the **Text to Image** section of the demo at any time to test your own 🤌 Italian queries!
        """
    )
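
    # --- Illustrative sketch (not wired into the demo) ------------------------
    # How a text query like the ones below is matched against a pool of images:
    # encode both with a CLIP-style model and rank images by similarity.
    # This is a minimal sketch assuming a Hugging Face `transformers` CLIP
    # checkpoint; OpenAI's English model is used as a stand-in here, and the
    # Italian model's loading details may differ.
    def _retrieval_sketch(query, image_paths):
        import torch
        from PIL import Image
        from transformers import CLIPModel, CLIPProcessor

        model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

        images = [Image.open(p) for p in image_paths]
        inputs = processor(text=[query], images=images,
                           return_tensors="pt", padding=True)
        with torch.no_grad():
            # logits_per_text[i][j] is the scaled cosine similarity between
            # text i and image j; squeeze the single-query dimension away.
            scores = model(**inputs).logits_per_text.squeeze(0)
        # Best-matching images first
        return sorted(zip(image_paths, scores.tolist()),
                      key=lambda pair: pair[1], reverse=True)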

    st.markdown("### 1. Actors in Scenes")
    st.markdown("These examples were taken from the CC dataset")

    st.subheader("una coppia")
    st.markdown("*a couple*")
    st.image("static/img/examples/couple_0.jpeg")

    col1, col2 = st.columns(2)  # st.beta_columns was renamed to st.columns in Streamlit 1.0
    col1.subheader("una coppia con il tramonto sullo sfondo")
    col1.markdown("*a couple with the sunset in the background*")
    col1.image("static/img/examples/couple_1.jpeg")

    col2.subheader("una coppia che passeggia sulla spiaggia")
    col2.markdown("*a couple walking on the beach*")
    col2.image("static/img/examples/couple_2.jpeg")

    st.subheader("una coppia che passeggia sulla spiaggia al tramonto")
    st.markdown("*a couple walking on the beach at sunset*")
    st.image("static/img/examples/couple_3.jpeg")

    st.markdown("### 2. Dresses")
    st.markdown("These examples were taken from the Unsplash dataset")

    col1, col2 = st.beta_columns(2)
    col1.subheader("un vestito primavrile")
    col1.markdown("*a dress for the spring*")
    col1.image("static/img/examples/vestito1.png")

    col2.subheader("un vestito autunnale")
    col2.markdown("*a dress for the autumn*")
    col2.image("static/img/examples/vestito_autunnale.png")

    #st.markdown("## Image Classification")
    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Zero Shot Image Classification </h2>", unsafe_allow_html=True)
    st.markdown("We report this cool example provided by the "
                "[DALLE-mini team](https://github.com/borisdayma/dalle-mini). "
                "Is the DALLE-mini logo an *avocado* or an armchair (*poltrona*)?")

    st.image("static/img/examples/dalle_mini.png")
    st.markdown("It seems it's half an armchair and half an avocado! We thank the DALLE-mini team for the great idea :)")