fix some stuff
- app.py +1 -2
- examples.py +4 -7
- home.py +0 -2
- image2text.py +11 -8
- introduction.md +3 -4
- text2image.py +11 -12
app.py
CHANGED
@@ -15,8 +15,7 @@ PAGES = {
 st.sidebar.title("Explore our CLIP-Italian demo")
 
 logo = Image.open("static/img/clip_italian_logo.png")
-st.sidebar.image(logo)
-#, caption="CLIP-Italian logo"
+st.sidebar.image(logo, caption="CLIP-Italian logo")
 
 page = st.sidebar.radio("", list(PAGES.keys()))
 PAGES[page].app()
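
For context, the PAGES mapping named in the hunk header drives a small sidebar-based multi-page app: each entry maps a radio label to a page module that exposes an app() function. Below is a minimal sketch of that pattern; the imported page modules and the radio labels are assumptions based on the other files in this commit, and only the lines shown in the diff above are confirmed by it.

# Minimal sketch of the sidebar-navigation pattern app.py follows.
# The page modules and PAGES labels are assumptions, not the exact source.
import streamlit as st
from PIL import Image

import home
import text2image
import image2text
import examples

PAGES = {
    "Introduction": home,
    "Text to Image": text2image,
    "Image to Text": image2text,
    "Examples & Applications": examples,
}

st.sidebar.title("Explore our CLIP-Italian demo")
logo = Image.open("static/img/clip_italian_logo.png")
st.sidebar.image(logo, caption="CLIP-Italian logo")

page = st.sidebar.radio("", list(PAGES.keys()))  # sidebar page selector
PAGES[page].app()  # dispatch to the selected page's app() function
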
examples.py
CHANGED
@@ -3,16 +3,13 @@ import streamlit as st
 
 
 def app():
-
-    st.markdown("<h1 style='text-align: center; color: #CD212A;'> Examples & Applications </h1>", unsafe_allow_html=True)
-    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Complex Queries -Image Retrieval </h2>", unsafe_allow_html=True)
-
+    st.title("Examples & Applications")
     st.write(
         """
 
 
-        Even though we trained the Italian CLIP model on way less examples
-        OpenAI's CLIP
+        Even though we trained the Italian CLIP model on way less examples than the original
+        OpenAI's CLIP, our training choices and quality datasets led to impressive results!
         Here, we present some of **the most impressive text-image associations** learned by our model.
 
         Remember you can head to the **Text to Image** section of the demo at any time to test your own🤌 Italian queries!
@@ -59,4 +56,4 @@ def app():
         "Is the DALLE-mini logo an *avocado* or an armchair (*poltrona*)?")
 
     st.image("static/img/examples/dalle_mini.png")
-    st.markdown("It seems it's half an armchair and half an avocado! We thank the
+    st.markdown("It seems it's half an armchair and half an avocado! We thank the DALL-E mini team for the great idea :)")
home.py
CHANGED
@@ -7,7 +7,5 @@ def read_markdown_file(markdown_file):
 
 
 def app():
-    st.markdown("<h1 style='text-align: center; color: #CD212A;'> CLIP-Italian </h1>", unsafe_allow_html=True)
-
     intro_markdown = read_markdown_file("introduction.md")
     st.markdown(intro_markdown, unsafe_allow_html=True)
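
The hunk header references a read_markdown_file helper whose body is not part of this diff. A minimal sketch of how such a helper is commonly written, shown together with app() as it stands after this change:

# Hypothetical sketch of the read_markdown_file helper referenced above;
# its actual body is not shown in this diff.
from pathlib import Path
import streamlit as st

def read_markdown_file(markdown_file):
    # Return the markdown source as a plain string for st.markdown to render.
    return Path(markdown_file).read_text(encoding="utf-8")

def app():
    intro_markdown = read_markdown_file("introduction.md")
    st.markdown(intro_markdown, unsafe_allow_html=True)
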
image2text.py
CHANGED
@@ -10,22 +10,25 @@ import gc
 
 
 def app():
-
-    st.markdown("<h1 style='text-align: center; color: #CD212A;'> Zero Shot Image Classification </h1>", unsafe_allow_html=True)
-    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Image to Text </h2>", unsafe_allow_html=True)
+    st.title("From Image to Text")
     st.markdown(
         """
 
-        👋 Ciao!
+        ### 👋 Ciao!
+
+        Here you can find the captions or the labels that are most related to a given image. It is a zero-shot
+        image classification task!
+
+        🤌 Italian mode on! 🤌
 
-
+        For example, try typing "gatto" (cat) in the space for label1 and "cane" (dog) in the space for label2 and click
         "classify"!
 
         """
     )
 
     image_url = st.text_input(
-        "
+        "You can input the URL of an image",
         value="https://www.petdetective.it/wp-content/uploads/2016/04/gatto-toilette.jpg",
     )
 
@@ -35,14 +38,14 @@ def app():
 
     with col2:
         captions_count = st.selectbox(
-            "
+            "Number of labels", options=range(1, MAX_CAP + 1), index=1
         )
         compute = st.button("CLASSIFY")
 
     with col1:
        captions = list()
        for idx in range(min(MAX_CAP, captions_count)):
-            captions.append(st.text_input(f"
+            captions.append(st.text_input(f"Insert label {idx+1}"))
 
    if compute:
        captions = [c for c in captions if c != ""]
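
The page in image2text.py performs zero-shot image classification: the user's Italian labels are scored against the image, and the best-matching label wins. A minimal sketch of that flow outside Streamlit, using the Hugging Face transformers CLIP classes; the public openai/clip-vit-base-patch32 checkpoint stands in for the Italian model, whose loading code is not part of this diff.

# Zero-shot classification sketch. The checkpoint is a stand-in: the demo's
# own (Italian) model loading is not shown in this commit.
import requests
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image_url = "https://www.petdetective.it/wp-content/uploads/2016/04/gatto-toilette.jpg"
image = Image.open(requests.get(image_url, stream=True).raw)

labels = ["gatto", "cane"]  # the user-typed labels from the text inputs
inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)

# Softmax over the image-text similarity scores gives one probability per label.
probs = outputs.logits_per_image.softmax(dim=-1)[0]
for label, p in zip(labels, probs.tolist()):
    print(f"{label}: {p:.2%}")
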
introduction.md
CHANGED
@@ -1,10 +1,9 @@
+# Italian CLIP
 
-CLIP
-
-Clip-Italian (Contrastive Language-Image Pre-training in Italian language) is based on OpenAI’s CLIP ([Radford et al., 2021](https://arxiv.org/abs/2103.00020))which is an amazing model that can learn to represent images and text jointly in the same space.
+CLIP ([Radford et al., 2021](https://arxiv.org/abs/2103.00020)) is an amazing model that can learn to represent images and text jointly in the same space.
 
 In this project, we aim to propose the first CLIP model trained on Italian data, that in this context can be considered a
-low resource language. Using a few techniques, we have been able to fine-tune a SOTA Italian CLIP model with **only 1.
+low resource language. Using a few techniques, we have been able to fine-tune a SOTA Italian CLIP model with **only 1.4 million** training samples. Our Italian CLIP model
 is built upon the pre-trained [Italian BERT](https://huggingface.co/dbmdz/bert-base-italian-xxl-cased) model provided by [dbmdz](https://huggingface.co/dbmdz) and the OpenAI
 [vision transformer](https://huggingface.co/openai/clip-vit-base-patch32).
 
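
The introduction describes a dual-encoder design: the dbmdz Italian BERT as text encoder and the OpenAI vision transformer as image encoder, both mapped into a shared embedding space. A conceptual sketch under those assumptions; the projection size, pooling choice, and helper names are illustrative, not the project's actual training code.

# Conceptual dual-encoder sketch: Italian BERT for text, CLIP ViT for images,
# each projected into a shared space. embed_dim, the pooling choice, and the
# helper names are assumptions for illustration only.
from torch import nn
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor, CLIPVisionModel

tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-italian-xxl-cased")
text_encoder = AutoModel.from_pretrained("dbmdz/bert-base-italian-xxl-cased")
image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
vision_encoder = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")

embed_dim = 512  # assumed size of the shared embedding space
text_proj = nn.Linear(text_encoder.config.hidden_size, embed_dim)
image_proj = nn.Linear(vision_encoder.config.hidden_size, embed_dim)

def embed_texts(sentences):
    tokens = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
    hidden = text_encoder(**tokens).last_hidden_state[:, 0]  # [CLS] pooling
    return nn.functional.normalize(text_proj(hidden), dim=-1)

def embed_images(pil_images):
    pixels = image_processor(images=pil_images, return_tensors="pt")
    hidden = vision_encoder(**pixels).pooler_output
    return nn.functional.normalize(image_proj(hidden), dim=-1)
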
text2image.py
CHANGED
@@ -107,21 +107,21 @@ headers = {
 
 def app():
 
-
-    st.markdown("<h1 style='text-align: center; color: #CD212A;'> Image Retrieval </h1>", unsafe_allow_html=True)
-    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Text to Image </h2>", unsafe_allow_html=True)
+    st.title("From Text to Image")
     st.markdown(
         """
 
-
-
+        ### 👋 Ciao!
+
+        Here you can search for ~150.000 images in the Conceptual Captions dataset (CC) or in the Unsplash 25k Photos dataset.
+        Even though we did not train on any of these images you will see most queries make sense. When you see errors, there might be two possibilities:
+        the model is answering in a wrong way or the image you are looking for is not in the dataset and the model is giving you the best answer it can get.
+
 
-        Though these images were not used for training the model, you will see most queries make sense.
 
-
-        a)The model is answering in a wrong way or b) the image you are looking for are not in the dataset & the model is giving you the best answer it can get.
+        🤌 Italian mode on! 🤌
 
-        You can choose from one of the following examples
+        You can choose from one of the following examples:
         """
     )
 
@@ -157,7 +157,7 @@ def app():
 
     col1, col2 = st.beta_columns([3, 1])
     with col1:
-        query = st.text_input("
+        query = st.text_input("... or insert an Italian query text")
     with col2:
         dataset_name = st.selectbox("IR dataset", ["CC", "Unsplash"])
 
@@ -200,8 +200,7 @@ def app():
                 break
             except (UnidentifiedImageError) as e:
                 if i == N - 1:
-                    st.text(f'Tried to show
-                    Maybe try a different query?')
+                    st.text(f'Tried to show {N} different image URLS but none of them were reachabele.\nMaybe try a different query?')
 
     gc.collect()
 
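
For context on what the text2image.py page does with a query: the query is embedded and compared against precomputed image embeddings for the selected dataset, and the closest matches are displayed. A minimal retrieval sketch; the file names are hypothetical, the embeddings are assumed to be L2-normalized, and the query embedding would come from a text encoder like the one sketched above.

# Text-to-image retrieval sketch. File names are hypothetical; the demo's
# actual index-loading code is not part of this commit.
import numpy as np

image_embeddings = np.load("cc_image_embeddings.npy")        # (num_images, embed_dim), L2-normalized
image_urls = open("cc_image_urls.txt").read().splitlines()   # one URL per embedding row

def search(query_embedding, top_k=9):
    # On normalized vectors, cosine similarity reduces to a dot product.
    query = query_embedding / np.linalg.norm(query_embedding)
    scores = image_embeddings @ query
    best = np.argsort(-scores)[:top_k]
    return [(image_urls[i], float(scores[i])) for i in best]
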