vinid committed on
Commit
6c67d85
β€’
1 Parent(s): 03effd2

fixing a few things

Browse files
Files changed (3) hide show
  1. image2text.py +5 -4
  2. introduction.md +3 -3
  3. text2image.py +7 -3
image2text.py CHANGED
@@ -13,7 +13,8 @@ def app():
13
 
14
  ### πŸ‘‹ Ciao!
15
 
16
- Here you can find the captions that are most related to a given image.
 
17
 
18
  🀌 Italian mode on! 🀌
19
 
@@ -30,20 +31,20 @@ def app():
30
 
31
  with col2:
32
  captions_count = st.selectbox(
33
- "Number of captions", options=range(1, MAX_CAP + 1)
34
  )
35
  compute = st.button("Compute")
36
 
37
  with col1:
38
  captions = list()
39
  for idx in range(min(MAX_CAP, captions_count)):
40
- captions.append(st.text_input(f"Insert Caption {idx+1}"))
41
 
42
  if compute:
43
  captions = [c for c in captions if c != ""]
44
 
45
  if not captions or not filename:
46
- st.error("Please choose one image and at least one caption")
47
  else:
48
  with st.spinner("Computing..."):
49
  model = get_model()
 
13
 
14
  ### πŸ‘‹ Ciao!
15
 
16
+ Here you can find the captions or the labels that are most related to a given image. It is a zero-shot
17
+ image classification task!
18
 
19
  🀌 Italian mode on! 🀌
20
 
 
31
 
32
  with col2:
33
  captions_count = st.selectbox(
34
+ "Number of labels", options=range(1, MAX_CAP + 1)
35
  )
36
  compute = st.button("Compute")
37
 
38
  with col1:
39
  captions = list()
40
  for idx in range(min(MAX_CAP, captions_count)):
41
+ captions.append(st.text_input(f"Insert label {idx+1}"))
42
 
43
  if compute:
44
  captions = [c for c in captions if c != ""]
45
 
46
  if not captions or not filename:
47
+ st.error("Please choose one image and at least one label")
48
  else:
49
  with st.spinner("Computing..."):
50
  model = get_model()
introduction.md CHANGED
@@ -150,14 +150,14 @@ then there is its (partial) counting ability and finally the ability of understa
150
  Look at the following - slightly cherry picked (but not even that much) - examples:
151
 
152
  ### Colors
153
- Here's a blu flower
154
  <img src="https://huggingface.co/spaces/clip-italian/clip-italian-demo/raw/main/static/img/fiore_giallo.png" alt="drawing" width="600"/>
155
 
156
- And here's a yellow flower
157
  <img src="https://huggingface.co/spaces/clip-italian/clip-italian-demo/raw/main/static/img/fiore_blu.png" alt="drawing" width="600"/>
158
 
159
  ### Counting
160
- What about "one cat"
161
  <img src="https://huggingface.co/spaces/clip-italian/clip-italian-demo/raw/main/static/img/gatto.png" alt="drawing" width="600"/>
162
 
163
  And what about "two cats"?
 
150
  Look at the following - slightly cherry picked (but not even that much) - examples:
151
 
152
  ### Colors
153
+ Here's a yellow flower
154
  <img src="https://huggingface.co/spaces/clip-italian/clip-italian-demo/raw/main/static/img/fiore_giallo.png" alt="drawing" width="600"/>
155
 
156
+ And here's a blue flower
157
  <img src="https://huggingface.co/spaces/clip-italian/clip-italian-demo/raw/main/static/img/fiore_blu.png" alt="drawing" width="600"/>
158
 
159
  ### Counting
160
+ What about "one cat"?
161
  <img src="https://huggingface.co/spaces/clip-italian/clip-italian-demo/raw/main/static/img/gatto.png" alt="drawing" width="600"/>
162
 
163
  And what about "two cats"?
text2image.py CHANGED
@@ -103,7 +103,11 @@ def app():
103
 
104
  ### πŸ‘‹ Ciao!
105
 
106
- Here you can search for images in the Unsplash 25k Photos dataset.
 
 
 
 
107
 
108
  🀌 Italian mode on! 🀌
109
 
@@ -129,7 +133,7 @@ def app():
129
  )
130
  with col4:
131
  st.button(
132
- "Un fiore blu", on_click=update_query, kwargs=dict(value="Un fiore blu")
133
  )
134
 
135
  col1, col2 = st.beta_columns([3, 1])
@@ -163,7 +167,7 @@ def app():
163
  raise ValueError()
164
 
165
  image_paths = utils.find_image(
166
- query, model, dataset, tokenizer, image_features, 2, dataset_name
167
  )
168
 
169
  st.image(image_paths)
 
103
 
104
  ### πŸ‘‹ Ciao!
105
 
106
+ Here you can search for images in the Unsplash 25k Photos dataset and the Conceptual Captions dataset.
107
+ You will see that most queries make sense. When you see errors, there are two possibilities: either the model is answering
108
+ in a wrong way, or the image you are looking for is not in the dataset and the model is giving you the best answer it can get.
109
+
110
+
111
 
112
  🀌 Italian mode on! 🀌
113
 
 
133
  )
134
  with col4:
135
  st.button(
136
+ "Un gatto sopra una sedia", on_click=update_query, kwargs=dict(value="Un gatto sopra una sedia")
137
  )
138
 
139
  col1, col2 = st.beta_columns([3, 1])
 
167
  raise ValueError()
168
 
169
  image_paths = utils.find_image(
170
+ query, model, dataset, tokenizer, image_features, 1, dataset_name
171
  )
172
 
173
  st.image(image_paths)