sohojoe committed on
Commit
1f50047
1 Parent(s): 2f87c44

update documentation

Browse files
Files changed (4) hide show
  1. Ray-Liotta-Goodfellas.jpg +0 -0
  2. SohoJoeEth + Ray.jpeg +0 -0
  3. SohoJoeEth.jpeg +0 -0
  4. app.py +34 -7
Ray-Liotta-Goodfellas.jpg ADDED
SohoJoeEth + Ray.jpeg ADDED
SohoJoeEth.jpeg ADDED
app.py CHANGED
@@ -228,12 +228,31 @@ with gr.Blocks() as demo:
228
  with gr.Row():
229
  gr.Markdown(
230
  """# Soho-Clip
 
231
 
 
 
 
 
232
  A tool for exploring CLIP embedding spaces.
233
- My interest is to use CLIP for image/video understanding (see [CLIP_visual-spatial-reasoning](https://github.com/Sohojoe/CLIP_visual-spatial-reasoning).)
234
 
235
- Try it out by uploading a few images/add text prompts and generate images of the average of their embeddings
236
  """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
237
  with gr.Row():
238
  for i in range(max_tabs):
239
  with gr.Tab(f"Input {i+1}"):
@@ -287,7 +306,11 @@ Try it out by uploading a few images/add text prompts and generate images of the
287
 
288
  with gr.Row():
289
  gr.Markdown(
290
- """### Initial Features
 
 
 
 
291
 
292
  - Combine up to 10 Images and/or text inputs to create an average embedding space.
293
  - View embedding spaces as graph
@@ -301,13 +324,17 @@ Try it out by uploading a few images/add text prompts and generate images of the
301
 
302
  ### Acknowledgements
303
 
304
- - I heavily build on Justin Pinkney's Experiments in Image Variation (see https://www.justinpinkney.com/image-variation-experiments). Please credit them if you use this work.
305
- - [CLIP] (https://openai.com/blog/clip/)
306
  - [Stable Diffusion](https://github.com/CompVis/stable-diffusion)
307
 
308
  """)
309
 
310
-
 
 
 
 
311
 
312
  if __name__ == "__main__":
313
- demo.launch()
 
228
  with gr.Row():
229
  gr.Markdown(
230
  """# Soho-Clip
231
+ """)
232
 
233
+ with gr.Row():
234
+ with gr.Column(scale=5):
235
+ gr.Markdown(
236
+ """
237
  A tool for exploring CLIP embedding spaces.
 
238
 
239
+ Try uploading a few images/add text prompts and click generate images.
240
  """)
241
+ with gr.Column(scale=3):
242
+ with gr.Row():
243
+ with gr.Column(scale=1, min_width=66):
244
+ gr.Image(value = "SohoJoeEth.jpeg", shape=(66,66), show_label=False, interactive=False).style(height=66, width=66)
245
+ with gr.Column(scale=1, min_width=15):
246
+ gr.Markdown("# ")
247
+ gr.Markdown("# +")
248
+ with gr.Column(scale=1, min_width=66):
249
+ gr.Image(value = "Ray-Liotta-Goodfellas.jpg", shape=(66,66), show_label=False, interactive=False).style(height=66, width=66)
250
+ with gr.Column(scale=1, min_width=15):
251
+ gr.Markdown("# ")
252
+ gr.Markdown("# =")
253
+ with gr.Column(scale=1, min_width=66):
254
+ gr.Image(value = "SohoJoeEth + Ray.jpeg", shape=(66,66), show_label=False, interactive=False).style(height=66, width=66)
255
+
256
  with gr.Row():
257
  for i in range(max_tabs):
258
  with gr.Tab(f"Input {i+1}"):
 
306
 
307
  with gr.Row():
308
  gr.Markdown(
309
+ """
310
+ My interest is to use CLIP for image/video understanding (see [CLIP_visual-spatial-reasoning](https://github.com/Sohojoe/CLIP_visual-spatial-reasoning).)
311
+
312
+
313
+ ### Initial Features
314
 
315
  - Combine up to 10 Images and/or text inputs to create an average embedding space.
316
  - View embedding spaces as graph
 
324
 
325
  ### Acknowledgements
326
 
327
+ - I heavily build on Justin Pinkney's [Experiments in Image Variation](https://www.justinpinkney.com/image-variation-experiments). Please credit them if you use this work.
328
+ - [CLIP](https://openai.com/blog/clip/)
329
  - [Stable Diffusion](https://github.com/CompVis/stable-diffusion)
330
 
331
  """)
332
 
333
+ # ![Alt Text](file/pup1.jpg)
334
+
335
+ # <img src="file/pup1.jpg" width="100" height="100">
336
+
337
+ # ![Alt Text](file/pup1.jpg){height=100 width=100}
338
 
339
  if __name__ == "__main__":
340
+ demo.launch()