apolinario committed on
Commit 48e4d5d
1 Parent(s): e2f736d
Files changed (1)
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
+ import gradio as gr
+
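+ # Pre-processed demo: the predict functions do not run the model; they map
+ # each example prompt to a result that was generated ahead of time.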
+ def predict_image(image, find, transform):
+     if transform == "oreo cake":
+         return "cake_oreo.png"
+     elif transform == "ice":
+         return "cake_ice.png"
+     elif transform == "brioche":
+         return "cake_brioche.png"
+     elif transform == "spinach moss cake":
+         return "cake_spinach.png"
+
+ def predict_video(image, find, transform):
+     if transform == "stained glass giraffe":
+         return "giraffe_stainedglass.mp4"
+     elif transform == "giraffe with a neck warmer":
+         return "giraffe_neck_warmer.mp4"
+     elif transform == "giraffe with a hairy colorful mane":
+         return "giraffe_hairy_colorful_mane.mp4"
+
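+ # All input components are created with interactive=False, so the demo is
+ # driven entirely by the example rows defined below.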
+ # Image inputs
+ image_editing = gr.Image(label="Input image", interactive=False)
+ prompt_find = gr.Textbox(label="What to find in the image", interactive=False)
+ prompt_transform = gr.Textbox(label="What to turn the image into", interactive=False)
+ image_out = gr.Image(label="Output image")
+ results_image = ["cake_oreo.png", "cake_ice.png", "cake_brioche.png", "cake_spinach.png"]
+ examples_image = [["cake.jpg", "cake", "oreo cake"], ["cake.jpg", "cake", "ice"], ["cake.jpg", "cake", "brioche"], ["cake.jpg", "cake", "spinach moss cake"]]
+
+ # Video inputs
+ video_editing = gr.Video(label="Input video", interactive=False)
+ prompt_find_video = gr.Textbox(label="What to find in the video", interactive=False)
+ prompt_transform_video = gr.Textbox(label="What to turn the video into", interactive=False)
+ video_out = gr.Video(label="Output video")
+ results_video = ["giraffe_stainedglass.mp4", "giraffe_neck_warmer.mp4", "giraffe_hairy_colorful_mane.mp4"]
+ examples_video = [["giraffe.mp4", "giraffe", "stained glass giraffe"], ["giraffe.mp4", "giraffe", "giraffe with a neck warmer"], ["giraffe.mp4", "giraffe", "giraffe with a hairy colorful mane"]]
+
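+ # Two-tab UI: one gr.Interface per modality (image / video), each wired to
+ # its lookup function and pre-rendered examples.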
+ with gr.Blocks() as demo:
+     gr.Markdown('''# Text2LIVE: Text-Driven Layered Image and Video Editing
+ This is an interactive pre-processed demo for Text2LIVE. Check out the [paper](https://arxiv.org/abs/2204.02491), the [code](https://github.com/omerbt/Text2LIVE), and the [project page](https://text2live.github.io/).
+ ''')
+     with gr.Tabs():
+         with gr.TabItem("Image editing"):
+             gr.Interface(
+                 fn=predict_image,
+                 inputs=[image_editing, prompt_find, prompt_transform],
+                 outputs=image_out,
+                 examples=examples_image,
+                 allow_flagging="never",
+                 live=True)
+         with gr.TabItem("Video editing"):
+             gr.Interface(
+                 fn=predict_video,
+                 inputs=[video_editing, prompt_find_video, prompt_transform_video],
+                 outputs=video_out,
+                 examples=examples_video,
+                 allow_flagging="never",
+                 live=True)
+     gr.Markdown("BibTeX")
+     gr.Markdown('''```
+ @article{bar2022text2live,
+   title = {Text2LIVE: Text-Driven Layered Image and Video Editing},
+   author = {Bar-Tal, Omer and Ofri-Amar, Dolev and Fridman, Rafail and Kasten, Yoni and Dekel, Tali},
+   journal = {arXiv preprint arXiv:2204.02491},
+   year = {2022}
+ }
+ ```''')
+ demo.launch()