import gradio as gr

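# Pre-processed demo: the Text2LIVE optimization itself is not run here.
# Each prediction function simply maps the chosen transformation prompt to a
# result that was generated offline with Text2LIVE.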
def predict_image(image, find, transform):
    if transform == "oreo cake":
        return "cake_oreo.png"
    elif transform == "ice":
        return "cake_ice.png"
    elif transform == "brioche":
        return "cake_brioche.png"
    elif transform == "spinach moss cake":
        return "cake_spinach.png"

def predict_video(image, find, transform):
    if transform == "stained glass giraffe":
        return "giraffe_stainedglass.mp4"
    elif transform == "giraffe with a neck warmer":
        return "giraffe_neck_warmer.mp4"
    elif transform == "giraffe with a hairy colorful mane":
        return "giraffe_hairy_colorful_mane.mp4"

# Image inputs
image_editing = gr.Image(label="Input image", interactive=False, value="cake.jpg")
prompt_find = gr.Textbox(label="What to find on the image", interactive=False, value="cake")
prompt_transform = gr.Textbox(label="What to turn the image into", interactive=False, value="oreo cake")
image_out = gr.Image(value="cake_oreo.png")
examples_image = [
    ["cake.jpg", "cake", "oreo cake"],
    ["cake.jpg", "cake", "ice"],
    ["cake.jpg", "cake", "brioche"],
    ["cake.jpg", "cake", "spinach moss cake"],
]

# Video inputs
video_editing = gr.Video(label="Input video", interactive=False, value="giraffe.mp4")
prompt_find_video = gr.Textbox(label="What to find on the video", interactive=False, value="giraffe")
prompt_transform_video = gr.Textbox(label="What to turn the video into", interactive=False, value="giraffe with a neck warmer")
video_out = gr.Video(value="giraffe_neck_warmer.mp4")
examples_video = [
    ["giraffe.mp4", "giraffe", "stained glass giraffe"],
    ["giraffe.mp4", "giraffe", "giraffe with a neck warmer"],
    ["giraffe.mp4", "giraffe", "giraffe with a hairy colorful mane"],
]

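# Layout: two tabs, each wrapping a pre-wired gr.Interface. The inputs are
# locked (interactive=False), so the example rows drive the demo; live=True
# re-runs the lookup as soon as an example populates the inputs.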
with gr.Blocks() as demo:
    gr.Markdown('''# Text2LIVE: Text-Driven Layered Image and Video Editing
    This is an interactive, pre-processed demo for Text2LIVE. Check out the [paper](https://arxiv.org/abs/2204.02491), the [code](https://github.com/omerbt/Text2LIVE), and the [project page](https://text2live.github.io/).
    ''')
    with gr.Tabs():
        with gr.TabItem("Image editing"):
            gr.Interface(
                fn=predict_image,
                inputs=[image_editing, prompt_find, prompt_transform],
                outputs=image_out,
                examples=examples_image,
                allow_flagging="never",
                live=True)
        with gr.TabItem("Video editing"):
            gr.Interface(
                fn=predict_video,
                inputs=[video_editing, prompt_find_video, prompt_transform_video],
                outputs=video_out,
                examples=examples_video,
                allow_flagging="never",
                live=True)
        gr.Markdown("Bibtex")
        gr.Markdown('''```
@article{bar2022text2live,
  title   = {Text2LIVE: Text-Driven Layered Image and Video Editing},
  author  = {Bar-Tal, Omer and Ofri-Amar, Dolev and Fridman, Rafail and Kasten, Yoni and Dekel, Tali},
  journal = {arXiv preprint arXiv:2204.02491},
  year    = {2022}
}
```''')
demo.launch(enable_queue=False)