Cletrason committed on
Commit
5bd153a
1 Parent(s): 15e6fb5

Upload app_pix2pix_video.py

Browse files
Files changed (1) hide show
  1. app_pix2pix_video.py +103 -0
app_pix2pix_video.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from model import Model
import os

# True when this app runs on the official PAIR Hugging Face Space
# (SPACE_AUTHOR_NAME is set by the HF runtime). Used below to pick the
# default chunk size and to enable example caching only on the Space.
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
def create_demo(model: Model):
    """Build the Gradio UI for the Video Instruct-Pix2Pix demo.

    Wires an input video + prompt (plus advanced generation options) to
    ``model.process_pix2pix`` and returns the assembled-but-not-launched
    ``gr.Blocks`` app.

    Args:
        model: Project model wrapper whose ``process_pix2pix`` method is
            bound to the Run button and the cached examples.

    Returns:
        gr.Blocks: The constructed demo interface.
    """
    # Bundled examples: [video path, prompt, resolution, seed, image guidance].
    examples = [
        ['__assets__/pix2pix_video_2fps/camel.mp4',
         'make it Van Gogh Starry Night style', 512, 0, 1.0],
        ['__assets__/pix2pix_video_2fps/mini-cooper.mp4',
         'make it Picasso style', 512, 0, 1.5],
        ['__assets__/pix2pix_video_2fps/snowboard.mp4',
         'replace man with robot', 512, 0, 1.0],
        ['__assets__/pix2pix_video_2fps/white-swan.mp4',
         'replace swan with mallard', 512, 0, 1.5],
        ['__assets__/pix2pix_video_2fps/boat.mp4',
         'add city skyline in the background', 512, 0, 1.5],
        ['__assets__/pix2pix_video_2fps/ballet.mp4',
         'make her a golden sculpture', 512, 0, 1.0],
    ]
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Video Instruct Pix2Pix')
        with gr.Row():
            # FIX: the heading was opened with <h2> but closed with </h3>;
            # the closing tag now matches.
            gr.HTML(
                """
                <div style="text-align: left; auto;">
                <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
                Description: For performance purposes, our current preview release supports any input videos but caps output videos after 80 frames and the input videos are scaled down before processing. For faster inference you can choose lower output frames per seconds from Advanced Options.
                </h2>
                </div>
                """)

        with gr.Row():
            with gr.Column():
                input_image = gr.Video(label="Input Video", source='upload',
                                       type='numpy', format="mp4", visible=True).style(height="auto")
            with gr.Column():
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
                with gr.Accordion('Advanced options', open=False):
                    watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
                                          "None"], label="Watermark", value='Picsart AI Research')
                    image_resolution = gr.Slider(label='Image Resolution',
                                                 minimum=256,
                                                 maximum=1024,
                                                 value=512,
                                                 step=64)
                    seed = gr.Slider(label='Seed',
                                     minimum=0,
                                     maximum=65536,
                                     value=0,
                                     step=1)
                    image_guidance = gr.Slider(label='Image guidance scale',
                                               minimum=0.5,
                                               maximum=2,
                                               value=1.0,
                                               step=0.1)
                    start_t = gr.Slider(label='Starting time in seconds',
                                        minimum=0,
                                        maximum=10,
                                        value=0,
                                        step=1)
                    # FIX: minimum was 0 while the default (and the label's
                    # documented sentinel) is -1; the slider now admits -1.
                    end_t = gr.Slider(label='End time in seconds (-1 corresponds to uploaded video duration)',
                                      minimum=-1,
                                      maximum=10,
                                      value=-1,
                                      step=1)
                    # FIX: minimum was 1 while the default (and the label's
                    # documented sentinel) is -1; the slider now admits -1.
                    out_fps = gr.Slider(label='Output video fps (-1 corresponds to uploaded video fps)',
                                        minimum=-1,
                                        maximum=30,
                                        value=-1,
                                        step=1)
                    # Larger chunks on the Space for throughput; slider hidden
                    # there so visitors cannot exhaust shared resources.
                    chunk_size = gr.Slider(
                        label="Chunk size", minimum=2, maximum=16, value=12 if on_huggingspace else 8, step=1, visible=not on_huggingspace)
            with gr.Column():
                result = gr.Video(label='Output', show_label=True)
        # Argument order must match model.process_pix2pix's signature.
        inputs = [
            input_image,
            prompt,
            image_resolution,
            seed,
            image_guidance,
            start_t,
            end_t,
            out_fps,
            chunk_size,
            watermark
        ]

        # Examples are pre-rendered (cached) only on the Space to avoid
        # running the model at startup elsewhere.
        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    fn=model.process_pix2pix,
                    cache_examples=on_huggingspace,
                    run_on_click=False,
                    )

        run_button.click(fn=model.process_pix2pix,
                         inputs=inputs,
                         outputs=result)
    return demo