Zhouyan248 committed
Commit 7dd5ec2
1 Parent(s): 29a6729

Update base/app.py

Files changed (1): base/app.py (+116 -116)
base/app.py CHANGED
@@ -1,116 +1,116 @@
- import gradio as gr
- from text_to_video import model_t2v_fun,setup_seed
- from omegaconf import OmegaConf
- import torch
- import imageio
- import os
- import cv2
- import torchvision
- config_path = "/mnt/petrelfs/zhouyan/project/lavie-release/base/configs/sample.yaml"
- args = OmegaConf.load("/mnt/petrelfs/zhouyan/project/lavie-release/base/configs/sample.yaml")
- device = "cuda" if torch.cuda.is_available() else "cpu"
- # ------- get model ---------------
- model_t2V = model_t2v_fun(args)
- model_t2V.to(device)
- if device == "cuda":
-     model_t2V.enable_xformers_memory_efficient_attention()
-
- # model_t2V.enable_xformers_memory_efficient_attention()
- css = """
- h1 {
-   text-align: center;
- }
- #component-0 {
-   max-width: 730px;
-   margin: auto;
- }
- """
-
- def infer(prompt, seed_inp, ddim_steps):
-
-
-     setup_seed(seed_inp)
-     videos = model_t2V(prompt, video_length=16, height = 320, width= 512, num_inference_steps=ddim_steps, guidance_scale=7).video
-     print(videos[0].shape)
-     if not os.path.exists(args.output_folder):
-         os.mkdir(args.output_folder)
-     torchvision.io.write_video(args.output_folder + prompt.replace(' ', '_') + '-.mp4', videos[0], fps=8)
-     # imageio.mimwrite(args.output_folder + prompt.replace(' ', '_') + '.mp4', videos[0], fps=8)
-     # video = cv2.VideoCapture(args.output_folder + prompt.replace(' ', '_') + '.mp4')
-     # video = imageio.get_reader(args.output_folder + prompt.replace(' ', '_') + '.mp4', 'ffmpeg')
-
-
-     # video = model_t2V(prompt, seed_inp, ddim_steps)
-
-     return args.output_folder + prompt.replace(' ', '_') + '-.mp4'
-
- print(1)
-
- # def clean():
- #     return gr.Image.update(value=None, visible=False), gr.Video.update(value=None)
- def clean():
-     return gr.Video.update(value=None)
-
- title = """
- <div style="text-align: center; max-width: 700px; margin: 0 auto;">
-   <div
-     style="
-       display: inline-flex;
-       align-items: center;
-       gap: 0.8rem;
-       font-size: 1.75rem;
-     "
-   >
-     <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
-       Intern·Vchitect (Text-to-Video)
-     </h1>
-   </div>
-   <p style="margin-bottom: 10px; font-size: 94%">
-     Apply Intern·Vchitect to generate a video
-   </p>
- </div>
- """
-
- # print(1)
- with gr.Blocks(css='style.css') as demo:
-     gr.Markdown("<font color=red size=10><center>LaVie</center></font>")
-     with gr.Row(elem_id="col-container"):
-
-         with gr.Column():
-
-             prompt = gr.Textbox(value="a teddy bear walking on the street", label="Prompt", placeholder="enter prompt", show_label=True, elem_id="prompt-in", min_width=200, lines=2)
-
-             ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=50, step=1)
-             seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=400, elem_id="seed-in")
-             # with gr.Row():
-             #     # control_task = gr.Dropdown(label="Task", choices=["Text-2-video", "Image-2-video"], value="Text-2-video", multiselect=False, elem_id="controltask-in")
-             #     ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
-             #     seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=123456, elem_id="seed-in")
-
-             # ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
-         with gr.Column():
-             submit_btn = gr.Button("Generate video")
-             clean_btn = gr.Button("Clean video")
-             # submit_btn = gr.Button("Generate video", size='sm')
-             # video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
-             video_out = gr.Video(label="Video result", elem_id="video-output")
-     # with gr.Row():
-     #     video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
-     #     submit_btn = gr.Button("Generate video", size='sm')
-
-
-     # video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
-     inputs = [prompt, seed_inp, ddim_steps]
-     outputs = [video_out]
-
-
-     # control_task.change(change_task_options, inputs=[control_task], outputs=[canny_opt, hough_opt, normal_opt], queue=False)
-     # submit_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
-     clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
-     submit_btn.click(infer, inputs, outputs)
-     # share_button.click(None, [], [], _js=share_js)
-
- print(2)
- demo.queue(max_size=12).launch(server_name="0.0.0.0", server_port=7860)
-
-
+ import gradio as gr
+ from text_to_video import model_t2v_fun,setup_seed
+ from omegaconf import OmegaConf
+ import torch
+ import imageio
+ import os
+ import cv2
+ import torchvision
+ config_path = "./base/configs/sample.yaml"
+ args = OmegaConf.load("./base/configs/sample.yaml")
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ # ------- get model ---------------
+ model_t2V = model_t2v_fun(args)
+ model_t2V.to(device)
+ if device == "cuda":
+     model_t2V.enable_xformers_memory_efficient_attention()
+
+ # model_t2V.enable_xformers_memory_efficient_attention()
+ css = """
+ h1 {
+   text-align: center;
+ }
+ #component-0 {
+   max-width: 730px;
+   margin: auto;
+ }
+ """
+
+ def infer(prompt, seed_inp, ddim_steps):
+
+
+     setup_seed(seed_inp)
+     videos = model_t2V(prompt, video_length=16, height = 320, width= 512, num_inference_steps=ddim_steps, guidance_scale=7).video
+     print(videos[0].shape)
+     if not os.path.exists(args.output_folder):
+         os.mkdir(args.output_folder)
+     torchvision.io.write_video(args.output_folder + prompt.replace(' ', '_') + '-.mp4', videos[0], fps=8)
+     # imageio.mimwrite(args.output_folder + prompt.replace(' ', '_') + '.mp4', videos[0], fps=8)
+     # video = cv2.VideoCapture(args.output_folder + prompt.replace(' ', '_') + '.mp4')
+     # video = imageio.get_reader(args.output_folder + prompt.replace(' ', '_') + '.mp4', 'ffmpeg')
+
+
+     # video = model_t2V(prompt, seed_inp, ddim_steps)
+
+     return args.output_folder + prompt.replace(' ', '_') + '-.mp4'
+
+ print(1)
+
+ # def clean():
+ #     return gr.Image.update(value=None, visible=False), gr.Video.update(value=None)
+ def clean():
+     return gr.Video.update(value=None)
+
+ title = """
+ <div style="text-align: center; max-width: 700px; margin: 0 auto;">
+   <div
+     style="
+       display: inline-flex;
+       align-items: center;
+       gap: 0.8rem;
+       font-size: 1.75rem;
+     "
+   >
+     <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
+       Intern·Vchitect (Text-to-Video)
+     </h1>
+   </div>
+   <p style="margin-bottom: 10px; font-size: 94%">
+     Apply Intern·Vchitect to generate a video
+   </p>
+ </div>
+ """
+
+ # print(1)
+ with gr.Blocks(css='style.css') as demo:
+     gr.Markdown("<font color=red size=10><center>LaVie</center></font>")
+     with gr.Row(elem_id="col-container"):
+
+         with gr.Column():
+
+             prompt = gr.Textbox(value="a teddy bear walking on the street", label="Prompt", placeholder="enter prompt", show_label=True, elem_id="prompt-in", min_width=200, lines=2)
+
+             ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=50, step=1)
+             seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=400, elem_id="seed-in")
+             # with gr.Row():
+             #     # control_task = gr.Dropdown(label="Task", choices=["Text-2-video", "Image-2-video"], value="Text-2-video", multiselect=False, elem_id="controltask-in")
+             #     ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
+             #     seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=123456, elem_id="seed-in")
+
+             # ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
+         with gr.Column():
+             submit_btn = gr.Button("Generate video")
+             clean_btn = gr.Button("Clean video")
+             # submit_btn = gr.Button("Generate video", size='sm')
+             # video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
+             video_out = gr.Video(label="Video result", elem_id="video-output")
+     # with gr.Row():
+     #     video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
+     #     submit_btn = gr.Button("Generate video", size='sm')
+
+
+     # video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
+     inputs = [prompt, seed_inp, ddim_steps]
+     outputs = [video_out]
+
+
+     # control_task.change(change_task_options, inputs=[control_task], outputs=[canny_opt, hough_opt, normal_opt], queue=False)
+     # submit_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
+     clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
+     submit_btn.click(infer, inputs, outputs)
+     # share_button.click(None, [], [], _js=share_js)
+
+ print(2)
+ demo.queue(max_size=12).launch(server_name="0.0.0.0", server_port=7860)
+
+
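
The only substantive change in this commit swaps the hard-coded cluster path for the repo-relative `./base/configs/sample.yaml` in both `config_path` and the `OmegaConf.load` call. The relative form still assumes the demo is launched from the repository root. A minimal sketch of a launch-directory-independent variant (not part of the commit; the `pathlib` resolution and reusing `config_path` in the load call are assumptions):

```python
# Sketch only, not the committed code: resolve the config relative to this
# file (base/app.py) rather than the process working directory, so the app
# starts correctly no matter where it is launched from.
from pathlib import Path

from omegaconf import OmegaConf

config_path = Path(__file__).resolve().parent / "configs" / "sample.yaml"
args = OmegaConf.load(config_path)  # OmegaConf.load accepts a str or Path
```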
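Unrelated to this commit but visible in the context: `infer` builds the output file as `args.output_folder + prompt.replace(' ', '_') + '-.mp4'`, which relies on `output_folder` ending in a separator, and `os.mkdir` fails when parent directories are missing. A hedged sketch of a sturdier variant (the helper name `build_output_path` is invented for illustration):

```python
import os

def build_output_path(output_folder: str, prompt: str) -> str:
    # makedirs creates intermediate directories and tolerates an existing folder
    os.makedirs(output_folder, exist_ok=True)
    # os.path.join supplies the separator whether or not output_folder ends in "/"
    return os.path.join(output_folder, prompt.replace(' ', '_') + '-.mp4')
```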