Zhouyan248 committed on
Commit 68a24d9
1 Parent(s): 9fe76e3

Update base/app.py

Files changed (1)
1. base/app.py +1 -55
base/app.py CHANGED
@@ -13,9 +13,6 @@ from huggingface_hub import snapshot_download
 config_path = "./base/configs/sample.yaml"
 args = OmegaConf.load("./base/configs/sample.yaml")
 device = "cuda" if torch.cuda.is_available() else "cpu"
-### download models
-# snapshot_download('Vchitect/LaVie',cache_dir='./pretrained_models')
-# snapshot_download('CompVis/stable-diffusion-v1-4',cache_dir='./pretrained_models')
 
 # ------- get model ---------------
 model_t2V = model_t2v_fun(args)
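For reference, the two removed comments used huggingface_hub.snapshot_download to pre-fetch the checkpoints. A minimal sketch of that download step, assuming the same repo IDs and cache directory as the removed lines:

from huggingface_hub import snapshot_download

# Pre-fetch both checkpoints into a local cache so model_t2v_fun(args)
# can load them without hitting the Hub at inference time.
snapshot_download('Vchitect/LaVie', cache_dir='./pretrained_models')
snapshot_download('CompVis/stable-diffusion-v1-4', cache_dir='./pretrained_models')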
@@ -45,13 +42,8 @@ def infer(prompt, seed_inp, ddim_steps,cfg):
     if not os.path.exists(args.output_folder):
         os.mkdir(args.output_folder)
     torchvision.io.write_video(args.output_folder + prompt[0:30].replace(' ', '_') + '-'+str(seed_inp)+'-'+str(ddim_steps)+'-'+str(cfg)+ '-.mp4', videos[0], fps=8)
-    # imageio.mimwrite(args.output_folder + prompt.replace(' ', '_') + '.mp4', videos[0], fps=8)
-    # video = cv2.VideoCapture(args.output_folder + prompt.replace(' ', '_') + '.mp4')
-    # video = imageio.get_reader(args.output_folder + prompt.replace(' ', '_') + '.mp4', 'ffmpeg')
 
 
-    # video = model_t2V(prompt, seed_inp, ddim_steps)
-
     return args.output_folder + prompt[0:30].replace(' ', '_') + '-'+str(seed_inp)+'-'+str(ddim_steps)+'-'+str(cfg)+ '-.mp4'
 
 print(1)
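The surviving code assembles the same output path twice via string concatenation. A hedged sketch of one way to build it once (the output_path helper is hypothetical, not part of this commit; it assumes args.output_folder behaves as a directory):

import os

def output_path(folder, prompt, seed_inp, ddim_steps, cfg):
    # Same filename scheme as above: first 30 chars of the prompt with
    # spaces replaced, then seed, steps, and guidance scale joined by '-'.
    name = f"{prompt[:30].replace(' ', '_')}-{seed_inp}-{ddim_steps}-{cfg}-.mp4"
    return os.path.join(folder, name)

# Inside infer(): compute once, write once, return the same path.
# path = output_path(args.output_folder, prompt, seed_inp, ddim_steps, cfg)
# torchvision.io.write_video(path, videos[0], fps=8)
# return path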
@@ -81,7 +73,6 @@ title = """
 </div>
 """
 
-# print(1)
 with gr.Blocks(css='style.css') as demo:
     gr.Markdown("<font color=red size=10><center>LaVie: Text-to-Video generation</center></font>")
     with gr.Column():
@@ -97,58 +88,15 @@ with gr.Blocks(css='style.css') as demo:
         cfg = gr.Number(label="guidance_scale",value=7.5)
         # seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=400, elem_id="seed-in")
 
-        # with gr.Row():
-        # # control_task = gr.Dropdown(label="Task", choices=["Text-2-video", "Image-2-video"], value="Text-2-video", multiselect=False, elem_id="controltask-in")
-        # ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
-        # seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=123456, elem_id="seed-in")
-
-        # ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
-        # ex = gr.Examples(
-        # examples = [['a corgi walking in the park at sunrise, oil painting style',400,50,7],
-        # ['a cut teddy bear reading a book in the park, oil painting style, high quality',700,50,7],
-        # ['an epic tornado attacking above a glowing city at night, the tornado is made of smoke, highly detailed',230,50,7],
-        # ['a jar filled with fire, 4K video, 3D rendered, well-rendered',400,50,7],
-        # ['a teddy bear walking in the park, oil painting style, high quality',400,50,7],
-        # ['a teddy bear walking on the street, 2k, high quality',100,50,7],
-        # ['a panda taking a selfie, 2k, high quality',400,50,7],
-        # ['a polar bear playing drum kit in NYC Times Square, 4k, high resolution',400,50,7],
-        # ['jungle river at sunset, ultra quality',400,50,7],
-        # ['a shark swimming in clear Carribean ocean, 2k, high quality',400,50,7],
-        # ['A steam train moving on a mountainside by Vincent van Gogh',230,50,7],
-        # ['a confused grizzly bear in calculus class',1000,50,7]],
-        # fn = infer,
-        # inputs=[prompt, seed_inp, ddim_steps,cfg],
-        # # outputs=[video_out],
-        # cache_examples=False,
-        # examples_per_page = 6
-        # )
-        # ex.dataset.headers = [""]
 
         with gr.Column():
             submit_btn = gr.Button("Generate video")
             clean_btn = gr.Button("Clean video")
-            # submit_btn = gr.Button("Generate video", size='sm')
-            # video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
             video_out = gr.Video(label="Video result", elem_id="video-output")
-            # with gr.Row():
-            # video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
-            # submit_btn = gr.Button("Generate video", size='sm')
-
 
-    # video_out = gr.Video(label="Video result", elem_id="video-output", height=320, width=512)
     inputs = [prompt, seed_inp, ddim_steps,cfg]
     outputs = [video_out]
-    # gr.Examples(
-    # value = [['An astronaut riding a horse',123,50],
-    # ['a panda eating bamboo on a rock',123,50],
-    # ['Spiderman is surfing',123,50]],
-    # label = "example of sampling",
-    # show_label = True,
-    # headers = ['prompt','seed','steps'],
-    # datatype = ['str','number','number'],
-    # row_count=4,
-    # col_count=(3,"fixed")
-    # )
+
     ex = gr.Examples(
         examples = [['a corgi walking in the park at sunrise, oil painting style',400,50,7],
                     ['a cut teddy bear reading a book in the park, oil painting style, high quality',700,50,7],
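The gr.Examples block that survives binds each example row to the four inputs in order. A minimal self-contained sketch of that pattern, with a stub in place of the real model call (infer_stub and the shortened component names are assumptions for illustration):

import gradio as gr

def infer_stub(prompt, seed, steps, cfg):
    # Stand-in for the real infer(); the app returns an .mp4 path here.
    return f"{prompt} | seed={seed} steps={steps} cfg={cfg}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    seed = gr.Number(label="seed", value=400)
    steps = gr.Slider(label="ddim_steps", minimum=50, maximum=300, value=50, step=1)
    cfg = gr.Number(label="guidance_scale", value=7.5)
    result = gr.Textbox(label="result")
    # Each examples row supplies [prompt, seed, steps, cfg] in the same
    # order as `inputs`; clicking a row fills the form with those values.
    gr.Examples(
        examples=[['a corgi walking in the park at sunrise, oil painting style', 400, 50, 7]],
        fn=infer_stub,
        inputs=[prompt, seed, steps, cfg],
        cache_examples=False,
    )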
@@ -169,8 +117,6 @@ with gr.Blocks(css='style.css') as demo:
     )
     ex.dataset.headers = [""]
 
-    # control_task.change(change_task_options, inputs=[control_task], outputs=[canny_opt, hough_opt, normal_opt], queue=False)
-    # submit_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
     clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
     submit_btn.click(infer, inputs, outputs)
     # share_button.click(None, [], [], _js=share_js)
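After this commit the event wiring reduces to the two live bindings above. A minimal sketch of that clean/generate pattern (the stub callbacks are assumptions; the real app returns the written video's file path):

import gradio as gr

def generate_stub(prompt, seed, steps, cfg):
    return None  # the real infer() returns the path of the written .mp4

def clean_stub():
    return None  # returning None clears the gr.Video component

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    seed = gr.Number(value=400)
    steps = gr.Slider(minimum=50, maximum=300, value=50, step=1)
    cfg = gr.Number(value=7.5)
    submit_btn = gr.Button("Generate video")
    clean_btn = gr.Button("Clean video")
    video_out = gr.Video(label="Video result")
    # queue=False lets the clean action bypass the request queue, so the
    # output clears immediately instead of waiting behind generations.
    clean_btn.click(clean_stub, inputs=[], outputs=[video_out], queue=False)
    submit_btn.click(generate_stub, [prompt, seed, steps, cfg], [video_out])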
 