Anonymous committed on
Commit 7f4ab63
1 Parent(s): e693f4b
Files changed (1)
  1. app.py +33 -22
app.py CHANGED
@@ -22,17 +22,16 @@ ckpt_dir_1024 = "checkpoints/base_1024_v1"
 os.makedirs(ckpt_dir_1024, exist_ok=True)
 hf_hub_download(repo_id="VideoCrafter/Text2Video-1024", filename="model.ckpt", local_dir=ckpt_dir_1024)
 
-ckpt_path_256 = "checkpoints/base_256_v1/model.pth"
-ckpt_dir_256 = "checkpoints/base_256_v1"
-os.makedirs(ckpt_dir_256, exist_ok=True)
-hf_hub_download(repo_id="MoonQiu/LongerCrafter", filename="model.pth", local_dir=ckpt_dir_256)
+# ckpt_path_256 = "checkpoints/base_256_v1/model.pth"
+# ckpt_dir_256 = "checkpoints/base_256_v1"
+# os.makedirs(ckpt_dir_256, exist_ok=True)
+# hf_hub_download(repo_id="MoonQiu/LongerCrafter", filename="model.pth", local_dir=ckpt_dir_256)
 
 
-def infer(prompt, output_size="256x256"):
+def infer(prompt, output_size, seed):
     num_frames = 32
     ddim_steps = 50
     unconditional_guidance_scale = 12.0
-    seed = 123
     save_fps = 10
     window_size = 16
     window_stride = 4
@@ -48,19 +47,19 @@ def infer(prompt, output_size="256x256"):
         model_1024 = load_model_checkpoint(model_1024, ckpt_path_1024)
         model_1024.eval()
         model = model_1024
-        fps = 24
-    elif output_size == "256x256":
-        width = 256
-        height = 256
-        config_256 = "configs/inference_t2v_tconv256_v1.0_freenoise.yaml"
-        config_256 = OmegaConf.load(config_256)
-        model_config_256 = config_256.pop("model", OmegaConf.create())
-        model_256 = instantiate_from_config(model_config_256)
-        model_256 = model_256.cuda()
-        model_256 = load_model_checkpoint(model_256, ckpt_path_256)
-        model_256.eval()
-        model = model_256
-        fps = 8
+        fps = 28
+    # elif output_size == "256x256":
+    #     width = 256
+    #     height = 256
+    #     config_256 = "configs/inference_t2v_tconv256_v1.0_freenoise.yaml"
+    #     config_256 = OmegaConf.load(config_256)
+    #     model_config_256 = config_256.pop("model", OmegaConf.create())
+    #     model_256 = instantiate_from_config(model_config_256)
+    #     model_256 = model_256.cuda()
+    #     model_256 = load_model_checkpoint(model_256, ckpt_path_256)
+    #     model_256.eval()
+    #     model = model_256
+    #     fps = 8
 
     if seed is None:
         seed = int.from_bytes(os.urandom(2), "big")
@@ -253,14 +252,26 @@ with gr.Blocks(css=css) as demo:
         )
 
         prompt_in = gr.Textbox(label="Prompt", placeholder="A chihuahua in astronaut suit floating in space, cinematic lighting, glow effect")
-        output_size = gr.Dropdown(["576x1024", "256x256"], value="576x1024", label="Output Size", info="576x1024 is watermark-free")
+
+        with gr.Row():
+            with gr.Accordion('FreeNoise Parameters (feel free to adjust these parameters based on your prompt): ', open=False):
+                with gr.Row():
+                    output_size = gr.Dropdown(["576x1024"], value="576x1024", label="Output Size")
+                    # output_size = gr.Dropdown(["576x1024", "256x256"], value="576x1024", label="Output Size", info="576x1024 is watermark-free")
+                with gr.Row():
+                    seed = gr.Slider(label='seed',
+                                     minimum=0,
+                                     maximum=1000,
+                                     step=1,
+                                     value=123)
+
         submit_btn = gr.Button("Generate")
         video_result = gr.Video(label="Video Output")
 
-        gr.Examples(examples=examples, inputs=[prompt_in, output_size])
+        gr.Examples(examples=examples, inputs=[prompt_in, output_size, seed])
 
         submit_btn.click(fn=infer,
-                         inputs=[prompt_in, output_size],
+                         inputs=[prompt_in, output_size, seed],
                          outputs=[video_result],
                          api_name="zrscp")
 
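
After this change, the "zrscp" endpoint takes three inputs (prompt, output_size, seed) instead of two. A minimal sketch of calling the updated endpoint with gradio_client; the Space id is a placeholder, not something defined in this commit:

from gradio_client import Client

client = Client("<user>/<space-name>")  # hypothetical Space id, substitute the real one
video_path = client.predict(
    "A chihuahua in astronaut suit floating in space, cinematic lighting, glow effect",  # prompt
    "576x1024",   # output_size: only remaining dropdown choice after this commit
    123,          # seed: matches the new slider's default (0-1000)
    api_name="/zrscp",
)
print(video_path)  # local path to the downloaded video output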