GrayShine committed on
Commit
29fb181
1 Parent(s): 6aa6c2b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -15
app.py CHANGED
@@ -336,7 +336,8 @@ def video_prediction(text, image, scfg_scale, tcfg_scale, img_cfg_scale, prefram
336
  # Judge Generation or Prediction
337
  # ========================================
338
  @spaces.GPU
339
- def gen_or_pre(text_input, image_input, scfg_scale, tcfg_scale, img_cfg_scale, preframe_input, diffusion_step):
 
340
  default_step = [25, 40, 50, 100, 125, 200, 250]
341
  difference = [abs(item - diffusion_step) for item in default_step]
342
  diffusion_step = default_step[difference.index(min(difference))]
@@ -368,16 +369,6 @@ with gr.Blocks() as demo:
368
  interactive=True,
369
  label="Spatial Text Guidence Scale",
370
  )
371
- # with gr.Row():
372
- # with gr.Column(scale=1.0):
373
- # tcfg_scale = gr.Slider(
374
- # minimum=1,
375
- # maximum=50,
376
- # value=6.5,
377
- # step=0.1,
378
- # interactive=True,
379
- # label="Temporal Text Guidence Scale",
380
- # )
381
  with gr.Row():
382
  with gr.Column(scale=1.0):
383
  img_cfg_scale = gr.Slider(
@@ -407,15 +398,20 @@ with gr.Blocks() as demo:
407
  output_video = gr.Video(interactive=False, include_audio=True, elem_id="输出的视频")
408
  clear = gr.Button("Restart")
409
 
410
- tcfg_scale = scfg_scale
411
  ex = gr.Examples(
412
- examples = [["Underwater environment cosmetic bottles", None, 7.5, 7.5, None, "./input/i2v/Underwater_environment_cosmetic_bottles.png", 100]],
 
 
 
 
 
413
  fn = gen_or_pre,
414
- inputs = [text_input, image_input, scfg_scale, tcfg_scale, img_cfg_scale, preframe_input, diffusion_step],
415
  outputs=[output_video],
416
  cache_examples=False
417
  )
418
 
419
- run.click(gen_or_pre, [text_input, image_input, scfg_scale, tcfg_scale, img_cfg_scale, preframe_input, diffusion_step], [output_video])
420
 
421
  demo.queue(max_size=12).launch()
 
336
  # Judge Generation or Prediction
337
  # ========================================
338
  @spaces.GPU
339
+ def gen_or_pre(text_input, image_input, scfg_scale, img_cfg_scale, preframe_input, diffusion_step):
340
+ tcfg_scale = scfg_scale
341
  default_step = [25, 40, 50, 100, 125, 200, 250]
342
  difference = [abs(item - diffusion_step) for item in default_step]
343
  diffusion_step = default_step[difference.index(min(difference))]
 
369
  interactive=True,
370
  label="Spatial Text Guidence Scale",
371
  )
 
 
 
 
 
 
 
 
 
 
372
  with gr.Row():
373
  with gr.Column(scale=1.0):
374
  img_cfg_scale = gr.Slider(
 
398
  output_video = gr.Video(interactive=False, include_audio=True, elem_id="输出的视频")
399
  clear = gr.Button("Restart")
400
 
401
+ # tcfg_scale = scfg_scale
402
  ex = gr.Examples(
403
+ examples = [["Underwater environment cosmetic bottles", None, 7.5, 7.5, None, "./input/i2v/Underwater_environment_cosmetic_bottles.png", 100],
404
+ ["A big drop of water falls on a rose petal", None, 7.5, 7.5, None, "./input/i2v/A_big_drop_of_water_falls_on_a_rose_petal.png", 100],
405
+ ["A fish swims past an oriental woman", None, 7.5, 7.5, None, "./input/i2v/A_fish_swims_past_an_oriental_woman.png", 100],
406
+ ["Cinematic photograph View of piloting aaero", None, 7.5, 7.5, None, "./input/i2v/Cinematic_photograph_View_of_piloting_aaero.png", 100],
407
+ ["Planet hits earth", None, 7.5, 7.5, None, "./input/i2v/Planet_hits_earth.png", 100],
408
+ ],
409
  fn = gen_or_pre,
410
+ inputs = [text_input, image_input, scfg_scale, img_cfg_scale, preframe_input, diffusion_step],
411
  outputs=[output_video],
412
  cache_examples=False
413
  )
414
 
415
+ run.click(gen_or_pre, [text_input, image_input, scfg_scale, img_cfg_scale, preframe_input, diffusion_step], [output_video])
416
 
417
  demo.queue(max_size=12).launch()