PeterL1n committed on
Commit 87d5fe9
1 Parent(s): c6579d8
Files changed (1)
  1. app.py +30 -22
app.py CHANGED
@@ -12,13 +12,6 @@ from PIL import Image
 
 # Constants
 base = "frankjoshua/toonyou_beta6"
-repo = "ByteDance/AnimateDiff-Lightning"
-checkpoints = {
-    "1-Step" : ["animatediff_lightning_1step_diffusers.safetensors", 1],
-    "2-Step" : ["animatediff_lightning_2step_diffusers.safetensors", 2],
-    "4-Step" : ["animatediff_lightning_4step_diffusers.safetensors", 4],
-    "8-Step" : ["animatediff_lightning_8step_diffusers.safetensors", 8],
-}
 loaded = None
 
 # Ensure model and scheduler are initialized in GPU-enabled function
@@ -33,24 +26,21 @@ else:
 
 # Function
 @spaces.GPU(enable_queue=True)
-def generate_image(prompt, ckpt):
+def generate_image(prompt, step):
     global loaded
-    print(prompt, ckpt)
+    print(prompt, step)
 
-    checkpoint = checkpoints[ckpt][0]
-    num_inference_steps = checkpoints[ckpt][1]
-
-    if loaded != num_inference_steps:
-        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, checkpoint), device=device), strict=False)
-        loaded = num_inference_steps
+    if loaded != step:
+        repo = "ByteDance/AnimateDiff-Lightning"
+        ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
+        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
+        loaded = step
 
-    output = pipe(prompt=prompt, guidance_scale=1.0, num_inference_steps=num_inference_steps)
+    output = pipe(prompt=prompt, guidance_scale=1.0, num_inference_steps=step)
 
     name = str(uuid.uuid4()).replace("-", "")
     path = f"/tmp/{name}.mp4"
-
     export_to_video(output.frames[0], path, fps=10)
-
     return path
 
 
@@ -62,10 +52,28 @@ with gr.Blocks(css="style.css") as demo:
     gr.HTML("<p><center>Lightning-fast text-to-video generation</center></p><p><center><a href='https://huggingface.co/ByteDance/AnimateDiff-Lightning'>https://huggingface.co/ByteDance/AnimateDiff-Lightning</a></center></p>")
     with gr.Group():
         with gr.Row():
-            prompt = gr.Textbox(label='Enter your prompt (English)', scale=8)
-            ckpt = gr.Dropdown(label='Select inference steps',choices=['1-Step', '2-Step', '4-Step', '8-Step'], value='4-Step', interactive=True)
-            submit = gr.Button(scale=1, variant='primary')
-        video = gr.Video(label='AnimateDiff-Lightning Generated Image')
+            prompt = gr.Textbox(
+                label='Enter your prompt (English)',
+                scale=8
+            )
+            ckpt = gr.Dropdown(
+                label='Select inference steps',
+                choices=[
+                    ('1-Step', 1),
+                    ('2-Step', 2),
+                    ('4-Step', 4),
+                    ('8-Step', 8)],
+                value='4-Step',
+                interactive=True
+            )
+            submit = gr.Button(
+                scale=1,
+                variant='primary'
+            )
+        video = gr.Video(
+            label='AnimateDiff-Lightning',
+            autoplay=True,
+        )
 
     prompt.submit(
         fn=generate_image,
 
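
The two halves of this change fit together: when choices is a list of (label, value) tuples, gr.Dropdown displays the label but passes the underlying value to the event handler, so generate_image now receives step as a plain int and can derive both the checkpoint filename and num_inference_steps from it, which is what makes the old checkpoints lookup table redundant. A minimal sketch of that tuple-choices behavior in isolation (the echo_step handler and the wiring are illustrative, not part of this Space):

import gradio as gr

def echo_step(step):
    # Gradio delivers the tuple's second element, so step is already an int
    return f"{step} inference steps (type: {type(step).__name__})"

with gr.Blocks() as demo:
    ckpt = gr.Dropdown(
        label='Select inference steps',
        choices=[('1-Step', 1), ('2-Step', 2), ('4-Step', 4), ('8-Step', 8)],
        value=4,  # the default is selected by value rather than by label
    )
    out = gr.Textbox(label='Handler received')
    ckpt.change(fn=echo_step, inputs=ckpt, outputs=out)

demo.launch()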
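Inside generate_image, the loaded global acts as a one-entry cache: UNet weights are fetched and applied only when the requested step count differs from the one currently loaded, and hf_hub_download keeps the downloaded safetensors file in a local cache, so repeat selections avoid the network entirely. A minimal sketch of that swap logic on its own, assuming a pipe and device set up as in the rest of the Space (ensure_checkpoint is an illustrative name, not from the commit):

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

repo = "ByteDance/AnimateDiff-Lightning"
loaded = None  # step count whose weights are currently in the UNet

def ensure_checkpoint(pipe, step, device='cuda'):
    global loaded
    if loaded == step:
        return  # requested weights are already in place; skip the swap
    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
    # hf_hub_download returns a local path, hitting the network only on a cache miss
    state_dict = load_file(hf_hub_download(repo, ckpt), device=device)
    # strict=False leaves UNet keys absent from the checkpoint at their current values
    pipe.unet.load_state_dict(state_dict, strict=False)
    loaded = step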