PeterL1n committed
Commit 90ee73b
1 Parent(s): 6b26d99
Files changed (4)
  1. README.md +6 -8
  2. app.py +81 -0
  3. requirements.txt +5 -0
  4. style.css +3 -0
README.md CHANGED
@@ -1,12 +1,10 @@
  ---
- title: AnimateDiff Lightning
- emoji: 🐢
- colorFrom: gray
- colorTo: blue
+ title: AnimateDiff-Lightning
+ emoji:
+ colorFrom: blue
+ colorTo: green
  sdk: gradio
- sdk_version: 4.22.0
+ sdk_version: 4.19.1
  app_file: app.py
- pinned: false
+ license: mit
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
app.py ADDED
@@ -0,0 +1,81 @@
+ import gradio as gr
+ import torch
+ import os
+ import spaces
+ import uuid
+
+ from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
+ from diffusers.utils import export_to_video
+ from huggingface_hub import hf_hub_download
+ from safetensors.torch import load_file
+ from PIL import Image
+
+ # Constants
+ base = "frankjoshua/toonyou_beta6"
+ repo = "ByteDance/AnimateDiff-Lightning"
+ checkpoints = {
+     "1-Step" : ["animatediff_lightning_1step_diffusers.safetensors", 1],
+     "2-Step" : ["animatediff_lightning_2step_diffusers.safetensors", 2],
+     "4-Step" : ["animatediff_lightning_4step_diffusers.safetensors", 4],
+     "8-Step" : ["animatediff_lightning_8step_diffusers.safetensors", 8],
+ }
+ loaded = None
+
+ # Initialize model and scheduler (a GPU is required)
+ if torch.cuda.is_available():
+     device = "cuda"
+     dtype = torch.float16
+     adapter = MotionAdapter().to(device, dtype)
+     pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
+     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
+ else:
+     raise NotImplementedError("No GPU detected!")
+
+ # Generation function
+ @spaces.GPU(enable_queue=True)
+ def generate_image(prompt, ckpt):
+     global loaded
+     print(prompt, ckpt)
+
+     checkpoint = checkpoints[ckpt][0]
+     num_inference_steps = checkpoints[ckpt][1]
+
+     if loaded != num_inference_steps:
+         pipe.unet.load_state_dict(load_file(hf_hub_download(repo, checkpoint), device=device), strict=False)
+         loaded = num_inference_steps
+
+     output = pipe(prompt=prompt, guidance_scale=1.0, num_inference_steps=num_inference_steps)
+
+     name = str(uuid.uuid4()).replace("-", "")
+     path = f"/tmp/{name}.mp4"
+
+     export_to_video(output.frames[0], path, fps=10)
+
+     return path
+
+
+
+ # Gradio Interface
+
+ with gr.Blocks(css="style.css") as demo:
+     gr.HTML("<h1><center>AnimateDiff-Lightning ⚡</center></h1>")
+     gr.HTML("<p><center>Lightning-fast text-to-video generation</center></p><p><center><a href='https://huggingface.co/ByteDance/AnimateDiff-Lightning'>https://huggingface.co/ByteDance/AnimateDiff-Lightning</a></center></p>")
+     with gr.Group():
+         with gr.Row():
+             prompt = gr.Textbox(label='Enter your prompt (English)', scale=8)
+             ckpt = gr.Dropdown(label='Select inference steps', choices=['1-Step', '2-Step', '4-Step', '8-Step'], value='4-Step', interactive=True)
+             submit = gr.Button(scale=1, variant='primary')
+     video = gr.Video(label='AnimateDiff-Lightning Generated Video')
+
+     prompt.submit(
+         fn=generate_image,
+         inputs=[prompt, ckpt],
+         outputs=video,
+     )
+     submit.click(
+         fn=generate_image,
+         inputs=[prompt, ckpt],
+         outputs=video,
+     )
+
+ demo.queue().launch()
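
For reference, here is a minimal standalone sketch of the pipeline that app.py builds, runnable outside Gradio/Spaces. It assumes a CUDA GPU and reuses the same model IDs and scheduler settings as the app; the prompt and output path are illustrative only.

```python
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

base = "frankjoshua/toonyou_beta6"        # same base model as app.py
repo = "ByteDance/AnimateDiff-Lightning"  # Lightning motion-module checkpoints
ckpt = "animatediff_lightning_4step_diffusers.safetensors"

device, dtype = "cuda", torch.float16
adapter = MotionAdapter().to(device, dtype)
pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
# Trailing timestep spacing with a linear beta schedule, as in app.py
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)
# Swap in the distilled 4-step weights
pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)

# guidance_scale=1.0 and a step count matching the checkpoint, as in app.py
output = pipe(prompt="a girl smiling", guidance_scale=1.0, num_inference_steps=4)
export_to_video(output.frames[0], "output.mp4", fps=10)
```

The 1-, 2-, and 8-step variants work the same way; only the checkpoint filename and num_inference_steps change, which is exactly the mapping the checkpoints dict in app.py encodes.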
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ accelerate
+ diffusers
+ gradio
+ torch
+ transformers
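
Note that app.py also imports spaces, safetensors, huggingface_hub, and PIL. On a Gradio Space these are typically available anyway (gradio and the spaces package come with the Space runtime, and the rest are installed as dependencies of diffusers), so the short list above works, but a more explicit requirements.txt could name them directly. The listing below is only a sketch, with no version pins assumed:

```
accelerate
diffusers
gradio
torch
transformers
safetensors
huggingface_hub
Pillow
spaces
```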
style.css ADDED
@@ -0,0 +1,3 @@
+ .gradio-container {
+   max-width: 690px !important;
+ }