Add diffusers inference code #1
by multimodalart - opened

README.md CHANGED
@@ -52,6 +52,42 @@ You should use `flat color` to trigger the image generation.

You should use `no lineart` to trigger the image generation.

+## Using with Diffusers
+```bash
+pip install git+https://github.com/huggingface/diffusers.git
+```
+
+```py
+import torch
+from diffusers.utils import export_to_video
+from diffusers import AutoencoderKLWan, WanPipeline
+from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
+
+# Available models: Wan-AI/Wan2.1-T2V-14B-Diffusers, Wan-AI/Wan2.1-T2V-1.3B-Diffusers
+model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
+pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
+flow_shift = 5.0  # 5.0 for 720P, 3.0 for 480P
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
+pipe.to("cuda")
+
+pipe.load_lora_weights("motimalu/wan-flat-color-v2")
+
+pipe.enable_model_cpu_offload()  # for low-VRAM environments
+
+prompt = "A cat wandering around new york city"
+negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
+
+output = pipe(
+    prompt=prompt,
+    negative_prompt=negative_prompt,
+    height=480,
+    width=720,
+    num_frames=81,
+    guidance_scale=5.0,
+).frames[0]
+export_to_video(output, "output.mp4", fps=16)
+```

## Download model

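The model card states that `flat color` (and `no lineart`) should appear in the prompt to trigger the LoRA, but the example prompt in the added snippet does not include them. A minimal sketch of prepending the trigger phrases, assuming `pipe` and `negative_prompt` are already set up as in the snippet above (the prompt text and output filename here are illustrative, not part of the diff):

```py
# Illustrative only: prepend the model card's trigger phrases to the prompt.
prompt = "flat color, no lineart, a cat wandering around new york city"

output = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    height=480,
    width=720,
    num_frames=81,
    guidance_scale=5.0,
).frames[0]
export_to_video(output, "flat_color_output.mp4", fps=16)
```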