dn6 (HF staff) committed
Commit b449606
1 Parent(s): 5e54afb
README.md CHANGED
@@ -7,4 +7,33 @@ pipeline_tag: text-to-video
 
 For more details, please refer to our [[paper](https://arxiv.org/abs/2402.00769)] | [[code](https://github.com/G-U-N/AnimateLCM)] | [[proj-page](https://animatelcm.github.io/)] | [[civitai](https://civitai.com/models/290375/animatelcm-fast-video-generation)].
 
-<video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/63e9e92f20c109718713f5eb/KCwSoZCdxkkmtDg1LuXsP.mp4"></video>
+<video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/63e9e92f20c109718713f5eb/KCwSoZCdxkkmtDg1LuXsP.mp4"></video>
+
+## Using AnimateLCM with Diffusers
+
+```python
+import torch
+from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter
+from diffusers.utils import export_to_gif
+
+adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM", torch_dtype=torch.float16)
+pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16)
+pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")
+
+pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="sd15_lora_beta.safetensors", adapter_name="lcm-lora")
+pipe.set_adapters(["lcm-lora"], [0.8])
+
+pipe.enable_vae_slicing()
+pipe.enable_model_cpu_offload()
+
+output = pipe(
+    prompt="A space rocket with trails of smoke behind it launching into space from the desert, 4k, high resolution",
+    negative_prompt="bad quality, worse quality, low resolution",
+    num_frames=16,
+    guidance_scale=2.0,
+    num_inference_steps=6,
+    generator=torch.Generator("cpu").manual_seed(0),
+)
+frames = output.frames[0]
+export_to_gif(frames, "animatelcm.gif")
+```
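Not part of the commit itself, but as a related usage note: the generated frames can also be written out as an MP4 rather than a GIF with diffusers' `export_to_video` helper. A minimal sketch, reusing `frames` from the snippet above (the fps value is an illustrative assumption, not something specified in the commit):

```python
from diffusers.utils import export_to_video

# Reuses `frames` (the list of PIL images) produced by the README snippet above.
# fps=8 is an illustrative choice, not a value from this commit.
export_to_video(frames, "animatelcm.mp4", fps=8)
```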
config.json ADDED
@@ -0,0 +1,17 @@
+{
+  "_class_name": "MotionAdapter",
+  "_diffusers_version": "0.27.0.dev0",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "conv_in_channels": null,
+  "motion_layers_per_block": 2,
+  "motion_max_seq_length": 32,
+  "motion_mid_block_layers_per_block": 1,
+  "motion_norm_num_groups": 32,
+  "motion_num_attention_heads": 8,
+  "use_motion_mid_block": true
+}
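The keys in this config map onto the constructor arguments that diffusers' `MotionAdapter` registers, so (as an illustrative sketch, not part of the commit) an equivalent but randomly initialized adapter could be built directly from these values instead of via `from_pretrained`:

```python
from diffusers import MotionAdapter

# Mirrors config.json above; this builds a freshly initialized adapter,
# whereas MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") also loads
# the trained motion-module weights.
adapter = MotionAdapter(
    block_out_channels=(320, 640, 1280, 1280),
    motion_layers_per_block=2,
    motion_mid_block_layers_per_block=1,
    motion_num_attention_heads=8,
    motion_norm_num_groups=32,
    motion_max_seq_length=32,
    use_motion_mid_block=True,
    conv_in_channels=None,
)
```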
diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1997997a982432f5fb79d1d002259ef37ac3212db9f0361f00b104153e68cd6a
+size 907702280
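Because the commit adds both an fp16 and a full-precision weight file, the smaller fp16 file can be selected explicitly with the `variant` argument of `from_pretrained`. A minimal sketch (the README example above instead downloads the default weights and casts them via `torch_dtype`):

```python
import torch
from diffusers import MotionAdapter

# Loads diffusion_pytorch_model.fp16.safetensors directly instead of the
# ~1.8 GB full-precision file.
adapter = MotionAdapter.from_pretrained(
    "wangfuyun/AnimateLCM",
    variant="fp16",
    torch_dtype=torch.float16,
)
```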
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb08053b37ee27d69dc9bfa55bc526d998d9f13211f9e21509f975a9d4ef4803
+size 1815329816