Update README.md
README.md CHANGED
````diff
@@ -20,9 +20,9 @@ nf4:
 
 <video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/630406731dd5d3c62486140a/gVMUZNcAPxGSoiPRximAN.mp4"></video>
 
-**Diffusers
+**Diffusers main branch is required**
 
-`pip install git+https://github.com/huggingface/diffusers
+`pip install git+https://github.com/huggingface/diffusers`
 
 To use:
 
@@ -30,7 +30,7 @@ To use:
 from diffusers import MochiPipeline, MochiTransformer3DModel
 from diffusers.utils import export_to_video
 transformer = MochiTransformer3DModel.from_pretrained("imnotednamode/mochi-1-preview-mix-nf4-small", torch_dtype=torch.float16)
-pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", torch_dtype=torch.float16, transformer=transformer)
+pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", revision="refs/pr/18", torch_dtype=torch.float16, transformer=transformer)
 pipe.enable_model_cpu_offload()
 pipe.enable_vae_tiling()
 frames = pipe("A camera follows a squirrel running around on a tree branch", num_inference_steps=100, guidance_scale=4.5, height=480, width=848, num_frames=161).frames[0]
@@ -45,6 +45,6 @@ from diffusers import MochiPipeline, MochiTransformer3DModel, BitsAndBytesConfig
 import torch
 quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type="nf4", llm_int8_skip_modules=["final_layer", "x_embedder.proj", "t_embedder", "pos_frequencies", "t5"])
 # Please convert mochi to diffusers first
-transformer = MochiTransformer3DModel.from_pretrained("genmo/mochi-1-preview",
+transformer = MochiTransformer3DModel.from_pretrained("genmo/mochi-1-preview", revision="refs/pr/18", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.float16)
 transformer.save_pretrained("mochi-1-preview-nf4")
 ```
````
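Assembled from the updated hunks above, a runnable version of the inference snippet. The diff cuts off before the `export_to_video` call, so the output filename and fps below are assumptions (Mochi clips are commonly exported at 30 fps):

```python
import torch
from diffusers import MochiPipeline, MochiTransformer3DModel
from diffusers.utils import export_to_video

# Load the pre-quantized NF4 transformer from this repo
transformer = MochiTransformer3DModel.from_pretrained(
    "imnotednamode/mochi-1-preview-mix-nf4-small", torch_dtype=torch.float16
)
# refs/pr/18 is the diffusers-format revision of genmo/mochi-1-preview used above
pipe = MochiPipeline.from_pretrained(
    "genmo/mochi-1-preview", revision="refs/pr/18",
    torch_dtype=torch.float16, transformer=transformer
)
pipe.enable_model_cpu_offload()  # offload idle submodules to CPU to reduce VRAM
pipe.enable_vae_tiling()         # decode the video latents in tiles

frames = pipe(
    "A camera follows a squirrel running around on a tree branch",
    num_inference_steps=100,
    guidance_scale=4.5,
    height=480,
    width=848,
    num_frames=161,
).frames[0]
# Output filename and fps are assumptions; the hunk above ends before the export line
export_to_video(frames, "mochi.mp4", fps=30)
```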
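The quantization snippet writes the 4-bit checkpoint with `save_pretrained`. A minimal sketch of loading it back, assuming a diffusers version that serializes the bitsandbytes config alongside the weights (the same way this repo's checkpoint is loaded in the first snippet):

```python
import torch
from diffusers import MochiTransformer3DModel

# Assumes the saved checkpoint carries its bitsandbytes quantization config,
# so quantization_config does not need to be passed again on load
transformer = MochiTransformer3DModel.from_pretrained(
    "mochi-1-preview-nf4", torch_dtype=torch.float16
)
```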