NagaSaiAbhinay committed
Commit 50f6abe
1 Parent(s): a58e206

Fix code samples

Files changed (1)
  1. README.md +35 -3
README.md CHANGED
@@ -27,16 +27,48 @@ Test prompt: "A princess playing a guitar, modern disney style"
 
  ## Usage
 
+ ### Loading with a pre-existing Text2Image checkpoint
  ```python
  import torch
- from diffusers import DiffusionPipeline, DDIMScheduler
+ from diffusers import TuneAVideoPipeline, DDIMScheduler, UNet3DConditionModel
  from diffusers.utils import export_to_video
  from PIL import Image
 
-
+ # Use any pretrained Text2Image checkpoint based on stable diffusion
  pretrained_model_path = "nitrosocke/mo-di-diffusion"
+ unet = UNet3DConditionModel.from_pretrained(
+     "Tune-A-Video-library/df-cpt-mo-di-bear-guitar", subfolder="unet", torch_dtype=torch.float16
+ ).to("cuda")
+
+ pipe = TuneAVideoPipeline.from_pretrained(pretrained_model_path, unet=unet, torch_dtype=torch.float16).to("cuda")
+
+ prompt = "A princess playing a guitar, modern disney style"
+ generator = torch.Generator(device="cuda").manual_seed(42)
+
+ video_frames = pipe(prompt, video_length=3, generator=generator, num_inference_steps=50, output_type="np").frames
+
+ # Saving to gif.
+ pil_frames = [Image.fromarray(frame) for frame in video_frames]
+ duration = len(pil_frames) / 8
+ pil_frames[0].save(
+     "animation.gif",
+     save_all=True,
+     append_images=pil_frames[1:],  # append rest of the images
+     duration=duration * 1000,  # in milliseconds
+     loop=0,
+ )
+
+ # Saving to video
+ video_path = export_to_video(video_frames)
+ ```
+ ### Loading a saved Tune-A-Video checkpoint
+ ```python
+ import torch
+ from diffusers import DiffusionPipeline, DDIMScheduler
+ from diffusers.utils import export_to_video
+ from PIL import Image
 
- pipe = TuneAVideoPipeline.from_pretrained(
+ pipe = DiffusionPipeline.from_pretrained(
      "Tune-A-Video-library/df-cpt-mo-di-bear-guitar", torch_dtype=torch.float16
  ).to("cuda")
 
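The hunk ends at the `from_pretrained` call for the saved checkpoint, so the generation and export steps of the second sample are not shown here. A minimal sketch of how that sample would continue, assuming the restored pipeline accepts the same arguments as the first sample above:

```python
# Sketch only: continues the second sample, assuming the restored pipeline
# takes the same arguments (video_length, num_inference_steps, output_type)
# as the pipeline in the first sample.
prompt = "A princess playing a guitar, modern disney style"
generator = torch.Generator(device="cuda").manual_seed(42)

video_frames = pipe(prompt, video_length=3, generator=generator, num_inference_steps=50, output_type="np").frames

# Save the frames to a video file, as in the first sample.
video_path = export_to_video(video_frames)
```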