patrickvonplaten committed
Commit 941d256
1 Parent(s): 22b5903

finish test setup

Files changed (1): run_video.py (+11 -15)
run_video.py CHANGED
@@ -1,31 +1,27 @@
 #!/usr/bin/env python3
 import torch
-from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, TextToVideoSDPipeline
-import numpy as np
-import gc
+from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
 from PIL import Image
 
-pipe = TextToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
+# Make sure CUDA has < 13GB VRAM
+torch.cuda.set_per_process_memory_fraction(0.5)
+
+pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
 pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
 pipe.enable_vae_slicing()
 
-prompt = "spiderman running in the desert"
-video_frames = pipe(prompt, num_inference_steps=2, height=576, width=1024, num_frames=24).frames
-# video_path = export_to_video(video_frames, output_video_path="/home/patrick/videos/video_576_spiderman_24.mp4")
-pipe.to("cpu")
-import ipdb; ipdb.set_trace()
-
-del pipe
-gc.collect()
-torch.cuda.empty_cache()
+prompt = "Darth Vader is surfing on waves"
+video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=36).frames
+video_path = export_to_video(video_frames, output_video_path="/home/patrick/videos/video_576_darth_vader_36.mp4")
 
+pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
 pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
 pipe.enable_vae_slicing()
 
 video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames]
 
-video_frames = pipe(prompt, video=video, num_inference_steps=2, strength=0.6).frames
-video_path = export_to_video(video_frames, output_video_path="/home/patrick/videos/video_1024_spiderman_24.mp4")
+video_frames = pipe(prompt, video=video, strength=0.6).frames
+video_path = export_to_video(video_frames, output_video_path="/home/patrick/videos/video_1024_darth_vader_36.mp4")
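Note: the 0.5 memory fraction in the new script is there to emulate a GPU with less than 13 GB of VRAM for this test setup. A minimal sketch (not part of the commit) of how one might confirm the cap and report peak usage after a run on a single CUDA device:

#!/usr/bin/env python3
import torch

# Cap this process at half of the device's VRAM (same call used in run_video.py).
torch.cuda.set_per_process_memory_fraction(0.5)

total = torch.cuda.get_device_properties(0).total_memory
print(f"Usable VRAM under the cap: ~{0.5 * total / 1024**3:.1f} GiB")

# ... run the text-to-video and upscaling pipelines here ...

# Peak allocation observed so far; useful to check the run stays under the cap.
peak = torch.cuda.max_memory_allocated(0)
print(f"Peak allocated: {peak / 1024**3:.1f} GiB")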