import os
from io import BytesIO

import numpy as np
import psutil
import requests
import torch
from diffusers import StableVideoDiffusionPipeline
from PIL import Image
#  pip install moviepy
from moviepy import ImageSequenceClip

"""
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
pip install diffusers diffsynth modelscope[framework] Pillow opencv-python moviepy requests numpy peft safetensors
conda install pywin32
"""




def print_resource_usage() -> None:
    """Print current GPU memory usage (if CUDA is available) and CPU utilization."""
    if torch.cuda.is_available():
        allocated_mb = torch.cuda.memory_allocated() / 1024 ** 2
        reserved_mb = torch.cuda.memory_reserved() / 1024 ** 2
        print(f"GPU Memory - Allocated: {allocated_mb:.2f}MB, "
              f"Cached: {reserved_mb:.2f}MB")
    # CPU utilization as reported by psutil (percent since last call)
    print(f"CPU Usage: {psutil.cpu_percent()}%")
# Load the conditioning image for image-to-video generation.
print('loading image...')
url = "https://pic.rmb.bdstatic.com/bjh/other/7132183c3a3973c03796a833e857df67.jpeg?for=bg"  # replace with the actual image URL
# Timeout so a dead host can't hang the script; raise on HTTP errors
# instead of silently feeding an error page to PIL.
response = requests.get(url, timeout=30)
response.raise_for_status()
init_image = Image.open(BytesIO(response.content)).convert("RGB")
# SVD-XT was trained at 1024x576; pre-resizing avoids the pipeline's
# internal preprocessor stretching an arbitrary aspect ratio.
init_image = init_image.resize((1024, 576))
print("Initial resource usage:")
print_resource_usage()

# Build the image-to-video pipeline. `from_pretrained` takes no `device`
# keyword in diffusers — move to the GPU explicitly with `.to("cuda")`.
# Use fp16 on GPU (the comment's stated intent was reduced precision;
# float32 is full precision and roughly doubles VRAM use).
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    cache_dir='models')
if torch.cuda.is_available():
    pipeline = pipeline.to("cuda")

print("\nAfter model loading:")
print_resource_usage()

print('generating video...')
print("\nBefore video generation:")
print_resource_usage()
try:
    # `.frames` is a list of videos (one per prompt/image); take the first.
    # The original stored the outer list, which moviepy cannot render.
    video_frames = pipeline(init_image, num_frames=8).frames[0]
finally:
    # Release cached GPU memory whether or not generation succeeded.
    torch.cuda.empty_cache()
print("\nAfter video generation:")
print_resource_usage()

# Saving now only runs after successful generation — the original's
# try/finally printed 'saving video...' and then crashed with NameError
# on `video_frames` whenever the pipeline call failed.
print('saving video...')
os.makedirs("videos", exist_ok=True)
# ImageSequenceClip expects numpy arrays, not PIL images.
clip = ImageSequenceClip([np.asarray(frame) for frame in video_frames], fps=4)  # 4 fps playback
clip.write_videofile("videos/output.mp4", codec="libx264")