import torch
from diffusers import StableVideoDiffusionPipeline
from PIL import Image
import requests
from io import BytesIO
import psutil
import ray

#  pip install moviepy
"""
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
pip install diffusers diffsynth modelscope[framework] Pillow opencv-python moviepy requests numpy peft safetensors
conda install pywin32 ray
"""
from moviepy import ImageSequenceClip


def print_resource_usage():
    """Print current GPU memory usage (if CUDA is available) and CPU utilization."""
    if torch.cuda.is_available():
        allocated_mb = torch.cuda.memory_allocated() / 1024 ** 2
        reserved_mb = torch.cuda.memory_reserved() / 1024 ** 2
        print(f"GPU Memory - Allocated: {allocated_mb:.2f}MB, "
              f"Cached: {reserved_mb:.2f}MB")
    # CPU utilization as reported by psutil (percentage since last call)
    print(f"CPU Usage: {psutil.cpu_percent()}%")


@ray.remote
def generate_video():
    """Download an init image, run Stable Video Diffusion on it, and write the
    result to videos/output.mp4.

    Runs as a Ray remote task; all output is a side effect (file write + prints).
    Raises requests.HTTPError if the image download fails.
    """
    import os
    import numpy as np

    # Load the initial image. Fail fast on network/HTTP errors instead of
    # handing a broken payload to PIL.
    url = "https://pic.rmb.bdstatic.com/bjh/other/7132183c3a3973c03796a833e857df67.jpeg?for=bg"  # replace with actual image URL
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    init_image = Image.open(BytesIO(response.content)).convert("RGB")
    # SVD-XT expects 1024x576 input; resize to avoid shape mismatches.
    init_image = init_image.resize((1024, 576))
    print('loading image...')
    print("Initial resource usage:")
    print_resource_usage()

    pipeline = StableVideoDiffusionPipeline.from_pretrained(
        "stabilityai/stable-video-diffusion-img2vid-xt",
        torch_dtype=torch.float32,  # lower precision requirement
        cache_dir='models')

    print("\nAfter model loading:")
    print_resource_usage()

    # Enable CPU offload to reduce peak GPU memory.
    # NOTE: do NOT also call pipeline.to('cuda') — model CPU offload manages
    # device placement itself, and moving the pipeline afterwards conflicts
    # with the offload hooks.
    pipeline.enable_model_cpu_offload()

    print('generating video...')
    print("\nBefore video generation:")
    print_resource_usage()
    # .frames is batched: a list containing one list of PIL images per input,
    # so take the first (and only) entry.
    video_frames = pipeline(init_image, num_frames=8).frames[0]
    # Free cached GPU memory now that generation is done.
    torch.cuda.empty_cache()
    print("\nAfter video generation:")
    print_resource_usage()

    print('saving video...')
    # Ensure the output directory exists before writing.
    os.makedirs("videos", exist_ok=True)
    # moviepy expects numpy arrays, not PIL images.
    clip = ImageSequenceClip([np.array(frame) for frame in video_frames], fps=4)
    clip.write_videofile("videos/output.mp4", codec="libx264")


if __name__ == "__main__":
    # Initialize Ray and connect to an existing cluster.
    ray.init(address='auto')
    # ray.init(address='ray://192.168.12.251:8877')
    # Launch the remote task and block until it finishes. Without ray.get()
    # the driver exits immediately and the task may be killed mid-run.
    ray.get(generate_video.remote())
