"""

# Note: pick the index URL that matches your own CUDA version
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
pip install diffusers diffsynth modelscope[framework] Pillow opencv-python moviepy requests numpy psutil
"""


import os
from io import BytesIO

import numpy as np
import psutil
import requests
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from diffusers import StableVideoDiffusionPipeline
from moviepy import ImageSequenceClip
from PIL import Image


def setup(rank, world_size, master_addr, master_port):
    """Join the NCCL process group as `rank` and bind this process to its GPU.

    Exports the rendezvous address/port via the environment, which is how
    torch.distributed discovers the master node.
    """
    os.environ.update(MASTER_ADDR=master_addr, MASTER_PORT=master_port)
    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
    # One GPU per rank: all subsequent CUDA work in this process targets it.
    torch.cuda.set_device(rank)


def cleanup():
    """Tear down the distributed process group initialized by setup()."""
    dist.destroy_process_group()


def print_resource_usage():
    """Print current GPU memory usage (when CUDA is available) and CPU load."""
    if torch.cuda.is_available():
        allocated_mb = torch.cuda.memory_allocated() / 1024 ** 2
        reserved_mb = torch.cuda.memory_reserved() / 1024 ** 2
        print(f"GPU Memory - Allocated: {allocated_mb:.2f}MB, "
              f"Cached: {reserved_mb:.2f}MB")
    print(f"CPU Usage: {psutil.cpu_percent()}%")


def generate_video(rank, world_size, master_addr, master_port):
    """Worker entry point: generate a short video from a fixed source image.

    Runs as one DDP rank. Rank 0 additionally writes the result to
    ``videos/output.mp4`` (return values cannot be collected through
    ``multiprocessing.Process``, so each worker must persist its own output).

    Args:
        rank: This worker's rank / local GPU index.
        world_size: Total number of workers.
        master_addr / master_port: Rendezvous endpoint for torch.distributed.

    Returns:
        The list of generated frames, or raises on failure.
    """
    setup(rank, world_size, master_addr, master_port)

    try:
        print('loading image...')
        url = "https://pic.rmb.bdstatic.com/bjh/other/7132183c3a3973c03796a833e857df67.jpeg?for=bg"
        # Fail fast on network errors instead of handing PIL a bad payload.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        init_image = Image.open(BytesIO(response.content)).convert("RGB")
        print("Initial resource usage:")
        print_resource_usage()

        pipeline = StableVideoDiffusionPipeline.from_pretrained(
            "stabilityai/stable-video-diffusion-img2vid-xt",
            torch_dtype=torch.float16,
            cache_dir='models'
        )
        # Move the WHOLE pipeline (VAE, image encoder, UNet) to this rank's
        # GPU; fp16 modules left on the CPU would fail during inference.
        pipeline = pipeline.to(rank)

        # NOTE(review): DDP proxies the forward call but not attribute access
        # such as unet.config/unet.dtype, which diffusers pipelines read —
        # confirm this wrapping works with the installed diffusers version.
        pipeline.unet = torch.nn.parallel.DistributedDataParallel(
            pipeline.unet, device_ids=[rank]
        )

        print("\nAfter model loading:")
        print_resource_usage()

        print('generating video...')
        print("\nBefore video generation:")
        print_resource_usage()
        # .frames is batched: index [0] selects the frame list for our single
        # input image.
        video_frames = pipeline(init_image, num_frames=8).frames[0]
        torch.cuda.empty_cache()
        print("\nAfter video generation:")
        print_resource_usage()

        # Only rank 0 writes the file so concurrent workers don't clobber it.
        if rank == 0:
            print('saving video...')
            os.makedirs("videos", exist_ok=True)
            clip = ImageSequenceClip(
                [np.array(frame) for frame in video_frames], fps=4
            )
            clip.write_videofile("videos/output.mp4", codec="libx264")
    finally:
        # Always release the process group, even when generation fails.
        cleanup()

    return video_frames


if __name__ == "__main__":
    world_size = 2
    master_addr = "192.168.12.251"
    master_port = "29500"

    # CUDA requires the "spawn" start method; "fork" would inherit a
    # poisoned CUDA context.
    mp.set_start_method("spawn")

    # NOTE: Process.join() returns None — worker return values cannot be
    # collected here. Any output (e.g. the rendered video) must be written
    # from inside the worker processes themselves.
    processes = []
    for rank in range(world_size):
        p = mp.Process(
            target=generate_video,
            args=(rank, world_size, master_addr, master_port),
        )
        p.start()
        processes.append(p)

    for p in processes:
        p.join()
        if p.exitcode != 0:
            print(f"worker {p.pid} exited with code {p.exitcode}")

    # Do NOT call dist.get_rank() here: the process group only exists inside
    # the worker processes, so it would raise in this parent process.









