
"""
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
pip install diffusers diffsynth modelscope[framework] Pillow opencv-python moviepy requests numpy peft safetensors
conda install pywin32
pip install psutil deepspeed
------deepspeed安装失败(window环境下)
pip install py-cpuinfo
set DS_BUILD_OPS=0  # 禁用需要 Linux 工具链的自定义操作
set DS_BUILD_AIO=0  # 禁用异步 IO 操作（避免依赖 Linux 命令）
"""

import os
from io import BytesIO

import deepspeed
import numpy as np
import psutil
import requests
import torch
from diffusers import StableVideoDiffusionPipeline
from moviepy import ImageSequenceClip
from PIL import Image


def init_distributed():
    """Read the launcher-assigned local rank and bind this process to its GPU.

    DeepSpeed/torchrun export ``LOCAL_RANK`` per process; when it is unset
    (e.g. a plain single-process run) we default to rank 0.

    Returns:
        int: the local rank of this process.
    """
    local_rank = int(os.getenv('LOCAL_RANK', '0'))
    # Only touch the CUDA runtime when a GPU is actually present, so the
    # script can still be imported and smoke-tested on CPU-only machines.
    if torch.cuda.is_available():
        torch.cuda.set_device(local_rank)
    return local_rank


def print_resource_usage():
    """Log current GPU memory usage (if CUDA is available) and CPU load."""
    if torch.cuda.is_available():
        allocated_mb = torch.cuda.memory_allocated() / 1024 ** 2
        reserved_mb = torch.cuda.memory_reserved() / 1024 ** 2
        print(f"GPU Memory - Allocated: {allocated_mb:.2f}MB, "
              f"Cached: {reserved_mb:.2f}MB")
    print(f"CPU Usage: {psutil.cpu_percent()}%")


def generate_video(local_rank):
    """Generate video frames from a fixed seed image with Stable Video Diffusion.

    The pipeline's UNet is wrapped by DeepSpeed (ZeRO stage 2 with CPU
    optimizer offload) to reduce per-GPU memory pressure.

    Args:
        local_rank: CUDA device index this process should run on.

    Returns:
        list[numpy.ndarray]: generated RGB frames as (H, W, 3) uint8 arrays,
        ready to be passed to ``moviepy.ImageSequenceClip``.

    Raises:
        requests.HTTPError: if the seed image cannot be downloaded.
    """
    print('loading image...')
    url = "https://pic.rmb.bdstatic.com/bjh/other/7132183c3a3973c03796a833e857df67.jpeg?for=bg"
    response = requests.get(url)
    # Fail fast with a clear error on a bad download instead of letting
    # PIL choke on an HTML error page.
    response.raise_for_status()
    init_image = Image.open(BytesIO(response.content)).convert("RGB")
    print("Initial resource usage:")
    print_resource_usage()

    # fp16 halves weight/activation memory; weights are cached under ./models.
    pipeline = StableVideoDiffusionPipeline.from_pretrained(
        "stabilityai/stable-video-diffusion-img2vid-xt",
        torch_dtype=torch.float16,
        cache_dir='models'
    )

    # The UNet dominates compute/memory, so it is the part handed to DeepSpeed.
    model = pipeline.unet.to(local_rank)

    # DeepSpeed config: ZeRO stage 2 shards optimizer state + gradients and
    # offloads the optimizer to CPU; batch size 1 to fit a single GPU.
    ds_config = {
        "fp16": {
            "enabled": True,
            "initial_scale_power": 16
        },
        "optimizer": {
            "type": "Adam",
            "params": {
                "lr": 1e-4
            }
        },
        "model_parallel_size": 1,
        "zero_optimization": {
            "stage": 2,
            "offload_optimizer": {
                "device": "cpu",
                "pin_memory": True
            },
            "allgather_partitions": True,
            "allgather_bucket_size": 2e8,
            "overlap_comm": True,
            "reduce_scatter": True,
            "reduce_bucket_size": 2e8
        },
        "steps_per_print": 2000,
        "train_batch_size": 1,
        "train_micro_batch_size_per_gpu": 1
    }

    # deepspeed.initialize returns (engine, optimizer, dataloader, scheduler);
    # only the wrapped engine is needed here.
    model, _, _, _ = deepspeed.initialize(
        args=None,
        model=model,
        model_parameters=model.parameters(),
        config=ds_config
    )
    pipeline.unet = model

    print("\nAfter model loading:")
    print_resource_usage()

    print('generating video...')
    print("\nBefore video generation:")
    print_resource_usage()
    # .frames is a list of frame lists (one list per prompt/image); we run a
    # single image, so take the first entry to get the actual PIL frames.
    video_frames = pipeline(init_image, num_frames=8).frames[0]
    # Release cached intermediate activations before the caller saves the video.
    torch.cuda.empty_cache()
    print("\nAfter video generation:")
    print_resource_usage()

    print('saving video...')
    # Convert PIL images to numpy arrays so moviepy can consume them directly.
    return [np.asarray(frame) for frame in video_frames]


if __name__ == "__main__":
    local_rank = init_distributed()
    video_frames = generate_video(local_rank)

    # 仅在主节点（local_rank == 0）保存视频
    if local_rank == 0:
        clip = ImageSequenceClip([frame for frame in video_frames], fps=4)
        clip.write_videofile("videos/output.mp4", codec="libx264")