import torch
import torch.distributed as dist
from diffusers import StableVideoDiffusionPipeline
from PIL import Image
import requests
from io import BytesIO
import psutil
from moviepy import ImageSequenceClip
import os


# Initialize the distributed process group
def init_ddp():
    """Join the torch.distributed process group via a TCP rendezvous.

    World size and rank are taken from the WORLD_SIZE / RANK environment
    variables, defaulting to a single-process group (world_size=1, rank=0).
    """
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    rank = int(os.environ.get('RANK', 0))
    dist.init_process_group(
        backend='gloo',  # gloo is the recommended backend on Windows
        init_method='tcp://192.168.12.251:29500',  # master node IP:port
        world_size=world_size,
        rank=rank,
    )


# Distributed video-frame generation
@torch.no_grad()
def generate_video_distributed(init_image, prompts, num_frames, device):
    """Render ``num_frames`` video frames cooperatively across all ranks.

    Rank 0 loads the pipeline and computes prompt embeddings, then broadcasts
    both; each rank renders a round-robin subset of frames, and the chunks are
    gathered and re-interleaved into original frame order on rank 0.

    Args:
        init_image: conditioning image for the img2vid pipeline.
        prompts: list of prompt strings (tokenized on rank 0 only).
        num_frames: total number of frames to render across all ranks.
        device: torch device this rank renders on.

    Returns:
        Ordered list of frames on rank 0; ``None`` on every other rank.
    """
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    # Rank 0 loads the model, then it is broadcast to the other ranks.
    # NOTE(review): pickling a whole diffusers pipeline through
    # broadcast_object_list is fragile and expensive — having every rank load
    # from the shared cache_dir is usually preferable. Kept to preserve design.
    if rank == 0:
        # Bug fix: `device=` is not a from_pretrained kwarg; placement is
        # done by the .to(device) call below.
        pipeline = StableVideoDiffusionPipeline.from_pretrained(
            "stabilityai/stable-video-diffusion-img2vid-xt",
            torch_dtype=torch.float16,
            cache_dir='models'
        ).to(device)
        # Enable memory optimizations
        pipeline.enable_xformers_memory_efficient_attention()
        pipeline.unet.enable_gradient_checkpointing()
    else:
        pipeline = None
    # Bug fix: broadcast_object_list mutates its list argument IN PLACE and
    # returns None, so the original `dist.broadcast_object_list([None])[0]`
    # raised TypeError on non-zero ranks.
    obj_list = [pipeline]
    dist.broadcast_object_list(obj_list, src=0)
    pipeline = obj_list[0]

    # Round-robin assignment: rank r renders frames r, r+W, r+2W, ...
    local_indices = list(range(rank, num_frames, world_size))

    # Prompt embeddings are computed once on rank 0 and broadcast.
    # NOTE(review): StableVideoDiffusionPipeline is image-to-video and may not
    # expose text_encoder/tokenizer (or accept prompt_embeds) — confirm
    # against the installed diffusers version.
    if rank == 0:
        prompt_embeds = pipeline.text_encoder(
            pipeline.tokenizer(prompts, return_tensors="pt").input_ids.to(device)
        )[0]
    else:
        prompt_embeds = None
    embed_list = [prompt_embeds]  # same in-place broadcast pattern as above
    dist.broadcast_object_list(embed_list, src=0)
    prompt_embeds = embed_list[0]

    # Render this rank's share of frames.
    local_frames = []
    for idx in local_indices:
        # torch.cuda.amp.autocast is deprecated; torch.autocast is equivalent.
        with torch.autocast("cuda"):
            frame = pipeline(
                init_image,
                prompt_embeds=prompt_embeds[idx:idx + 1],
                num_inference_steps=50,
                guidance_scale=7.5
            ).frames[0]
        local_frames.append(frame)

    # Gather every rank's chunk on all ranks.
    # Bug fix: all_gather_object(output_list, obj) requires a pre-sized
    # world_size-length output list; the original passed an empty inner list
    # as the output and so collected nothing.
    all_frames = [None] * world_size
    dist.all_gather_object(all_frames, local_frames)

    if rank == 0:
        # Bug fix: re-interleave chunks into original frame order — naive
        # concatenation of per-rank chunks would scramble the sequence,
        # since frame i was rendered by rank i % world_size.
        return [all_frames[i % world_size][i // world_size]
                for i in range(num_frames)]
    return None


# Main entry point
def main():
    """Initialize DDP, broadcast inputs, render frames, and (on rank 0) save the video."""
    # Initialize the distributed environment
    init_ddp()
    # Bug fix: map this process to a *local* GPU index — the global rank can
    # exceed the per-node GPU count in multi-node runs. Falls back to the
    # global rank when LOCAL_RANK is not set (single-node behavior unchanged).
    local_rank = int(os.environ.get('LOCAL_RANK', dist.get_rank()))
    device = torch.device(f'cuda:{local_rank}')

    # Rank 0 loads the initial image and builds the prompts
    if dist.get_rank() == 0:
        # Load the conditioning image
        url = "https://pic.rmb.bdstatic.com/bjh/other/7132183c3a3973c03796a833e857df67.jpeg"
        response = requests.get(url)
        init_image = Image.open(BytesIO(response.content)).convert("RGB")

        # Build dynamic prompts (8 steps of increasing motion blur)
        base_prompt = "a beautiful sunset over the cat with it crying"
        dynamic_prompts = [
            f"{base_prompt} {step}% motion blur"
            for step in range(0, 100, 100 // 8)
        ]
        prompts = dynamic_prompts
    else:
        init_image = None
        prompts = None

    # Broadcast image and prompts to all ranks.
    # Bug fix: broadcast_object_list mutates its argument in place and
    # returns None — the original `dist.broadcast_object_list([...])[0]`
    # raised TypeError on every rank.
    payload = [init_image, prompts]
    dist.broadcast_object_list(payload, src=0)
    init_image, prompts = payload

    # Render the video frames (returns None on non-zero ranks)
    video_frames = generate_video_distributed(init_image, prompts, len(prompts), device)

    # Rank 0 saves the result
    if dist.get_rank() == 0:
        # NOTE(review): ImageSequenceClip expects numpy arrays or file paths;
        # confirm the pipeline's frame type matches what moviepy accepts.
        clip = ImageSequenceClip(video_frames, fps=4)
        clip.write_videofile("output.mp4")
        print("Video generated successfully!")

    # Tear down the process group
    dist.destroy_process_group()


# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()