# Path setup must run BEFORE the local-package imports below, otherwise the
# sibling modules cannot be resolved when this script is launched from
# another working directory.
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))

import torch
import torch.distributed as dist
from torch.multiprocessing import Process

from util.resource_monitor import print_resource_usage
from distributed_worker import generate_frames  # NOTE: renamed from parallel_generate

def run(rank, world_size, init_image_url):
    """Distributed video-generation worker entry point.

    Rank 0 downloads the init image, broadcasts its raw RGB bytes to all
    ranks, every rank generates its share of frames, and rank 0 assembles
    the gathered frames into ``output.mp4``.

    Args:
        rank: This process's rank in the group (0 is the master).
        world_size: Total number of participating processes.
        init_image_url: HTTP(S) URL of the initial image (fetched by rank 0 only).
    """
    # Join the process group; the master node address is hard-coded here.
    dist.init_process_group(
        backend='gloo',
        init_method='tcp://192.168.1.100:12355',  # master node IP:port
        rank=rank,
        world_size=world_size
    )

    # Rank 0 downloads and decodes the image; other ranks allocate a
    # same-sized receive buffer (broadcast requires matching shapes).
    # NOTE(review): 921600 = 640 * 480 * 3, so this assumes a 640x480 RGB
    # image — confirm the source image matches, or resize it on rank 0.
    if rank == 0:
        from PIL import Image
        import requests
        from io import BytesIO

        response = requests.get(init_image_url)
        response.raise_for_status()  # fail loudly on a bad URL / HTTP error
        init_image = Image.open(BytesIO(response.content)).convert("RGB")
        print_resource_usage(rank)
        image_tensor = torch.tensor(
            list(init_image.tobytes()), dtype=torch.uint8
        )
    else:
        image_tensor = torch.empty(921600, dtype=torch.uint8)

    # Ship the raw image bytes from rank 0 to everyone.
    dist.broadcast(image_tensor, 0)

    # Each rank renders its slice of the video (generate_frames is imported
    # at module level).
    local_frames = generate_frames(image_tensor, rank, world_size)

    # Gather every rank's frame list onto all ranks.
    gathered_frames = [None] * world_size
    dist.all_gather_object(gathered_frames, local_frames)

    # Only the master flattens the per-rank frame lists and writes the video;
    # other ranks must NOT touch ImageSequenceClip (it is only imported here).
    if rank == 0:
        from moviepy.editor import ImageSequenceClip

        all_frames = [frame for sublist in gathered_frames for frame in sublist]
        clip = ImageSequenceClip(all_frames, fps=4)
        clip.write_videofile("output.mp4", codec="libx264")

if __name__ == "__main__":
    world_size = 3  # total number of nodes (including the master)
    # Launch only rank 0 (the master) here; the Process call must stay
    # inside this guard so a mere import does not spawn a worker.
    # Other nodes run this file manually with their own rank.
    Process(target=run, args=(0, world_size, "YOUR_IMAGE_URL")).start()