# machine_b.py
import torch
from diffusers import UNet2DConditionModel
from config import *
import torch.distributed as dist

# Initialize the distributed process group. This node (rank NODE_RANK) joins
# the group coordinated by the master at MASTER_IP:MASTER_PORT; all of
# MASTER_IP, MASTER_PORT, WORLD_SIZE, and NODE_RANK come from `config`.
# NOTE(review): the "gloo" backend's point-to-point send/recv only support
# CPU tensors — the recv/send below target cuda:0 directly; confirm this is
# intentional or stage transfers through CPU (or switch to "nccl").
dist.init_process_group(
    backend="gloo",
    init_method=f"tcp://{MASTER_IP}:{MASTER_PORT}",
    world_size=WORLD_SIZE,
    rank=NODE_RANK
)

# Load the U-Net assigned to this machine from the Hugging Face hub (cached
# under ./models) and move it to the first local GPU.
# NOTE(review): the SVD img2vid checkpoint normally ships a spatio-temporal
# U-Net rather than UNet2DConditionModel — verify this subfolder actually
# contains weights compatible with this class, or loading will fail.
unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    subfolder="unet",
    cache_dir="models"
).to("cuda:0")

# Worker loop: receive conditioning tensors from node 0, run one U-Net
# denoising pass on the GPU, and send the result back. Runs until the
# process is terminated externally.
#
# Fixes vs. original:
#  - gloo's point-to-point send/recv only support CPU tensors; the original
#    received directly into cuda:0 tensors. Transfers are now staged through
#    CPU and moved to/from the GPU explicitly.
#  - inference_mode() + eval() disable autograd bookkeeping and training-mode
#    layers, preventing needless memory growth in an infinite inference loop.
unet.eval()
with torch.inference_mode():
    while True:
        # Receive text embeddings from node 0 (CPU staging buffer for gloo),
        # then move onto the GPU for the U-Net forward pass.
        # Shape (1, 77, 1024) is the protocol agreed with node 0 — TODO confirm.
        text_embeds = torch.empty((1, 77, 1024))
        dist.recv(text_embeds, src=0)
        text_embeds = text_embeds.to("cuda:0")

        # Receive latent image features from node 0 (same CPU-staging scheme).
        image_features = torch.empty((1, 4, 64, 64))
        dist.recv(image_features, src=0)
        image_features = image_features.to("cuda:0")

        # One denoising pass: (latents, timestep=0, encoder_hidden_states).
        output = unet(image_features, 0, text_embeds).sample

        # Send the denoised features back to node 0; move to CPU first so the
        # gloo send accepts the tensor.
        dist.send(output.cpu(), dst=0)