# machine_a.py
import torch
from diffusers import StableVideoDiffusionPipeline, AutoencoderKL
from transformers import CLIPTextModel

from config import *
import requests
from PIL import Image
from io import BytesIO
import torch.distributed as dist

# Initialize the distributed environment. Connection details
# (MASTER_IP, MASTER_PORT, WORLD_SIZE, NODE_RANK) come from config.py
# via the star import above. gloo is the CPU-capable rendezvous backend.
rendezvous_url = f"tcp://{MASTER_IP}:{MASTER_PORT}"
dist.init_process_group(
    backend="gloo",
    init_method=rendezvous_url,
    world_size=WORLD_SIZE,
    rank=NODE_RANK,
)

# Load the model components assigned to this machine and move them to the
# local GPU. Node 0 owns the text encoder and the VAE; the U-Net lives on
# the peer node (rank 1).
text_encoder = CLIPTextModel.from_pretrained(
    MODEL_PATH,
    subfolder="text_encoder",
    cache_dir=CACHE_DIR,
).to("cuda:0")
vae_decoder = AutoencoderKL.from_pretrained(
    MODEL_PATH,
    subfolder="vae",
    cache_dir=CACHE_DIR,
).to("cuda:0")

# Load the initial image (only the master node downloads it) and broadcast
# it to the other node.
#
# BUG FIX: dist.broadcast_object_list() returns None and fills the supplied
# list IN PLACE on every rank, so the original
#   init_image = dist.broadcast_object_list([None], src=0)[0]
# raised TypeError on rank != 0. Both ranks must call the collective with a
# same-length list and then read the element back out of that list.
if NODE_RANK == 0:
    url = "https://pic.rmb.bdstatic.com/bjh/other/7132183c3a3973c03796a833e857df67.jpeg"
    response = requests.get(url, timeout=30)  # bound the download; fail fast on a dead link
    response.raise_for_status()
    obj_list = [Image.open(BytesIO(response.content)).convert("RGB")]
else:
    obj_list = [None]  # placeholder, overwritten in place by the broadcast
dist.broadcast_object_list(obj_list, src=0)
init_image = obj_list[0]


# Receive denoised features produced by the U-Net on the peer node.
def receive_denoised_features(shape=(1, 4, 64, 64), src=1, device="cuda:0"):
    """Block until the denoised latent tensor arrives from rank *src*.

    Args:
        shape: Expected latent shape; both sides must agree on it
            (defaults to the original hard-coded (1, 4, 64, 64)).
        src: Rank that sends the features (node 1, the U-Net host).
        device: Device the returned tensor is moved to.

    Returns:
        The received latent tensor on *device*.
    """
    # Receive into a CPU buffer: point-to-point ops on the gloo backend
    # only support CPU-resident tensors, then move to the target device.
    buf = torch.empty(shape)
    dist.recv(buf, src=src)
    return buf.to(device)


# Generate video frames (simplified version).
#
# Fixes over the original:
#  - token-id tensor gets a batch dimension and is placed on the encoder's
#    device (it was 1-D and on the CPU while the model sat on cuda:0);
#  - gloo send() only handles CPU contiguous tensors, so the embeddings are
#    moved off the GPU before sending;
#  - latents are decoded with vae_decoder.decode(...) — calling the module
#    directly runs the full encode->decode forward pass instead;
#  - decoded frames are collected rather than silently discarded.
frames = []
for _ in range(8):
    # Text encoding (handled by node 0). Example token ids; a real prompt
    # would go through the CLIP tokenizer first.
    input_ids = torch.tensor([[1, 2, 3]], device="cuda:0")
    with torch.no_grad():
        text_embeds = text_encoder(input_ids).last_hidden_state

    # Send the text embeddings to node 1 (CPU + contiguous for gloo).
    dist.send(text_embeds.detach().cpu().contiguous(), dst=1)

    # Receive the features produced by the U-Net.
    denoised_features = receive_denoised_features()

    # VAE decode: latents -> image.
    with torch.no_grad():
        image = vae_decoder.decode(denoised_features).sample
    frames.append(image)

# Clean up distributed resources.
dist.destroy_process_group()