import torch
from diffusers import StableVideoDiffusionPipeline
from util.resource_monitor import print_resource_usage


def generate_frames(image_tensor, rank, world_size):
    """Generate this node's share of video frames from a single input image.

    Runs Stable Video Diffusion (img2vid-xt) on this node's GPU and returns
    only the frames assigned to ``rank`` under a contiguous block split of
    ``total_frames`` across ``world_size`` nodes.

    Args:
        image_tensor: Tensor whose raw bytes encode a 640x480 RGB image
            (assumes uint8, HxWxC layout -- TODO confirm with caller).
        rank: This node's index in ``[0, world_size)``.
        world_size: Total number of participating nodes.

    Returns:
        List of PIL images for this node's frame range; empty when
        ``world_size > total_frames`` leaves this rank with no frames.
    """
    from PIL import Image

    # Assign a contiguous frame range to this rank. The last rank absorbs
    # the remainder when total_frames is not divisible by world_size.
    total_frames = 8
    frames_per_node = total_frames // world_size
    start = rank * frames_per_node
    end = total_frames if rank == world_size - 1 else (rank + 1) * frames_per_node

    # Early exit BEFORE loading the model: when world_size > total_frames,
    # non-final ranks get an empty range, and num_frames=0 is not a valid
    # request to the pipeline.
    if end <= start:
        return []

    # Load the pipeline onto this node's GPU. float32 kept as configured
    # originally; fp16 would halve VRAM if precision allows -- not changed
    # here to preserve behavior.
    pipeline = StableVideoDiffusionPipeline.from_pretrained(
        "stabilityai/stable-video-diffusion-img2vid-xt",
        torch_dtype=torch.float32,
        cache_dir='models'
    ).to("cuda")

    # Reconstruct the conditioning image from the raw tensor bytes.
    image = Image.frombytes('RGB', (640, 480), image_tensor.numpy().tobytes())

    print_resource_usage(rank)
    # BUG FIX: the pipeline generates exactly (end - start) frames, and
    # `.frames` is batched as [batch][frame]. The original code sliced
    # `.frames[start:end]`, which indexes the BATCH axis and returned an
    # empty list for every rank > 0. Take batch 0 to get this rank's frames.
    # NOTE(review): each rank runs an independent diffusion of (end - start)
    # frames, so the per-rank clips are not slices of one coherent video --
    # confirm this matches the intended distribution strategy.
    video_frames = pipeline(image, num_frames=end - start).frames[0]
    torch.cuda.empty_cache()

    return video_frames