from diffusers import StableDiffusionControlNetPipeline, ControlNetModel,UniPCMultistepScheduler
import torch
from PIL import Image
from controlnet_inpainting_dataset import read_image_url
import random
# Sweep saved ControlNet training checkpoints: for each checkpoint, generate
# several samples from the same conditioning image and save side-by-side
# source/result comparison strips for visual inspection.
WIDTH, HEIGHT = 640, 360  # generation resolution fed to the pipeline
NUM_SAMPLES = 10          # samples per checkpoint, each with a fresh seed

checkpoint_steps = [
    198000,
]

# The conditioning image is loop-invariant: fetch it from S3, resize, and
# save the reference copy exactly once instead of on every inner iteration.
image = read_image_url('sdc_ytd_v2:s3://ytd-bucket-v2/data/aigc/controlnet_data/controlnet_pose_mask_img/alps/crop_146_419_2446_1952/10899.png')
image = image.resize((WIDTH, HEIGHT))
image.save('source_image.png')

for step in checkpoint_steps:  # renamed from `iter`, which shadowed the builtin
    # Load the ControlNet weights trained up to `step`, paired with the
    # base Stable Diffusion v1.5 model.
    controlnet = ControlNetModel.from_pretrained(
        f"/mnt/afs2d/luotianhang/smartvehicle_diffusion/diffusers/examples/controlnet/controlnet_experiment_text/checkpoint-{step}/controlnet"
    )
    pipeline = StableDiffusionControlNetPipeline.from_pretrained(
        '/mnt/afs2d/luotianhang/cache/PretrainedModels/stable-diffusion-v1-5/stable-diffusion-v1-5',
        controlnet=controlnet,
    ).to("cuda")
    # UniPC keeps quality acceptable at the low step count (20) used below.
    pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
    pipeline.safety_checker = None  # disable NSFW filter for internal evaluation

    for i in range(NUM_SAMPLES):
        res_image = pipeline(
            prompt='1 adult sit in car.',
            image=image,
            num_inference_steps=20,
            # New random seed per sample so outputs vary between iterations.
            generator=torch.Generator(device='cuda').manual_seed(random.randint(a=0, b=99999)),
        ).images[0]
        # Paste source (left) and generated result (right) into one strip.
        comparison = Image.new('RGB', (WIDTH * 2, HEIGHT))
        comparison.paste(image, (0, 0))
        comparison.paste(res_image, (WIDTH, 0))
        comparison.save(f'./res_{step}_{i}.png')

# Previous message claimed a nonexistent 'res.png'; report the real pattern.
print('done: comparison images saved as res_<step>_<i>.png')