from diffusers import ControlNetModel,UniPCMultistepScheduler
from inpainting_infer_pipeline import StableDiffusionControlNetInpaintPipeline
import torch
from PIL import Image
import random
import cv2
import numpy as np
from aoss_client.client import Client
# AOSS object-storage client config; `client` is used by read_image_url
# below to fetch images referenced by s3:// URLs.
conf_path = '/mnt/afs2d/luotianhang/aoss.conf'
client = Client(conf_path)
def read_image_url(url):
    """Load an image as a PIL RGB image.

    If *url* contains 's3://' (URLs here carry a cluster prefix such as
    'aoss_oms:s3://...', so a substring check is used rather than
    startswith), the bytes are fetched through the module-level aoss
    ``client`` and decoded with OpenCV; otherwise *url* is treated as a
    local file path.

    Raises:
        ValueError: if the downloaded bytes cannot be decoded as an image.
    """
    if 's3://' in url:
        image_bytes = client.get(url)
        # np.frombuffer accepts bytes directly; no memoryview wrapper needed.
        image_array = np.frombuffer(image_bytes, np.uint8)
        image_np = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
        if image_np is None:
            # cv2.imdecode signals failure by returning None, not raising;
            # fail loudly here instead of crashing later in cvtColor.
            raise ValueError(f'failed to decode image bytes from {url}')
        # OpenCV decodes to BGR channel order; PIL expects RGB.
        image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image_np).convert('RGB')
    else:
        image = Image.open(url).convert('RGB')
    return image
   


def make_mask_image(loc_x1y1x2y2, rwidth, rheight, twidth, theight):
    """Build a binary (mode 'L') mask of size (twidth, theight).

    The box *loc_x1y1x2y2* = [x1, y1, x2, y2], given in a reference frame of
    size (rwidth, rheight), is rescaled into the target frame; pixels inside
    the rescaled box are 255, everything else 0.
    """
    left = int(loc_x1y1x2y2[0] / rwidth * twidth)
    top = int(loc_x1y1x2y2[1] / rheight * theight)
    right = int(loc_x1y1x2y2[2] / rwidth * twidth)
    bottom = int(loc_x1y1x2y2[3] / rheight * theight)

    canvas = np.zeros((theight, twidth), dtype=np.uint8)
    canvas[top:bottom, left:right] = 255
    return Image.fromarray(canvas).convert('L')



# Training-checkpoint step counts to evaluate; one pipeline is loaded per entry.
iters = [
    250000
]

# Single hand-picked sample for qualitative inspection:
#   target_image - s3 URL of a frame (NOTE: unused below; a local frame is opened instead)
#   source_image - s3 URL of the ControlNet conditioning image
#   text         - caption (NOTE: unused below; the prompt is randomized instead)
#   child/adult  - bounding boxes [x1, y1, x2, y2] in source-image pixel coordinates
mem = {
    "target_image": "aoss_oms:s3://cabin-oms/sh38/mnt/lustrenew/xiyunlong/data/pic/NIO/alps/train/202405/crop_146_419_2446_1952/2808_alps_child_caiji_240418/eadingLight_oms_camera/scene_8/00000006/120837_1138000006_30_2_0/frame_1760.png", 
    "source_image": "sdc_ytd_v2:s3://ytd-bucket-v2/data/aigc/controlnet_data/controlnet_pose_mask_img/alps/crop_146_419_2446_1952/8419.png", 
    "text": "1 adult sit in car.", 
    "child": [], 
    "adult": [[360, 96, 551, 422]]
    }


def prepare_mask_and_masked_image(image, mask):
    """Convert a PIL image/mask pair into the tensors the inpainting UNet expects.

    Returns:
        (mask_tensor, masked_image): the mask as a hard 0/1 float tensor of
        shape (1, 1, H, W), and the image normalized to [-1, 1] with shape
        (1, 3, H, W) and the masked region zeroed out.
    """
    # Image -> float tensor in [-1, 1], layout (1, 3, H, W).
    pixels = np.array(image.convert("RGB"))
    pixels = pixels[None].transpose(0, 3, 1, 2)
    image_t = torch.from_numpy(pixels).to(dtype=torch.float32) / 127.5 - 1.0

    # Mask -> hard-thresholded 0/1 float tensor, layout (1, 1, H, W).
    m = np.array(mask.convert("L")).astype(np.float32) / 255.0
    m = np.where(m < 0.5, 0.0, 1.0).astype(np.float32)
    mask_t = torch.from_numpy(m)[None, None]

    # Zero out the pixels under the mask (mask==1 means "to be inpainted").
    masked_image = image_t * (1.0 - mask_t)

    return mask_t, masked_image


# For each checkpoint: load the ControlNet, attach it to the inpainting
# pipeline, and generate 10 samples for the fixed test case in `mem`.
for ckpt_iter in iters:  # renamed from `iter`, which shadowed the builtin
    controlnet = ControlNetModel.from_pretrained(
        f'/mnt/afs2d/luotianhang/smartvehicle_diffusion/diffusers/examples/controlnet/controlnet_experiment_inpainting/checkpoint-{ckpt_iter}/controlnet'
    )
    pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        '/mnt/afs2d/luotianhang/cache/PretrainedModels/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5',
        controlnet=controlnet,
    ).to("cuda")
    pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
    pipeline.safety_checker = None  # disable safety checker for this internal evaluation

    # Background frame to inpaint onto (a local empty-cabin frame, NOT mem['target_image']).
    target_image = Image.open('/mnt/afs2d01/luotianhang/diffusion_data/process/data_inpainting/inpainting_material/empty_cart_sort/a19/empty_1/frame_15.jpg')

    # ControlNet conditioning image plus a mask built from the adult bbox,
    # rescaled from the source image's native size into 640x360.
    source_image = read_image_url(mem['source_image'])
    loc_x1y1x2y2 = mem['adult'][0]
    mask_image = make_mask_image(loc_x1y1x2y2, source_image.width, source_image.height, 640, 360)
    target_image = target_image.resize((640, 360))
    source_image = source_image.resize((640, 360))

    # Random horizontal flip, applied consistently to all three images.
    if random.random() > 0.5:
        mask_image = mask_image.transpose(Image.FLIP_LEFT_RIGHT)
        target_image = target_image.transpose(Image.FLIP_LEFT_RIGHT)
        source_image = source_image.transpose(Image.FLIP_LEFT_RIGHT)

    # Generate 10 samples, each with a freshly randomized prompt and seed.
    for i in range(10):
        res_image = pipeline(
            height=360,
            width=640,
            prompt=random.choice(['adult', 'child']),
            num_inference_steps=50,
            image=target_image,
            mask_image=mask_image,
            control_image=source_image,
            generator=torch.Generator(device='cuda').manual_seed(random.randint(a=0, b=99999)),
            eta=1.0,
        ).images[0]
        res_image.save(f'./res_image_{i}.png')
# Fixed message: outputs are written as ./res_image_<i>.png, not res.png.
print('images saved as ./res_image_<i>.png')