'''
20250717
Local-region transfer, conditioned on depth.
Channel layout: [ noise , depth , masked , mask ]

Inputs
    c1              (target image)
    c1 mask
    > c1 depth      (derived: depth map of c1, used as control image)
    c2              (reference image)
    > c2 emb        (derived: Redux image embedding of c2)
Checkpoints
    /mnt/nas/shengjie/depth_local_output_20250714/
'''
import os
# Pin to GPU 0; must be set before torch/CUDA initialization below.
os.environ['CUDA_VISIBLE_DEVICES']='0'
from diffusers import FluxPriorReduxPipeline,ReduxImageEncoder
import torch,pdb
from PIL import Image,ImageOps
from image_gen_aux import DepthPreprocessor

from pipeline_control_fill import FluxControlPipeline
from util_for_os import ose,osj
from util_flux import process_img_1024,horizontal_concat_images

# Which training checkpoint to evaluate, plus runtime precision/device.
ckp_id = 7000
dtype = torch.bfloat16
device = 'cuda'

# Path builders for the fine-tuned weights of a given checkpoint id:
# image_encoder_1 = local-region embedder, image_encoder_2 = depth embedder,
# plus the LoRA weights for the transformer.
ckp_dir = 'depth_local_output_20250714'
get_local_embeder_path = lambda ckp_id : f'/mnt/nas/shengjie/{ckp_dir}/checkpoint-{ckp_id}/image_encoder_1.bin'
get_dpth_embeder_path = lambda ckp_id : f'/mnt/nas/shengjie/{ckp_dir}/checkpoint-{ckp_id}/image_encoder_2.bin'
get_lora_path = lambda ckp_id : f'/mnt/nas/shengjie/{ckp_dir}/checkpoint-{ckp_id}/pytorch_lora_weights.safetensors'
local_embder_ckp_path = get_local_embeder_path(ckp_id)
# NOTE(review): dpth_embder_ckp_path is computed but never loaded below.
dpth_embder_ckp_path = get_dpth_embeder_path(ckp_id)
lora_path = get_lora_path(ckp_id)

from MODEL_CKP import FLUX_DEPTH,FLUX_REDUX,DEPTH_PREDCITION

# Base depth-control inpainting pipeline (custom FluxControlPipeline).
pipe = FluxControlPipeline.from_pretrained(FLUX_DEPTH, 
                                        torch_dtype=torch.bfloat16).to("cuda")

# Garment-part categories; the script processes one category per run.
types = ['collar','sleeve','pockets']
choose_type = types[0]
data_root = '/mnt/nas/shengjie2/datasets'
# Directory builders for a category: originals, local crops, crop masks, canny maps.
get_ori_data_dir = lambda t : osj( data_root , f'cloth_{t}_balanced' )
get_localimg_data_dir = lambda t : osj( data_root , f'cloth_{t}_localimg' )
get_localimg_mask_data_dir = lambda t : osj( data_root , f'cloth_{t}_localimg_mask' )
get_localimg_canny_data_dir = lambda t : osj( data_root , f'cloth_{t}_localimg_canny' )

ori_data_dir = get_ori_data_dir(choose_type)
localimg_data_dir = get_localimg_data_dir(choose_type)
localimg_mask_data_dir = get_localimg_mask_data_dir(choose_type)
localimg_canny_data_dir = get_localimg_canny_data_dir(choose_type)

# NOTE(review): these two assignments duplicate the ones made right after the
# path-builder lambdas above; harmless but redundant.
local_embder_ckp_path = get_local_embeder_path(ckp_id)
lora_path = get_lora_path(ckp_id)

# Redux prior encodes a reference image into latents; the fine-tuned
# ReduxImageEncoder below maps those latents into prompt-embedding space.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                        FLUX_REDUX, 
                                        torch_dtype=torch.bfloat16).to(device)
local_embder = ReduxImageEncoder().to(device,dtype=dtype)

# Monocular depth estimator used to build the control image.
depth_processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)


# Load the fine-tuned embedder weights and attach the LoRA to the pipeline.
local_embder_ckp = torch.load(local_embder_ckp_path)
local_embder.load_state_dict(local_embder_ckp)
pipe.load_lora_weights(lora_path)

def get_masked_img(img, mask):
    """Composite *img* over a white background, keeping only unmasked pixels.

    Args:
        img: source PIL image (``img.size`` defines the output size).
        mask: PIL image, converted to single-channel ``'L'``. Because the
            mask is inverted before compositing, *black* (0) mask regions
            keep the pixels of ``img`` and *white* (255) regions are
            replaced with white background.

    Returns:
        A new ``RGB`` PIL image of the same size as ``img``.
    """
    mask = mask.convert('L')
    background = Image.new("RGB", img.size, 'white')
    # ImageOps is already imported at module level; the previous
    # function-local `from PIL import ImageOps` was redundant.
    masked_img = Image.composite(img, background, ImageOps.invert(mask))
    return masked_img

count = 0
# `prename` holds the previously-scanned filename so each iteration works on a
# (previous, current) pair of images from the same directory.
prename = None
for entry in os.scandir(ori_data_dir):
    if not entry.is_file():continue
    filename = entry.name
    if not filename.endswith('.jpg'):continue

    count+=1
    print('\rprocess idx: ',count,end='',flush=True)

    # The very first .jpg only seeds the pair; generation starts at the second.
    if prename is None:
        prename = filename
        continue
    ori_filepath = osj( ori_data_dir , filename )
    ori_filepath2 = osj( ori_data_dir , prename )
    # local img 1 for depth
    # local img 2 for redux
    localimg_filepath = osj( localimg_data_dir , prename )
    localimg_filepath2 = osj( localimg_data_dir , filename )
    localimg_mask_filepath = osj( localimg_mask_data_dir , filename.replace('.jpg','.png') )
    localimg_canny_filepath = osj( localimg_canny_data_dir , filename )
    # localimg_normal_filepath = osj( localimg_normal_data_dir , filename )

    prename = filename

    # Existence checks only; NOTE: `assert` is stripped under `python -O`.
    assert ose(ori_filepath),ori_filepath
    assert ose(ori_filepath2),ori_filepath2
    assert ose(localimg_filepath),localimg_filepath
    assert ose(localimg_filepath2),localimg_filepath2 # for redux
    assert ose(localimg_mask_filepath),localimg_mask_filepath
    assert ose(localimg_canny_filepath),localimg_canny_filepath
    # assert ose(localimg_normal_filepath),localimg_normal_filepath

    # Normalize everything to the 1024 canvas used by the pipeline.
    # NOTE(review): local_img / local_img2 and the canny path are loaded or
    # asserted but never used below in this variant of the script.
    ori_image = process_img_1024(ori_filepath).convert("RGB")
    ori_image2 = process_img_1024(ori_filepath2).convert("RGB")
    local_img = process_img_1024(localimg_filepath).convert("RGB")
    local_img2 = process_img_1024(localimg_filepath2).convert("RGB")
    # NOTE(review): this file-based mask is immediately overwritten by the
    # random rectangle mask generated below, so it is effectively dead here.
    local_mask = process_img_1024(localimg_mask_filepath,pad_color=(0,0,0)).convert("RGB")

    # c1 dpth (control)
    local_depth_image = depth_processor(ori_image,)[0].convert('RGB')
    # local_depth_image.save('tmp.jpg')

    # c1 local mask & image
    def generate_random_mask(w,h,ratio=2/3):
        """Return (mask, top_left, size): a white rectangle covering `ratio`
        of each dimension, anchored at a randomly chosen corner of a black
        w*h canvas. (Redefined each iteration; harmless.)"""
        import random
        # Random corner: each of w_s/h_s picks the left/top (0) or right/bottom (1) edge.
        w_s = random.randint(0,1)
        h_s = random.randint(0,1)
        tar_w,tar_h = int(w*ratio),int(h*ratio)
        tar_size =( tar_w , tar_h )
        start_x,start_y = w_s*(w-tar_w), h_s*(h-tar_h)
        tar_pos = (start_x,start_y)

        white_mask = Image.new('RGB',tar_size,'white')
        background = Image.new('RGB',(w,h),'black')
        background.paste( white_mask , tar_pos )
        return background,tar_pos,tar_size
    local_mask,tar_pos,tar_size = generate_random_mask(
                    ori_image.width,
                    ori_image.height,
                    ratio=2/3,
                ) 
    image = ori_image

    # c2 emb: crop the masked region out of the source image and encode it.
    local_img2_croped = ori_image.crop( (tar_pos[0],tar_pos[1], 
                           tar_pos[0]+tar_size[0] , tar_pos[1]+tar_size[1]) )
    # local_img2_croped.save('tmp.jpg')
    # pdb.set_trace()
    local_image_latents = pipe_prior_redux.encode_image( local_img2_croped , pipe_prior_redux.device , 1 )
    local_image_latents = local_image_latents.to(device,dtype = dtype)   

    # Map Redux latents through the fine-tuned local embedder, then prepend a
    # zero block of shape (1, 512, 4096) — presumably a placeholder for the
    # unused T5 text embedding (TODO confirm against the training code).
    prompt_embeds2 = local_embder(local_image_latents)['image_embeds']
    prompt_embeds1 = torch.zeros(1,512,4096)\
                    .to(device,dtype = dtype)
    prompt_embeds = torch.concat( [prompt_embeds1,prompt_embeds2],dim=1 )\
                    .to(device,dtype = dtype) 
    
    # Zero pooled embedding (1, 768) — presumably the unused CLIP pooled
    # text embedding (TODO confirm).
    pooled_prompt_embeds = torch.zeros(1,768)\
                    .to(device,dtype = dtype)
    
    # Debug preview of the conditioning inputs (overwritten after generation).
    horizontal_concat_images([image , 
                              local_mask , 
                              process_img_1024('',img_pil=local_img2_croped) , 
                              local_depth_image ]).save('tmp.jpg')
    # pdb.set_trace()
    # pipe generate
    image = pipe( image=image,
                mask_image=local_mask,
                control_image=local_depth_image,
                height=local_depth_image.height,
                width=local_depth_image.width,
                num_inference_steps=20,
                guidance_scale=4.5,
                generator=torch.Generator().manual_seed(20250721),
                prompt_embeds=prompt_embeds,
                pooled_prompt_embeds=pooled_prompt_embeds ).images[0]
    
    # Overwrite the preview with the generated result for side-by-side review.
    horizontal_concat_images([image , 
                              local_mask , 
                              process_img_1024('',img_pil=local_img2_croped) , 
                              local_depth_image ]).save('tmp.jpg')
    # Deliberate breakpoint: pause after every sample to inspect tmp.jpg.
    pdb.set_trace()