'''
concat3 = [ original , depth , local ]
prompt_embeds = [ fixed , visual ]
pooled_prompt = zero

Requires an image dir
Requires a local-image dir

Requires code for img -> depth (depth processor)
Requires code for img -> redux 512+729 tokens (prior)
'''

import os
from os.path import join as osj
from os.path import exists as ose
os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch,pdb
from diffusers import  FluxFillPipeline,FluxPriorReduxPipeline
# from pipeline_flux import FluxPipeline
from diffusers.utils import load_image
from util_flux import horizontal_concat_images,process_img_1024
from PIL import Image
from image_gen_aux import DepthPreprocessor

# --- Local model checkpoint paths (machine-specific; adjust per host) ---
# NOTE(review): "PREDCITION" is a typo for "PREDICTION"; kept as-is since the
# name is module-level and may be referenced elsewhere.
DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'
FLUX_FILL='/home/shengjie/ckp/FLUX.1-Fill-dev'

FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_ADAPTER = '/home/shengjie/ckp/flux-ip-adapter-v2'
FLUX_ADAPTER_ENCODER = '/home/shengjie/ckp/clip-vit-large-patch14'
FLUX='/data/models/FLUX___1-dev'

# Garment part under test; dataset directories are organized per part.
types = ['collar','sleeve','pockets']
choose_type = types[2]
# NOTE(review): `index` is never used later in this file — candidate for removal.
index = len(choose_type.split('-'))+1

# Dataset roots: source images, precomputed depth maps, and local-feature crops.
img_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced'
depth_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced_depth/'
local_img_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_localimg/'


# Single test sample: reference (redux) image, its depth map (same stem),
# and a different image supplying the local feature.
img_name = 'pockets_20250625_Zippered-Pocket_418'
depth_name = img_name
local_img_name = 'pockets_20250625_Zippered-Pocket_419'

redux_img_path = os.path.join( img_dir , img_name+'.jpg' )
depth_path = os.path.join( depth_dir , depth_name+'.png')
local_img_path = os.path.join( local_img_dir , local_img_name+'.jpg' )

# Fail fast with the offending path if any input file is missing.
assert ose(redux_img_path),redux_img_path
assert ose(depth_path),depth_path
assert ose(local_img_path),local_img_path
# redux_img = Image.open(redux_img_path)
# local_img = Image.open(local_img_path)
## depth
# processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)
# depth_img = processor(redux_img_path)[0].convert("RGB")

## prompt
# Fixed text prompt describing the 3-panel layout; the Redux prior combines
# it with the visual embedding of the reference image.
fixed_prompt = f"The pair of images highlights first clothing showing second image's texture with third clothing's local feature, high resolution, 4K, 8K; " \
            f"[IMAGE1] Synthesis clothing with second's texture and third's local feature." \
            f"[IMAGE2] Texture shot of a clothing." \
            f"[IMAGE3] Detailed local feature shot of a clothing."
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                            FLUX_REDUX, 
                                            torch_dtype=torch.bfloat16).to("cuda")
redux_img = process_img_1024(redux_img_path)
with torch.no_grad():
    # Prior output is later unpacked (**main_condition_prompt) into the Fill
    # pipeline call; its 'prompt_embeds' is torch.Size([1, 1241, 4096]).
    main_condition_prompt = pipe_prior_redux(redux_img,
                                            prompt=None,
                                            prompt_2=fixed_prompt) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 

# Release the prior to free GPU memory before loading the Fill pipeline.
del pipe_prior_redux

# LoRA training output root; each 'checkpoint-*' subfolder holds one set of weights.
FLUX_LORA_DIR = '/mnt/nas/shengjie/depth_local_output/'
FLUX_LORA_PATHS = list(filter(lambda d :'checkpoint' in d,
                        os.listdir(FLUX_LORA_DIR)))
# Sanity check: the first checkpoint directory actually contains LoRA weights.
tmp_lora_path = os.path.join(FLUX_LORA_DIR , 
                             FLUX_LORA_PATHS[0] , 
                             'pytorch_lora_weights.safetensors')
assert os.path.exists(tmp_lora_path)

# Inference configuration.
jsonl_for_train = 'local_data.json'  # NOTE(review): only referenced by commented-out code below
guidance_scale = 30.0
steps = 8
weight_dtype = torch.bfloat16
# Canvas is three 1024x1024 panels side by side: [generated | depth | local].
width,height = 1024*3 , 1024
target_shape = (width,height)

# texture_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}/zhenzhi-{choose_type}-1.jpg"
# texture_path = "/data/shengjie/style_zhenzhi_test/zhenzhi-1.jpg"
# shape_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}/zhenzhi-{choose_type}-3.jpg"


def get_img_and_mask(depth_path, local_img_path):
    """Build the 3-panel conditioning image and its inpainting mask.

    Panel layout, left to right: [region to synthesize | depth map | local
    feature crop]. The mask is white over the left third (the area the Fill
    pipeline should generate) and black over the two conditioning panels.
    Uses the module-level `width`/`height` canvas size.
    """
    from util_flux import pad_image

    third = width // 3
    canvas = Image.new('RGB', (width, height), (0, 0, 0))

    depth_img = Image.open(depth_path)
    local_img = Image.open(local_img_path)
    depth_img, _, _, _, _ = pad_image(depth_img)
    local_img, _, _, _, _ = pad_image(local_img)

    canvas.paste(depth_img, (third, 0))
    canvas.paste(local_img, (2 * third, 0))

    # Mask: white = inpaint region (first third), black = keep unchanged.
    mask = Image.new('RGB', (width, height), (0, 0, 0))
    mask.paste(Image.new('RGB', (third, height), (255, 255, 255)), (0, 0))
    return canvas, mask

# test one: build the conditioning image / mask for the single sample above.
img_concat3,mask_image = get_img_and_mask(depth_path,local_img_path)
# test_many
# img_concat3_list,mask_image_list = [],[]
# save_names = []
# from itertools import product
# for i1,i2 in product([1,2,3,4,],[5,6,7,8]):
#     if i1==i2:continue
#     # texture_path = f'/data/shengjie/style_zhenzhi/img_{i1}.jpg'
#     texture_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{types[2]}/zhenzhi-{types[2]}-{i1}.jpg"
#     # shape_path = f'/data/shengjie/style_zhenzhi/img_{i2}.jpg'
#     shape_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{types[1]}/zhenzhi-{types[1]}-{i2}.jpg"
#     img_concat3,mask_image = get_img_and_mask(texture_path,shape_path)
#     img_concat3_list.append(img_concat3)
#     mask_image_list.append(mask_image)

#     save_name = f'texture_img_{i1}-shape_img_{i2}.jpg'
#     save_names.append(save_name)

# pdb.set_trace()

# import json
# with open(jsonl_for_train) as f:
#     data = json.load(f)
# prompt_embeds_path = data['caption']['prompt_embeds']   
# pooled_prompt_embeds_path = data['caption']['pooled_prompt_embeds']   
# # text_ids_path = data['caption']['text_ids']   
# prompt_embeds = torch.load(prompt_embeds_path, 
#                             map_location='cuda')
# pooled_prompt_embeds = torch.load(pooled_prompt_embeds_path, 
#                                     map_location='cuda')


# pdb.set_trace()

# Fill (inpainting) pipeline that performs the actual synthesis on the GPU.
pipe = FluxFillPipeline.from_pretrained(FLUX_FILL, 
                                        torch_dtype=torch.bfloat16).to("cuda")


# def encode_images(pixels: torch.Tensor, vae: torch.nn.Module, weight_dtype):
#     # pdb.set_trace()
#     pixel_latents = vae.encode(pixels.to(device=vae.device,
#                                         dtype=vae.dtype))\
#         .latent_dist.sample()
#     pixel_latents = (pixel_latents - vae.config.shift_factor) * vae.config.scaling_factor
#     return pixel_latents.to(weight_dtype)

'''
packed
torch.Size([1, 576, 64])
'''
# pdb.set_trace()
def test_one(ckp_id=10000, img_concat3=None, mask_image=None,
             load=True):
    """Run one Fill-pipeline generation with LoRA checkpoint `ckp_id`.

    Loads 'checkpoint-{ckp_id}/pytorch_lora_weights.safetensors' from
    FLUX_LORA_DIR when `load` is True, generates the left panel of
    `img_concat3` under `mask_image`, and returns the result stacked on
    top of the conditioning image (also written to 'tmp.jpg').
    """
    assert img_concat3 is not None and mask_image is not None
    if load:
        weights_file = os.path.join(FLUX_LORA_DIR,
                                    f'checkpoint-{ckp_id}',
                                    'pytorch_lora_weights.safetensors')
        print(weights_file)
        assert os.path.exists(weights_file), weights_file
        pipe.load_lora_weights(weights_file)
    torch.cuda.empty_cache()

    # Snapshot the conditioning image so it can be inspected while generating.
    img_concat3.save('tmp.jpg')

    with torch.no_grad():
        generated = pipe(
            image=img_concat3,
            mask_image=mask_image,
            height=target_shape[1],
            width=target_shape[0],
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
            **main_condition_prompt,
        ).images[0]

    # Stack the result above its conditioning input and overwrite tmp.jpg.
    stacked = Image.new('RGB', (width, height * 2), (0, 0, 0))
    stacked.paste(generated, (0, 0))
    stacked.paste(img_concat3, (0, height))
    stacked.save('tmp.jpg')

    return stacked

    # pdb.set_trace()

    # concat_img = horizontal_concat_images([image,Image.open(texture_path),Image.open(shape_path)])
    # concat_img.save('tmp.jpg')
def test_all(img_concat3=None,mask_image=None,):
    """Sweep every LoRA checkpoint under FLUX_LORA_DIR on one sample.

    For each 'checkpoint-*' directory that contains LoRA weights: load the
    weights, run the Fill pipeline, write the result stacked over the
    conditioning image to 'tmp.jpg', then pause in pdb so the output can
    be inspected before moving to the next checkpoint.
    """
    assert img_concat3 is not None and mask_image is not None
    for i in range(len(FLUX_LORA_PATHS)):
        lora_path = os.path.join(FLUX_LORA_DIR,
                                 FLUX_LORA_PATHS[i],
                                 'pytorch_lora_weights.safetensors')
        if not os.path.exists(lora_path):
            continue

        # NOTE(review): successive load_lora_weights calls may stack adapters;
        # consider pipe.unload_lora_weights() between iterations — verify.
        pipe.load_lora_weights(lora_path)
        torch.cuda.empty_cache()

        with torch.no_grad():
            image = pipe(
                image=img_concat3,
                mask_image=mask_image,
                height=target_shape[1],
                width=target_shape[0],
                guidance_scale=guidance_scale,
                num_inference_steps=steps,
                # BUG FIX: previous version referenced `prompt_embeds` /
                # `pooled_prompt_embeds`, which are only defined in
                # commented-out code above (NameError at call time).
                # Use the Redux prior output, exactly as test_one does.
                **main_condition_prompt,
            ).images[0]

        print(f'generate {FLUX_LORA_PATHS[i]}')
        image_concat = Image.new('RGB', (width, height * 2), (0, 0, 0))
        image_concat.paste(image, (0, 0))
        image_concat.paste(img_concat3, (0, height))
        image_concat.save('tmp.jpg')

        # Deliberate per-checkpoint pause for manual inspection of tmp.jpg.
        pdb.set_trace()

# pdb.set_trace()
if __name__=='__main__':
    # Interactive single-image mode: repeatedly prompt for a checkpoint id
    # and run one generation per entry. Non-numeric input (or EOF) exits.
    while True:
        try:
            ckp_id = int(input('input ckp-id:'))
        except (ValueError, EOFError):
            # BUG FIX: the original bare `except Exception: break` also
            # swallowed real failures inside test_one; parse errors are the
            # only intended exit path here.
            break
        try:
            test_one(ckp_id=ckp_id, img_concat3=img_concat3,
                     mask_image=mask_image)
        except Exception as e:
            # Preserve the original "stop on failure" behavior, but surface
            # the error instead of exiting silently.
            print(f'generation failed: {e}')
            break

    # For a sweep over every checkpoint on this sample, use test_all()
    # (batch mode over many image pairs lived here as dead commented-out
    # code and was removed).