import os
# Pin the visible GPU before torch initialises CUDA.
os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch,pdb
from diffusers import  FluxFillPipeline,FluxPipeline
# from pipeline_flux import FluxPipeline
from diffusers.utils import load_image
from util_flux import horizontal_concat_images
from PIL import Image

# --- local model checkpoint locations ---
FLUX_FILL='/home/shengjie/ckp/FLUX.1-Fill-dev'

FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_ADAPTER = '/home/shengjie/ckp/flux-ip-adapter-v2'
FLUX_ADAPTER_ENCODER = '/home/shengjie/ckp/clip-vit-large-patch14'
FLUX='/data/models/FLUX___1-dev'

# Dataset variant under test; the directory paths below derive from it.
types = ['niukou','niukou-pockets','yinhua']
choose_type = types[1]
# NOTE(review): `index` appears unused in this script — confirm it is not
# consumed elsewhere before removing.
index = len(choose_type.split('-'))+1

img_dir = f'/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}-ori/'
depth_dir = f'/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}-depth/'
shape_dir = f'/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}-shape/'


# LoRA checkpoint directories produced by training.
# NOTE(review): os.listdir order is arbitrary, so FLUX_LORA_PATHS is unsorted.
FLUX_LORA_DIR = '/mnt/nas/shengjie/zhenzhi_output/'
FLUX_LORA_PATHS = list(filter(lambda d :'checkpoint' in d,
                        os.listdir(FLUX_LORA_DIR)))
# Import-time sanity check: the first listed checkpoint must hold LoRA weights.
tmp_lora_path = os.path.join(FLUX_LORA_DIR , 
                             FLUX_LORA_PATHS[0] , 
                             'pytorch_lora_weights.safetensors')
assert os.path.exists(tmp_lora_path)

# Inference hyper-parameters.
jsonl_for_train = 'zhenzhi_data.json'
guidance_scale = 30.0
steps = 8
weight_dtype = torch.bfloat16
# Canvas is three 1024x1024 panels side by side (see get_img_and_mask).
width,height = 1024*3 , 1024
target_shape = (width,height)

# Default single-pair inputs: texture goes in the middle panel, shape on the right.
texture_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}/zhenzhi-{choose_type}-1.jpg"
# texture_path = "/data/shengjie/style_zhenzhi_test/zhenzhi-1.jpg"
shape_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}/zhenzhi-{choose_type}-3.jpg"

def get_img_and_mask(texture_path,shape_path):
    """Build the 3-panel conditioning image and its inpainting mask.

    The canvas is ``width x height`` (three equal panels): the left third
    stays black (the region to be generated), the middle third holds the
    padded texture image, and the right third holds the padded shape image.
    The mask is white over the left third only, so the pipeline fills
    exactly that panel.

    Returns:
        (canvas, mask): two PIL images, each of size (width, height).
    """
    from util_flux import pad_image

    panel_w = width // 3
    canvas = Image.new('RGB', (width, height), (0, 0, 0))

    texture_img = Image.open(texture_path)
    shape_img = Image.open(shape_path)
    texture_img, *_ = pad_image(texture_img)
    shape_img, *_ = pad_image(shape_img)

    canvas.paste(texture_img, (panel_w, 0))
    canvas.paste(shape_img, (panel_w * 2, 0))

    # Mask: white over the left third, black elsewhere.
    mask = Image.new("RGB", (width, height), (0, 0, 0))
    white_panel = Image.new("RGB", (panel_w, height), (255, 255, 255))
    mask.paste(white_panel, (0, 0))
    return canvas, mask

# Single-pair test input built from the default texture/shape paths above.
img_concat3,mask_image = get_img_and_mask(texture_path,shape_path)
# Many-pair sweep: texture indices 1-4 crossed with shape indices 5-8.
# NOTE(review): these paths hard-code types[1] rather than choose_type,
# so changing choose_type alone will not affect this sweep — confirm intent.
img_concat3_list,mask_image_list = [],[]
save_names = []
from itertools import product
for i1,i2 in product([1,2,3,4,],[5,6,7,8]):
    if i1==i2:continue  # never true for these ranges; leftover guard from an earlier pairing scheme?
    # texture_path = f'/data/shengjie/style_zhenzhi/img_{i1}.jpg'
    texture_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{types[1]}/zhenzhi-{types[1]}-{i1}.jpg"
    # shape_path = f'/data/shengjie/style_zhenzhi/img_{i2}.jpg'
    shape_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{types[1]}/zhenzhi-{types[1]}-{i2}.jpg"
    img_concat3,mask_image = get_img_and_mask(texture_path,shape_path)
    img_concat3_list.append(img_concat3)
    mask_image_list.append(mask_image)

    save_name = f'texture_img_{i1}-shape_img_{i2}.jpg'
    save_names.append(save_name)

# pdb.set_trace()

import json
with open(jsonl_for_train) as f:
    data = json.load(f)
# Paths to precomputed text-conditioning tensors; they are passed to the
# pipeline directly instead of a raw prompt string.
prompt_embeds_path = data['caption']['prompt_embeds']   
pooled_prompt_embeds_path = data['caption']['pooled_prompt_embeds']   
# text_ids_path = data['caption']['text_ids']   
# NOTE(review): torch.load without weights_only=True unpickles arbitrary
# objects — fine for trusted local files, but worth confirming the source.
prompt_embeds = torch.load(prompt_embeds_path, 
                            map_location='cuda')
pooled_prompt_embeds = torch.load(pooled_prompt_embeds_path, 
                                    map_location='cuda')


# pdb.set_trace()

# Build the Fill pipeline once at import time; test_one/test_all reuse it.
pipe = FluxFillPipeline.from_pretrained(FLUX_FILL, 
                                        torch_dtype=torch.bfloat16).to("cuda")


# def encode_images(pixels: torch.Tensor, vae: torch.nn.Module, weight_dtype):
#     # pdb.set_trace()
#     pixel_latents = vae.encode(pixels.to(device=vae.device,
#                                         dtype=vae.dtype))\
#         .latent_dist.sample()
#     pixel_latents = (pixel_latents - vae.config.shift_factor) * vae.config.scaling_factor
#     return pixel_latents.to(weight_dtype)

'''
packed
torch.Size([1, 576, 64])
'''
# pdb.set_trace()
def test_one(ckp_id=10000,
             lora_path = None,
             img_concat3=None,mask_image=None,
             load=True):
    """Generate one fill result for a (texture, shape) conditioning pair.

    Args:
        ckp_id: training checkpoint id used to build the default LoRA path
            when ``lora_path`` is None.
        lora_path: explicit path to ``pytorch_lora_weights.safetensors``;
            overrides ``ckp_id`` when given.
        img_concat3: 3-panel conditioning image from ``get_img_and_mask``.
        mask_image: matching inpainting mask (white = region to fill).
        load: when True, load the LoRA weights into the global ``pipe``
            before running; pass False on later calls to reuse the weights
            already loaded.

    Returns:
        A (width, height*2) image — generated result on top, conditioning
        input below.  Also written to ``tmp.jpg`` as a rolling preview.

    Raises:
        ValueError: if the conditioning image or mask is missing.
        FileNotFoundError: if the resolved LoRA file does not exist.
    """
    # Explicit raises instead of bare asserts: asserts vanish under `python -O`.
    if img_concat3 is None or mask_image is None:
        raise ValueError('img_concat3 and mask_image are both required')

    if load:
        if lora_path is None:
            lora_path = os.path.join(FLUX_LORA_DIR,
                                     f'checkpoint-{ckp_id}',
                                     'pytorch_lora_weights.safetensors')
        print(lora_path)
        if not os.path.exists(lora_path):
            raise FileNotFoundError(lora_path)

        pipe.load_lora_weights(lora_path)
    torch.cuda.empty_cache()

    with torch.no_grad():
        image = pipe(
            image=img_concat3,
            mask_image=mask_image,
            height=target_shape[1],
            width=target_shape[0],

            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,

            guidance_scale=guidance_scale,
            num_inference_steps = steps,
        ).images[0]

    # Stack the generated image above its conditioning input for easy
    # visual comparison.
    image_concat = Image.new('RGB' , (width,height*2) , (0,0,0))
    image_concat.paste(image , (0,0))
    image_concat.paste(img_concat3 , (0,height))
    image_concat.save('tmp.jpg')

    return image_concat
def test_all(img_concat3=None,mask_image=None,):
    """Run the fill pipeline once per LoRA checkpoint in FLUX_LORA_DIR.

    Each round loads that checkpoint's LoRA weights into the global
    ``pipe``, generates an image, writes a stacked preview (result on top,
    conditioning input below) to ``tmp.jpg`` — overwritten every round —
    and then drops into pdb so the preview can be inspected before the
    next checkpoint replaces it.
    """
    for ckpt_name in FLUX_LORA_PATHS:
        candidate = os.path.join(FLUX_LORA_DIR,
                                 ckpt_name,
                                 'pytorch_lora_weights.safetensors')
        # Skip checkpoint dirs that never produced LoRA weights.
        if not os.path.exists(candidate):
            continue

        pipe.load_lora_weights(candidate)
        torch.cuda.empty_cache()

        result = pipe(
            image=img_concat3,
            mask_image=mask_image,
            height=target_shape[1],
            width=target_shape[0],
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
        ).images[0]

        print(f'generate {ckpt_name}')
        preview = Image.new('RGB', (width, height * 2), (0, 0, 0))
        preview.paste(result, (0, 0))
        preview.paste(img_concat3, (0, height))
        preview.save('tmp.jpg')

        # Deliberate pause: inspect tmp.jpg before it is overwritten.
        pdb.set_trace()

# pdb.set_trace()
if __name__=='__main__':
    # test one img
    # test_one(ckp_id=1000,texture_path=texture_path,
    #                         shape_path=shape_path)
    
    # test many img
    import shutil
    # The results directory is wiped and recreated on every run.
    save_dir = '/data/shengjie/style_zhenzhi_concat3_res'
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    os.makedirs(save_dir)
    load_lora = True

    # NOTE(review): this lora_path is never used — test_one is called with
    # lora_path=None below, so it resolves checkpoint-{ckp_id} instead.
    # Confirm which weights were intended.
    lora_path = "/mnt/nas/shengjie/zhenzhi_output_version1/pytorch_lora_weights.safetensors"
    for img_concat3,mask_image,save_name in zip(img_concat3_list,
                                                mask_image_list,
                                                save_names):
        res = test_one(ckp_id=10000,
                       lora_path=None,
                        img_concat3=img_concat3,
                        mask_image=mask_image,
                        load=load_lora) 
        save_path = os.path.join(save_dir,save_name)
        # pdb.set_trace()
        res.save(save_path)
        res.save('tmp.jpg')

        load_lora = False # only load the LoRA weights once, on the first pair
    
    # test all lora
    # test_all()