'''
Inputs: img1 (control), mask, img2, and a Redux conditioning image.

Demo script: generates garments with a FLUX.1-Fill inpainting pipeline whose
prompt embeddings come from a FLUX.1-Redux prior (image-conditioned, no text
prompt), sweeping over trained LoRA checkpoints.
'''
import util_for_huggingface

import os
os.environ['CUDA_VISIBLE_DEVICES']='7'  # pin this process to GPU 7; must be set before CUDA init
import torch,pdb
from diffusers import  FluxFillPipeline,FluxPipeline
# from pipeline_flux import FluxPipeline
from diffusers.utils import load_image
from util_flux import horizontal_concat_images
from PIL import Image

# FLUX_FILL='/home/shengjie/ckp/FLUX.1-Fill-dev'

# FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
# FLUX_ADAPTER = '/home/shengjie/ckp/flux-ip-adapter-v2'
# FLUX_ADAPTER_ENCODER = '/home/shengjie/ckp/clip-vit-large-patch14'
# FLUX='/data/models/FLUX___1-dev'

# Star-import supplies the checkpoint path constants used below
# (FLUX_FILL, FLUX_REDUX — see the commented examples above).
from MODEL_CKP import *

## Model preparation

# Background-removal model; used later to derive the inpainting mask
# from the control image's garment silhouette.
from demo_rmbg import load_rmbg,get_mask_by_rmbg
model_rmbg = load_rmbg()

from diffusers import FluxPriorReduxPipeline

# Redux prior: maps a reference image to (prompt_embeds, pooled_prompt_embeds)
# that replace a text prompt in the Fill pipeline call.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                        FLUX_REDUX, 
                                        torch_dtype=torch.bfloat16).to('cuda')


# types = ['niukou','niukou-pockets','yinhua']
# choose_type = types[1]
# index = len(choose_type.split('-'))+1

# img_dir = f'/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}-ori/'
# depth_dir = f'/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}-depth/'
# shape_dir = f'/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}-shape/'


# Training output dir holding `checkpoint-<N>/pytorch_lora_weights.safetensors`.
FLUX_LORA_DIR = '/mnt/nas/shengjie/depthcontrol0822_output/'
FLUX_LORA_PATHS = list(filter(lambda d :'checkpoint' in d,
                        os.listdir(FLUX_LORA_DIR)))
# Sanity check: at least the first checkpoint dir actually contains weights.
tmp_lora_path = os.path.join(FLUX_LORA_DIR , 
                             FLUX_LORA_PATHS[0] , 
                             'pytorch_lora_weights.safetensors')
assert os.path.exists(tmp_lora_path)

# jsonl_for_train = 'zhenzhi_data.json'
# guidance_scale = 30.0
# steps = 8
# weight_dtype = torch.bfloat16
# width,height = 1024*3 , 1024
# target_shape = (width,height)

# texture_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}/zhenzhi-{choose_type}-1.jpg"
# # texture_path = "/data/shengjie/style_zhenzhi_test/zhenzhi-1.jpg"
# shape_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}/zhenzhi-{choose_type}-3.jpg"


# def get_img_and_mask(texture_path,shape_path):

#     img_concat3 = Image.new('RGB' , (width,height) , (0,0,0))
#     from util_flux import pad_image
#     img2,img3 = Image.open(texture_path),\
#                 Image.open(shape_path)
#     img2,_,_,_,_ = pad_image(img2)
#     img3,_,_,_,_ = pad_image(img3)
#     img_concat3.paste(img2,(width//3,0))
#     img_concat3.paste(img3,((width//3)*2,0))

#     ## generate Mask
#     black_image = Image.new("RGB", (width, height), (0, 0, 0))
#     white_image = Image.new("RGB", (width//3, height), (255, 255, 255))
#     # black_image.paste(white_image, (args.resolution_width//2, 0))
#     black_image.paste(white_image, (0, 0))
#     mask_image = black_image
#     return img_concat3,mask_image

# # text_one
# img_concat3,mask_image = get_img_and_mask(texture_path,shape_path)
# # test_many
# img_concat3_list,mask_image_list = [],[]
# save_names = []
# from itertools import product
# for i1,i2 in product([1,2,3,4,],[5,6,7,8]):
#     if i1==i2:continue
#     # texture_path = f'/data/shengjie/style_zhenzhi/img_{i1}.jpg'
#     texture_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{types[1]}/zhenzhi-{types[1]}-{i1}.jpg"
#     # shape_path = f'/data/shengjie/style_zhenzhi/img_{i2}.jpg'
#     shape_path = f"/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{types[1]}/zhenzhi-{types[1]}-{i2}.jpg"
#     img_concat3,mask_image = get_img_and_mask(texture_path,shape_path)
#     img_concat3_list.append(img_concat3)
#     mask_image_list.append(mask_image)

#     save_name = f'texture_img_{i1}-shape_img_{i2}.jpg'
#     save_names.append(save_name)

# # pdb.set_trace()

# import json
# with open(jsonl_for_train) as f:
#     data = json.load(f)
# prompt_embeds_path = data['caption']['prompt_embeds']   
# pooled_prompt_embeds_path = data['caption']['pooled_prompt_embeds']   
# # text_ids_path = data['caption']['text_ids']   
# prompt_embeds = torch.load(prompt_embeds_path, 
#                             map_location='cuda')
# pooled_prompt_embeds = torch.load(pooled_prompt_embeds_path, 
#                                     map_location='cuda')


# pdb.set_trace()

# Fill (inpainting) pipeline: repaints the masked region; guided here by
# Redux image embeddings instead of a text prompt.
pipe = FluxFillPipeline.from_pretrained(FLUX_FILL, 
                                        torch_dtype=torch.bfloat16).to("cuda")


# def encode_images(pixels: torch.Tensor, vae: torch.nn.Module, weight_dtype):
#     # pdb.set_trace()
#     pixel_latents = vae.encode(pixels.to(device=vae.device,
#                                         dtype=vae.dtype))\
#         .latent_dist.sample()
#     pixel_latents = (pixel_latents - vae.config.shift_factor) * vae.config.scaling_factor
#     return pixel_latents.to(weight_dtype)

'''
packed
torch.Size([1, 576, 64])
'''
from util_for_os import osj,ose
from util_flux import process_img_1024,vertical_concat_images,horizontal_concat_images
from itertools import product

num = 5  # images sampled per list; rows/columns of the result grid

types = ['collar','sleeve','pockets']
# choose_type = types[0]
# Folder of balanced example images for a given garment-part type.
examples_dir = lambda t: f'/mnt/nas/shengjie/datasets/cloth_{t}_balanced'
# examples_dir2 = lambda t: f'/mnt/nas/shengjie/datasets/cloth_{t}_balanced'
# examples_dir3 = lambda t: f'/mnt/nas/shengjie/datasets/cloth_{t}_localimg_mask'
# save_dir = '/data/shengjie/synthesis_zhenzhi/'
# other_dir = './redux_other'
output_dir = 'tmp_redux0826'  # destination for the concatenated result grids
os.makedirs(output_dir,exist_ok=True)
# pdb.set_trace()
def test_one(ckp_id=10000,
             lora_path = None,
             load=True):
    """Run one checkpoint's Redux->Fill generation sweep and save a result grid.

    For each (control image c1, redux image c2) pair, an RMBG foreground mask
    of c1 is computed and the Fill pipeline repaints a white canvas inside that
    mask using the Redux embeddings of c2.

    Args:
        ckp_id: checkpoint id used to build the default LoRA weights path.
        lora_path: explicit path to ``pytorch_lora_weights.safetensors``;
            derived from ``ckp_id`` when None.
        load: when True, load the LoRA weights into the global ``pipe``
            (callers pass False on later sweeps to reuse loaded weights).

    Side effects: saves a per-sample preview to ``tmp.jpg`` and the final grid
    to ``{output_dir}/tmp_redux_ori_by_fill_{ckp_id}_longshort_reverse.jpg``.
    Returns None.
    """
    if load:
        if lora_path is None:
            # Default layout: <FLUX_LORA_DIR>/checkpoint-<id>/pytorch_lora_weights.safetensors
            lora_path =  os.path.join(FLUX_LORA_DIR , 
                            f'checkpoint-{ckp_id}' , 
                            'pytorch_lora_weights.safetensors')
        # if not os.path.exists(lora_path):continue
        print(lora_path)
        assert os.path.exists(lora_path)

        pipe.load_lora_weights(lora_path)
    torch.cuda.empty_cache()

    def get_tmp_clo1_clo2():
        # Return (first 5, last 5) paths from the sorted comparison folder.
        dir = './compare_longshort'
        files = os.listdir(dir)
        files.sort()
        files = [ osj( dir , f ) for f in files]
        return files[:5],files[-5:]


    for t_id in range(len(types)):
        clo_list1 = []
        clo_list2 = []
        # Pair a garment type with itself (cross-type pairing commented out).
        t1 , t2 = types[t_id] , types[t_id]
        # t1 , t2 = types[t_id] , types[ ((t_id)+1)%len(types) ]

        dir1 , dir2 = examples_dir(t1) , examples_dir(t2)
        # dir1 , dir2 = examples_dir(t1) , other_dir
        if dir1 == dir2:
            # Same folder: first `num` jpgs -> clo_list1, next `num` -> clo_list2.
            for entry in os.scandir(dir1):
                filename = entry.name
                if not filename.endswith('.jpg'):continue
                # break
                if len(clo_list1)!=num:
                    clo_list1.append(
                        osj( dir1 , filename )
                    )
                elif len(clo_list2)!=num:
                    clo_list2.append(
                        osj( dir2 , filename )
                    )
                else:
                    break
        else:
            # Different folders: take `num` files from each independently.
            for entry in os.scandir(dir1):
                filename = entry.name
                if not filename.endswith('.jpg'):continue
                # break
                if len(clo_list1)!=num:
                    clo_list1.append(
                        osj( dir1 , filename )
                    )
                else:break
            for entry in os.scandir(dir2):
                filename = entry.name
                if filename.endswith('.txt'):continue
                # break
                if len(clo_list2)!=num:
                    clo_list2.append(
                        osj( dir2 , filename )
                    )
                else:break

        # pdb.set_trace()
        # NOTE(review): this overrides both lists built by the scans above,
        # so the scandir loops are currently dead code.
        clo_list2,clo_list1 = get_tmp_clo1_clo2()
        
        def get_mask(size=(1024,1024),fill=(255,255,255)):
            # Solid-color canvas; default is a white 1024x1024 placeholder
            # used as the grid's top-left corner cell.
            return Image.new('RGB',size,color=fill)
        # Result grid: row 0 = [white placeholder, control images c1...];
        # each following row starts with one redux image c2 and gets the
        # generated samples appended in the loop below.
        res_imgs = [
            [get_mask()]+[ process_img_1024(path) for path in clo_list1 ],
            *[[process_img_1024(path)] for path in clo_list2], 
        ]
        # pdb.set_trace()
        # After every generation, insert the sample into its row of res_imgs.
        idx = 0
        for c1,c2 in product(clo_list1,clo_list2):
            # (Grid-layout sketch below, in Chinese, kept verbatim.)
            '''
            构建
            /        原图 c1-1  c1-2  c1-3 ...
            原图c2-1     合成1  合成2 合成3 ...
            c2-2        ...
            # c2-3
            ...
            '''

            control_image_path = c1
            redux_image_path = c2

            # Paste onto the original garment (disabled experiment):
            # ori_path = osj(examples_dir2,filename)
            # mask_path = osj(examples_dir3,filename.replace('.jpg','.png'))
            # ori_img = Image.open( ori_path )
            # mask_img = Image.open( mask_path )
            # assert ori_img.size == mask_img.size,f'{ori_img.size} , {mask_img.size}'

            # pos , tar_size = get_whitemask_pos_and_size(mask_img)

            # pdb.set_trace()


            control_image = process_img_1024( control_image_path )

            redux_image = process_img_1024( redux_image_path )


            # with torch.no_grad():
            #     # experiment: blend the Redux embeddings of the two garments
            #     alpha = 0.9
            #     # tmp_path = 'tmp_other.jpg'
            #     # prompt_emb,pooled_prompt_emb = pipe_prior_redux(control_image,
            #     #                                                 return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
            #     prompt_emb2,pooled_prompt_emb2 = pipe_prior_redux(redux_image,return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
            #     # prompt_emb2[:,-25:,:]=0
            #     # prompt_emb2[:,0::2,:]=0

            #     # prompt_emb = (1-alpha) * prompt_emb + alpha * prompt_emb2
            #     prompt_emb =  prompt_emb2
            #     pooled_prompt_emb = pooled_prompt_emb2
            # NOTE(review): the string below records (in Chinese) empirical notes
            # on which Redux token ranges carry color / pocket / logo information.
            '''
            10 颜色不保留,((整体样式))差不多细节会有变化,但是领口样式,口袋位置,拉链位置差不多,global信息
            20 颜色不保留,

            颜色信息在后半段 -350~
            -50~ 信息稀疏,有一点关于背景信息的
            -100~ 颜色信息还是不够,衣服上面的小细节没有(比如口袋,logo)
            -150~ 出现了一些颜色信息,但是还不是很多,出现了口袋,但是信息比较弱,还没有出现logo
            -200~ 和-150类似,有颜色信息,比较淡,口袋信息也不明显,-150~-200 似乎引入了很多的颜色信息
            -250~ 和 ~200差不多?
            -300  出现了logo 
            -350
            '''
            # NOTE(review): redux_image is already a processed PIL image, yet it
            # is passed through process_img_1024 again as the *path* argument —
            # presumably the helper tolerates PIL input; verify (elsewhere the
            # pattern used is process_img_1024('', img_pil=...)).
            prompt_emb,pooled_prompt_emb =pipe_prior_redux(process_img_1024(redux_image),return_dict=False) 
            # control_image = resize_with_aspect(control_image_path)
            # Foreground mask of the control garment; generation is confined to
            # this silhouette over a blank white canvas.
            mask_pil,no_bg_img = get_mask_by_rmbg( model_rmbg , control_image )
            white_image = Image.new("RGB", mask_pil.size , (255, 255, 255))
            
            # pdb.set_trace()


            with torch.no_grad():
                image = pipe(
                    # prompt=prompt,
                    
                    image=white_image,
                    mask_image=mask_pil,
                    
                    height=control_image.height,
                    width=control_image.width,
                    
                    num_inference_steps=20,
                    guidance_scale=4.5,

                    # Redux image embeddings replace the text prompt.
                    prompt_embeds=prompt_emb,
                    pooled_prompt_embeds=pooled_prompt_emb,
                ).images[0]

            # Row selection: c2 cycles fastest in product(), so idx % num picks
            # the redux-image row (assumes len(clo_list2) == num — TODO confirm).
            res_imgs[idx%num + 1].append(
                process_img_1024('',img_pil=image)
            )

            idx += 1 
            print('cur id ',idx)


            # Per-sample preview strip: [mask | control | redux | generated].
            concat_tmp_res = horizontal_concat_images( [ 
                process_img_1024('',img_pil=mask_pil) , 
                process_img_1024(control_image_path),
                redux_image ,   
                process_img_1024('',img_pil=image) ,
                ] )

            concat_tmp_res.save('tmp.jpg')

        # pdb.set_trace()
        # Assemble the grid: concat each row horizontally, then stack the rows.
        res_imgs_hori_concat = [
            horizontal_concat_images(ri) for ri in res_imgs
        ]
        res_imgs_verti_concat = vertical_concat_images(res_imgs_hori_concat)
        res_imgs_verti_concat.save(f'{output_dir}/tmp_redux_ori_by_fill_{ckp_id}_longshort_reverse.jpg')
        # pdb.set_trace()
        # Only the first garment type is processed.
        break

    # image.save('tmp.jpg')
    # image_concat = Image.new('RGB' , (width,height*2) , (0,0,0))
    # image_concat.paste(image , (0,0))
    # image_concat.paste(img_concat3 , (0,height))
    # image_concat.save('tmp.jpg')

    # return image_concat

    # pdb.set_trace()

    # concat_img = horizontal_concat_images([image,Image.open(texture_path),Image.open(shape_path)])
    # concat_img.save('tmp.jpg')
# def test_all(img_concat3=None,mask_image=None,):
#     for i in range(len(FLUX_LORA_PATHS)):
#         lora_path =  os.path.join(FLUX_LORA_DIR , 
#                         FLUX_LORA_PATHS[i] , 
#                         'pytorch_lora_weights.safetensors')
#         if not os.path.exists(lora_path):continue

#         pipe.load_lora_weights(lora_path)
#         torch.cuda.empty_cache()

#         image = pipe(
#             image=img_concat3,
#             mask_image=mask_image,
#             height=target_shape[1],
#             width=target_shape[0],

#             prompt_embeds=prompt_embeds,
#             pooled_prompt_embeds=pooled_prompt_embeds,
            
#             guidance_scale=guidance_scale,
#             num_inference_steps = steps,
#             # depth_condition=packed_depth_latents,
#             # texture_condition=packed_texture_latents,
#         ).images[0]

#         print(f'generate {FLUX_LORA_PATHS[i]}')
#         image_concat = Image.new('RGB' , (width,height*2) , (0,0,0))
#         image_concat.paste(image , (0,0))
#         image_concat.paste(img_concat3 , (0,height))
#         image_concat.save('tmp.jpg')

#         pdb.set_trace()

# pdb.set_trace()
if __name__=='__main__':
    # Sweep every saved LoRA checkpoint: ids 2000, 4000, ..., 60000.
    # The weights are loaded into the global pipeline only on the first
    # iteration; later iterations reuse the already-loaded weights.
    for pass_no, checkpoint_id in enumerate(range(2000, 60001, 2000)):
        test_one(ckp_id=checkpoint_id,
                 lora_path=None,
                 load=(pass_no == 0))