import os
import util_for_huggingface
os.environ['CUDA_VISIBLE_DEVICES']='1'
import torch,pdb,shutil
# from diffusers import  FluxPipeline
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline
from pipeline_controlimg import FluxPipeline
from diffusers.utils import load_image
from util_flux import horizontal_concat_images,process_img_1024,vertical_concat_images
from PIL import Image
from image_gen_aux import DepthPreprocessor
from util_for_os import ose,osj
from itertools import product
from demo_rmbg import load_rmbg,get_mask_by_rmbg

'''
20250707
 Evaluate the training results of train_flux_depthredux.py.
 Required inputs:
    1. controlimg (depth map)
    2. redux (reference image for the prior-redux embedding)
'''

# FLUX_FILL='/home/shengjie/ckp/FLUX.1-Fill-dev'
# DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'

# FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
# FLUX_DEPTH='/home/shengjie/ckp/FLUX.1-Depth-dev'
# FLUX_ADAPTER = '/home/shengjie/ckp/flux-ip-adapter-v2'
# FLUX_ADAPTER_ENCODER = '/home/shengjie/ckp/clip-vit-large-patch14'
# FLUX='/data/models/FLUX___1-dev'

from MODEL_CKP import *

# Dataset / output path helpers.  Plain `def`s instead of lambda assignments
# (PEP 8 E731: never assign a lambda to a name) — behavior is unchanged.
def examples_dir(t):
    """Directory of the balanced cloth dataset for attribute type `t` (e.g. 'collar')."""
    return f'/mnt/nas/shengjie/datasets/cloth_{t}_balanced/'

def get_save_dir(num_steps):
    """Per-checkpoint output directory for synthesis results."""
    return f'/mnt/nas/shengjie/synthesis_results/synthesis_global_depth_redux-{num_steps}/'

# Local folder collecting the concatenated preview grids.
output_dir = './tmp_redux0814'
os.makedirs(output_dir, exist_ok=True)


# imagefiles = [n for n in os.listdir(examples_dir) if n.endswith('.jpg')]
# types = ['collar','pockets','sleeve']
# dic = {
#     types[0]:[
#         ['collar_0000883.jpg','collar_0000944.jpg'],
#         ['collar_0000944.jpg','collar_0000883.jpg'],
#         ['collar_0000910.jpg','collar_0000972.jpg'],
#         ['collar_0000972.jpg','collar_0000910.jpg'],
#     ],
#     types[1]:[
#         ['pockets_20250625_Cargo-Pocket_643.jpg','pockets_20250625_Cargo-Pocket_664.jpg'],
#         ['pockets_20250625_Cargo-Pocket_664.jpg','pockets_20250625_Cargo-Pocket_643.jpg'],
#         ['pockets_20250625_Cargo-Pocket_1203.jpg','pockets_20250625_Cargo-Pocket_1521.jpg'],
#         ['pockets_20250625_Cargo-Pocket_1521.jpg','pockets_20250625_Cargo-Pocket_1203.jpg'],
#     ],
#     types[2]:[
#         ['sleeve_0000077.jpg','sleeve_0000495.jpg'],
#         ['sleeve_0000495.jpg','sleeve_0000077.jpg'],
#         ['sleeve_0001458.jpg','sleeve_0001510.jpg'],
#         ['sleeve_0001510.jpg','sleeve_0001458.jpg'],
#     ],
# }
# imagefiles = ['collar_0000878.jpg','collar_0000879.jpg']
# tmp_save = []
# from copy import deepcopy
# Prior-redux pipeline: encodes a reference image into prompt embeddings
# (used below in place of a text prompt).
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                    FLUX_REDUX, 
                                    torch_dtype=torch.bfloat16).to("cuda")
# Depth estimator used to turn a garment photo into the structural control image.
depth_processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)
# Background-removal model; get_mask_by_rmbg() uses it to mask garments.
model_rmbg = load_rmbg()


# for key,value in dic.items():
#     for imagefiles in value:

#         test_img = os.path.join(examples_dir(key),imagefiles[0])
#         test_img2 = os.path.join(examples_dir(key),imagefiles[1])
#         save_test_name = os.path.splitext(imagefiles[0])[0]+\
#                                         os.path.splitext(imagefiles[1])[0] +\
#                                         os.path.splitext(imagefiles[0])[1]
                                    
#         # prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
#         control_image = process_img_1024(load_image(test_img))
#         image2 = process_img_1024(load_image( test_img2 ))


#         with torch.no_grad():
#             # 这里试试不用衣服的redux相加
#             # tmp_path = 'tmp_other.jpg'
#             prompt_emb,pooled_prompt_emb = pipe_prior_redux(control_image,
#                                                             return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
#             prompt_emb2,pooled_prompt_emb2 = pipe_prior_redux(image2,return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
#             alpha = 0.9
#             prompt_emb = (1-alpha) * prompt_emb + alpha * prompt_emb2
#         # pipe_prior_output2 = pipe_prior_redux(image2,return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
#         # torch.cuda.empty_cache()

#         target_shape = (1024,1024)
#         # from lotus.app_infer_depth import get_depth_by_lotus
#         # output_d,output_g = get_depth_by_lotus(test_img,0) # PIL*2    922*1050
#         # control_image = process_img_1024(output_g)
#         processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)
#         control_image = processor(control_image)[0].convert("RGB")
#         control_image2 = processor(image2)[0].convert("RGB")

#         # pdb.set_trace()
#         tmp_save.append([ 
#                 control_image,
#                 control_image2,
#                 image2,
#                 prompt_emb,
#                 pooled_prompt_emb,
#                 save_test_name,
#             ])

# del pipe_prior_redux

# Free cached GPU memory left over from the model setup above.
torch.cuda.empty_cache()

def get_lora_path(ckp_id):
    """Path to the trained LoRA weights for training checkpoint `ckp_id`.

    Plain `def` instead of a lambda assignment (PEP 8 E731); same output.
    """
    return f'/mnt/nas/shengjie/depthcontrol0812_output/checkpoint-{ckp_id}/pytorch_lora_weights.safetensors'

# Base depth-conditioned Flux pipeline; LoRA weights are loaded into it later
# by load_lora_and_generate().
pipe = FluxControlPipeline.from_pretrained(FLUX_DEPTH, torch_dtype=torch.bfloat16).to("cuda")

def get_tmp_clo1_clo2(folder='./compare_longshort', k=5):
    """Split the sorted files of `folder` into its first `k` and last `k` entries.

    folder -- directory holding the comparison garment images; the default
              keeps the original hard-coded path, so existing callers behave
              identically.
    k      -- how many files to take from each end (default 5, as before).

    Returns (first_k_paths, last_k_paths).  `osj` comes from util_for_os and
    is presumably os.path.join — TODO confirm.  NOTE(review): when the folder
    holds fewer than 2*k files the two lists overlap, exactly as in the
    original implementation.
    """
    # sorted() instead of in-place sort; local renamed — the original `dir`
    # shadowed the builtin of the same name.
    names = sorted(os.listdir(folder))
    paths = [osj(folder, name) for name in names]
    return paths[:k], paths[-k:]


num = 5  # results per grid row; matches the 5 files returned by get_tmp_clo1_clo2()
def load_lora_and_generate(ckp_id='ori',is_load=True):
    """Optionally load LoRA weights for checkpoint `ckp_id` into `pipe`, then
    generate one image per (control garment, redux garment) pair and save a
    concatenated comparison grid.

    ckp_id  -- checkpoint id used to locate the LoRA file and to name outputs
               (default 'ori', i.e. base model when is_load is False).
    is_load -- when True, load the LoRA weights before generating.

    Side effects: recreates get_save_dir(ckp_id) from scratch, overwrites
    'tmp.jpg' after every sample, and writes the final grid into `output_dir`.
    """
    if is_load:
        lora_path = get_lora_path(ckp_id)
        print(lora_path)
        assert os.path.exists(lora_path),lora_path
        # pdb.set_trace()
        from safetensors.torch import load_file
        # state_dict = load_file( lora_path )
        pipe.load_lora_weights(lora_path)
        torch.cuda.empty_cache()
    # pdb.set_trace()

    save_dir = get_save_dir(ckp_id)
    # Start from a clean per-checkpoint output directory.
    if os.path.exists(save_dir):shutil.rmtree(save_dir)
    os.makedirs(save_dir)

    control_alpha = 0.3
    
    clo_list2,clo_list1 = get_tmp_clo1_clo2()
    
    def get_mask(size=(1024,1024),fill=(255,255,255)):
        # Solid-color (default white) placeholder tile for the grid corner.
        return Image.new('RGB',size,color=fill)
    
    # Result grid: first row = blank corner + the control garments (clo_list1);
    # each following row starts with one redux garment from clo_list2.
    res_imgs = [
        [get_mask()]+[ process_img_1024(path) for path in clo_list1 ],
        *[[process_img_1024(path)] for path in clo_list2], 
    ]
    # pdb.set_trace()
    # every generated image gets appended to its row of res_imgs (num per row)
    idx = 0
    for c1,c2 in product(clo_list1,clo_list2):
        
        
        control_image_path = c1
        redux_image_path = c2

        # paste the result back onto the original garment (disabled experiment)
        # ori_path = osj(examples_dir2,filename)
        # mask_path = osj(examples_dir3,filename.replace('.jpg','.png'))
        # ori_img = Image.open( ori_path )
        # mask_img = Image.open( mask_path )
        # assert ori_img.size == mask_img.size,f'{ori_img.size} , {mask_img.size}'

        # pos , tar_size = get_whitemask_pos_and_size(mask_img)

        # pdb.set_trace()


        control_image = process_img_1024( control_image_path )
        # Garment mask of the control image (background removal model).
        mask_pil , no_bg_img =   get_mask_by_rmbg( model_rmbg , control_image )

        redux_image = process_img_1024( redux_image_path )


        with torch.no_grad():
            # experiment: do NOT blend in the control garment's own redux embedding here
            alpha = 0.8
            # tmp_path = 'tmp_other.jpg'
            # prompt_emb,pooled_prompt_emb = pipe_prior_redux(control_image,
            #                                                 return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
            prompt_emb2,pooled_prompt_emb2 = pipe_prior_redux(redux_image,return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
            # prompt_emb2[:,-25:,:]=0
            # prompt_emb2[:,0::2,:]=0

            # prompt_emb = (1-alpha) * prompt_emb + alpha * prompt_emb2
            # Only the redux garment's embedding is used, scaled by alpha.
            prompt_emb =  prompt_emb2 * alpha
            pooled_prompt_emb = pooled_prompt_emb2
            # (Chinese notes below, kept verbatim: empirical observations about
            # which token ranges of the redux embedding carry color / pocket /
            # logo / global-style information.)
            '''
            10 颜色不保留,((整体样式))差不多细节会有变化,但是领口样式,口袋位置,拉链位置差不多,global信息
            20 颜色不保留,

            颜色信息在后半段 -350~
            -50~ 信息稀疏,有一点关于背景信息的
            -100~ 颜色信息还是不够,衣服上面的小细节没有(比如口袋,logo)
            -150~ 出现了一些颜色信息,但是还不是很多,出现了口袋,但是信息比较弱,还没有出现logo
            -200~ 和-150类似,有颜色信息,比较淡,口袋信息也不明显,-150~-200 似乎引入了很多的颜色信息
            -250~ 和 ~200差不多?
            -300  出现了logo 
            -350
            '''
            # test_num = 350
            # total = prompt_emb.shape[1]
            # prompt_emb = torch.concat( [
            #                             prompt_emb[:,-250:,:],

            #                             prompt_emb2[:,-450:,:] ,
                                        
            #                             torch.zeros((1,total-700,4096)).to(prompt_emb.device,dtype=prompt_emb.dtype), 
                                        
                                        
            #                             ] , dim=1 )
            # pooled_prompt_emb = (1-alpha) * pooled_prompt_emb + alpha * pooled_prompt_emb2

        # control_image = resize_with_aspect(control_image_path)

        # Depth map of the control garment becomes the structural control image.
        control_image = depth_processor(
            control_image
        )[0].convert("RGB") # PIL 768 1024
        white_mask = Image.new( 'RGB' , control_image.size , 'white'  )
        from PIL import ImageOps
        # Whiten everything outside the garment mask.
        control_image = Image.composite( white_mask , control_image , ImageOps.invert(mask_pil) ) # normally: black keeps / white covers; after invert: white keeps / black covers
    
        
        # control_image2 = depth_processor(
        #     redux_image
        # )[0].convert("RGB") # PIL 768 1024

        # print(control_image.size)
        control_alpha = 0.4  # overrides the 0.3 assigned above


        # pdb.set_trace()

        with torch.no_grad():
            image = pipe(
                # prompt=prompt,
                control_image=control_image,
                control_image2=control_image,
                control_alpha=control_alpha,
                height=control_image.height,
                width=control_image.width,
                num_inference_steps=20,
                guidance_scale=4.5,
                # generator=torch.Generator().manual_seed(42),
                # **pipe_prior_output,
                prompt_embeds=prompt_emb,
                pooled_prompt_embeds=pooled_prompt_emb,
            ).images[0]
            
            # Whiten the generated image's background the same way.
            mask_pil , no_bg_img =   get_mask_by_rmbg( model_rmbg , image )
            
            white_mask = Image.new( 'RGB' , image.size , 'white'  )
            image = Image.composite( white_mask , image , ImageOps.invert(mask_pil) ) # normally: black keeps / white covers; after invert: white keeps / black covers
    

        # Row is selected by idx % num, so results cycle through the redux rows.
        res_imgs[idx%num + 1].append(
            process_img_1024('',img_pil=image)
        )

        idx += 1 
        print('cur id ',idx)


        # Running preview: depth control | control garment | redux garment | result.
        concat_tmp_res = horizontal_concat_images( [ 
            process_img_1024('',img_pil=control_image) , 
            process_img_1024(control_image_path),
            redux_image ,   
            process_img_1024('',img_pil=image) ,
            ] )

        concat_tmp_res.save('tmp.jpg')

    # pdb.set_trace()
    # Concatenate each row horizontally, then stack all rows vertically.
    res_imgs_hori_concat = [
        horizontal_concat_images(ri) for ri in res_imgs
    ]
    res_imgs_verti_concat = vertical_concat_images(res_imgs_hori_concat)
    res_imgs_verti_concat.save(f'{output_dir}/tmp_redux_{ckp_id}_longshort_reverse.jpg')
    # pdb.set_trace()
   

# load_lora_and_generate( is_load=False )

while True:
    try:
        ckp_id = int(input('input ckp-id:'))
        load_lora_and_generate(ckp_id,is_load=True)

    except:
        break