import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
import util_for_huggingface
import torch,pdb
# from diffusers import  FluxPipeline
from diffusers import FluxFillPipeline, FluxPriorReduxPipeline,ReduxImageEncoder
from diffusers.utils import load_image
from util_flux import horizontal_concat_images,process_img_1024
from PIL import Image
# from image_gen_aux import DepthPreprocessor

'''
20250715
Test "try-off" inference: given a photo of a person, generate an
in-shop product view of the garment they are wearing.
Required data:
    prompt
    human image
Base model: FLUX Fill
Required processing:
    concat( noise , human )
    mask = white + black
    prompt from caption
'''

# Local checkpoint directories for the model variants used in experiments.
# Only FLUX_FILL is actually loaded below; the others are leftovers from
# earlier runs (depth / redux / ip-adapter pipelines are commented out).
FLUX_FILL='/home/shengjie/ckp/FLUX.1-Fill-dev'
DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'
FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_DEPTH='/home/shengjie/ckp/FLUX.1-Depth-dev'
FLUX_ADAPTER = '/home/shengjie/ckp/flux-ip-adapter-v2'
FLUX_ADAPTER_ENCODER = '/home/shengjie/ckp/clip-vit-large-patch14'
FLUX='/data/models/FLUX___1-dev'


##############  CLIP: obtain prompt_embeds  ##########
from util_for_clip import get_clip_model
from util_for_os import ose,osj

# Garment categories; choose_type_id selects which one this run processes.
types = ['upper','lower','fullbody']
choose_type_id = 0
choose_type = types[choose_type_id]
data_root = '/mnt/nas/shengjie2/datasets'
dataset_type = 'dc'

# Fixed prompt templates: a short caption for CLIP and a longer two-image
# description for T5.
get_fixed_prompt_for_clip =  lambda t : f'An in-shop display of a human {t}.'
get_fixed_prompt_for_t5 = lambda t : f'[Image1] shows the {t} as in a product display. [Image2] shows a person wearing the {t} from Image1'

# Pre-computed prompt embeddings, one .bin file per garment type.
caption_prompt_dir = '/mnt/nas/shengjie/datasets/caption_for_tryoff'
get_caption_path = lambda t: osj(caption_prompt_dir,f'prompt_{t}.bin')

def get_prompt_emb(clip_txt='',t5_txt='',prompt_path=''):
    """Return the triple (prompt_embeds, pooled_prompt_embeds, text_ids).

    If ``prompt_path`` is a non-empty string, the three tensors are loaded
    from that file — expected to be a mapping saved with ``torch.save``
    whose values are, in insertion order: prompt_embeds,
    pooled_prompt_embeds, text_ids. Otherwise the global FLUX pipeline
    ``pipe`` encodes ``clip_txt`` / ``t5_txt`` on the fly.
    """
    if prompt_path != '':
        # Cached path: unpack relies on the saved dict's insertion order.
        cached = torch.load(prompt_path)
        # pdb.set_trace()
        embeds, pooled, ids = cached.values()
        return embeds, pooled, ids
    # No cache: run the pipeline's text encoders without building a graph.
    with torch.no_grad():
        embeds, pooled, ids = pipe.encode_prompt(
            prompt=clip_txt,
            prompt_2=t5_txt,
        )
    return embeds, pooled, ids

# Per-dataset layout descriptors:
#   ori_dir   — dataset root directory
#   types     — garment type at each index (parallel to clo_dir/human_dir)
#   clo_dir   — sub-directories holding garment (cloth) images
#   human_dir — sub-directories holding person images
#   get_human_name_by_clo — maps a cloth filename to its person filename
datasets_map = {
    'dc':{
        'ori_dir':'/mnt/nas/shengjie/datasets/DressCode_1024',
        'types':[types[0],types[1],types[2],],
        'clo_dir':['upper/cloth','lower/cloth','dresses/cloth'],
        'human_dir':['upper/image','lower/image','dresses/image'],
        # DressCode convention: cloth file *_1.jpg pairs with person *_0.jpg.
        'get_human_name_by_clo':lambda clo:clo.replace('_1','_0'),
    },
    'viton':{
        'ori_dir':'/mnt/nas/shengjie/datasets/VITON-HD_ori',
        'types':[types[0],types[0],],
        'clo_dir':['test/cloth','train/cloth'],
        'human_dir':['test/image','train/image'],
        # VITON-HD: cloth and person images share the same filename.
        'get_human_name_by_clo':lambda clo:clo,
    },
    
}

# Resolve the person-image directory for the chosen dataset / garment type.
dataset_dict = datasets_map[dataset_type]
ori_dir = dataset_dict['ori_dir']
sub_dir = dataset_dict['human_dir'][choose_type_id]
human_dir = osj( ori_dir , sub_dir )

# NOTE(review): hard override — the computed dataset path above is discarded
# and the local './tryoff' folder is scanned instead; presumably a debugging
# shortcut. Confirm before running on the full dataset.
human_dir = 'tryoff'
# localimg_data_dir = get_localimg_data_dir(choose_type)
# localimg_mask_data_dir = get_localimg_mask_data_dir(choose_type)
# localimg_canny_data_dir = get_localimg_canny_data_dir(choose_type)


dtype = torch.bfloat16
device = 'cuda'

# LoRA checkpoint (trained for try-off) that will be loaded onto FLUX Fill.
ckp_id = 13000
# ckp_dir = 'depth_local_output_20250714'
# get_local_embeder_path = lambda ckp_id : f'/mnt/nas/shengjie/{ckp_dir}/checkpoint-{ckp_id}/image_encoder_1.bin'
# get_dpth_embeder_path = lambda ckp_id : f'/mnt/nas/shengjie/{ckp_dir}/checkpoint-{ckp_id}/image_encoder_2.bin'
get_lora_path = lambda ckp_id : f'/mnt/nas/shengjie/tryoff_output20250715/checkpoint-{ckp_id}/pytorch_lora_weights.safetensors'
# local_embder_ckp_path = get_local_embeder_path(ckp_id)
# dpth_embder_ckp_path = get_dpth_embeder_path(ckp_id)
lora_path = get_lora_path(ckp_id)

# from lotus.app_infer_depth import load_pipe_d,get_depth_by_lotus_d
# task_name = 'n'
# depth_pipe = load_pipe_d(task_name).to(device,dtype=torch.bfloat16)

# pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
#                                         FLUX_REDUX, 
#                                         torch_dtype=torch.bfloat16).to(device)
# local_embder = ReduxImageEncoder().to(device,dtype=dtype)
# dpth_embder = ReduxImageEncoder().to(device,dtype=dtype)

# Load the FLUX Fill inpainting pipeline in bf16 on the single visible GPU
# (CUDA_VISIBLE_DEVICES is pinned to '0' at the top of the file).
pipe = FluxFillPipeline.from_pretrained(FLUX_FILL, 
                                        torch_dtype=torch.bfloat16).to("cuda")

# local_embder_ckp = torch.load(local_embder_ckp_path)
# local_embder.load_state_dict(local_embder_ckp)
# dpth_embder_ckp = torch.load(dpth_embder_ckp_path)
# dpth_embder.load_state_dict(dpth_embder_ckp)
# Apply the try-off LoRA weights onto the base Fill pipeline.
pipe.load_lora_weights(lora_path)

# load lora

def get_masked_img(img,mask):
    """White out the regions of ``img`` where ``mask`` is white.

    ``mask`` is converted to grayscale; pixels where it is black keep the
    original image, pixels where it is white are replaced with a white
    background of the same size.
    """
    from PIL import ImageOps
    gray_mask = mask.convert('L')
    white_bg = Image.new("RGB", img.size, 'white')
    # Invert so that the black mask regions select the original image
    # (Image.composite takes image1 where the selector is 255).
    return Image.composite(img, white_bg, ImageOps.invert(gray_mask))
def horizontal_concat_images(images,):
    """Concatenate PIL images left-to-right on a white canvas.

    The canvas is as wide as the sum of all image widths and as tall as the
    tallest image; each image is pasted top-aligned, so shorter images leave
    white space below them. No gaps or separator lines are drawn.

    NOTE(review): this local definition shadows the
    ``horizontal_concat_images`` imported from ``util_flux`` at the top of
    the file — the local one wins for the rest of the script.

    Raises ValueError if ``images`` is empty.
    """
    widths, heights = zip(*(img.size for img in images))
    total_width = sum(widths)
    max_height = max(heights)

    # White background so height differences stay white, matching the
    # white "to be inpainted" panel convention used by the main loop.
    new_img = Image.new('RGB', (total_width, max_height), color='white')

    # Paste each image flush against the previous one (enumerate index in
    # the original was unused and has been dropped).
    x_offset = 0
    for img in images:
        new_img.paste(img, (x_offset, 0))
        x_offset += img.width

    return new_img
count = 0
prename = None
# Main loop: for each person photo in human_dir, build a two-panel canvas
# [white placeholder | person], mask the white left half, and ask FLUX Fill
# to paint the in-shop garment view into it.
for entry in os.scandir(human_dir):
    if not entry.is_file():continue
    filename = entry.name
    if not filename.endswith('.jpg'):continue

    count+=1
    print('\rprocess idx: ',count,end='',flush=True)

    ori_filepath = osj( human_dir , filename )
    # if prename is None:
    #     prename = filename
    #     continue
    # local img 1 for depth
    # local img 2 for redux
    # localimg_filepath = osj( localimg_data_dir , prename )
    # localimg_filepath2 = osj( localimg_data_dir , filename )
    # localimg_mask_filepath = osj( localimg_mask_data_dir , filename.replace('.jpg','.png') )
    # localimg_canny_filepath = osj( localimg_canny_data_dir , filename )
    # localimg_normal_filepath = osj( localimg_normal_data_dir , filename )

    # prename = filename

    # Resize/normalize the person image to the model's working resolution.
    # (process_img_1024 comes from util_flux; presumably pads/resizes to
    # 1024-scale — confirm against its definition.)
    input_img = process_img_1024(ori_filepath).convert("RGB")

    # Interactive step: the operator types the garment category, and the
    # matching pre-computed prompt embeddings are loaded from disk.
    # NOTE(review): a non-integer or out-of-range entry raises here —
    # confirm this script is only ever run attended.
    input_id = int(input('input type [0:upper 1:lower 2:fullbody]'))
    prompt_embeds,pooled_prompt_embeds,_ = get_prompt_emb(
                                prompt_path=get_caption_path(types[input_id]))    


    # for input
    # local_masked_img = get_masked_img(ori_imgae,local_mask)
    # Left panel: blank white image, same size as the person photo; this is
    # the region the model will fill with the extracted garment.
    white_image = Image.new("RGB", (input_img.width,input_img.height) , (255, 255, 255))
    # black_image = Image.new("RGB", (input_img.width//2,input_img.height) , (0, 0 , 0) )
    input_img = horizontal_concat_images([white_image,input_img])
    # for mask
    # Inpainting mask: white (= regenerate) over the left half, black
    # (= keep) over the right half where the person photo sits.
    white_image = Image.new("RGB", (input_img.width//2,input_img.height) , (255, 255, 255))
    black_image = Image.new("RGB", input_img.size , (0, 0, 0))
    black_image.paste(white_image, (0, 0))
    mask = black_image

    # input_img.save('tmp.jpg')
    # mask.save('tmp.jpg')
    # pdb.set_trace()

    # infer
    with torch.no_grad():
        image = pipe(
            image=input_img,
            mask_image=mask,
            height=input_img.size[1],
            width=input_img.size[0],

            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            
            guidance_scale=4.5,
            num_inference_steps = 30,
            # depth_condition=packed_depth_latents,
            # texture_condition=packed_texture_latents,
        ).images[0]


    # NOTE(review): every iteration overwrites the same 'tmp.jpg'; results
    # are lost unless inspected between the interactive prompts.
    image.save('tmp.jpg')
    # pdb.set_trace()

    # break


