
import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch,pdb
import warnings,shutil
warnings.filterwarnings('ignore')
from diffusers import FluxPriorReduxPipeline
from diffusers.utils import load_image
# from itertools import product
# from image_gen_aux import DepthPreprocessor

from util_flux import pad_image
# from util_flux import horizontal_concat_images

# from util_sam import get_mask_by_sam
# from util_mask import get_erosed_mask_by_radtio,add_random_holes


# Local checkpoint paths for the FLUX.1 model family (only FLUX_REDUX is used below).
FLUX_FILL='/home/shengjie/ckp/FLUX.1-Fill-dev'
FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_DEPTH='/home/shengjie/ckp/FLUX.1-Depth-dev'
FLUX_DEPTH_LORA='/home/shengjie/ckp/FLUX.1-Depth-dev-lora'
FLUX='/data/models/FLUX___1-dev'

# NOTE(review): name is misspelled ("PREDCITION" -> "PREDICTION"); it is only
# referenced by the commented-out DepthPreprocessor line further down.
DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'


# (width, height) every image is resized to after padding, before encoding.
target_shape = (1024,1024)

examples_dir = '/data/shengjie/style_zhenzhi/'

# depth_dir = '/data/shengjie/style_zhenzhi_depth/'
embding_redux_dir = '/data/shengjie/style_zhenzhi_emb/'  # NOTE(review): created below but never written to in this script
# os.makedirs(depth_dir,exist_ok=True)
os.makedirs(embding_redux_dir,exist_ok=True)

# NOTE(review): dead assignment -- `imagefiles` is rebuilt per dataset inside
# the main loop (from img_dir) before this value is ever read.
imagefiles = os.listdir(examples_dir)

# processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)

# Load the Redux prior once in bfloat16 on the GPU; it maps an input image
# (plus optional text prompts) to FLUX prompt embeddings.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                            FLUX_REDUX, 
                                            torch_dtype=torch.bfloat16).to("cuda")

from tqdm import tqdm

# Garment-part dataset names; each has its own input folder and embedding output folder.
types = ['collar','sleeve','pockets']

# Fixed T5 text prompt paired with every image (passed as `prompt_2` to the
# Redux prior below). Plain adjacent-literal concatenation: the original used
# f-string prefixes with no placeholders, so the `f` was a no-op and is dropped.
fixed_prompt = "The pair of images highlights first clothing showing second image's depth with third clothing's local feature, high resolution, 4K, 8K; " \
            "[IMAGE1] Synthesis clothing with second's depth and third's local feature." \
            "[IMAGE2] Depth image of a clothing." \
            "[IMAGE3] Detailed local feature shot of a clothing."
# Main loop: for each garment-part dataset, encode every .jpg image with the
# Redux prior and save the resulting prompt embeddings to a sibling *_embed dir.
for choose_type in types:
    img_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced'
    save_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced_embed'
    assert os.path.exists(img_dir),img_dir
    imagefiles = [n for n in os.listdir(img_dir) if n.endswith('.jpg')]
    # Start from a clean output directory on every run; previous embeddings are discarded.
    if os.path.exists(save_dir):shutil.rmtree(save_dir)
    os.makedirs(save_dir)
    
    for t1 in tqdm(imagefiles):

        img1_path = os.path.join(img_dir,t1)
        # depth_save_path = os.path.join(depth_dir,t1.replace('.jpg','.png'))
        # Output path: same basename with a .pth extension. NOTE(review): the
        # '.png' replace is dead code -- imagefiles is filtered to '.jpg' above.
        redux_save_path = os.path.join(save_dir,
                                    t1.replace('.jpg','.pth')\
                                        .replace('.png','.pth'))
        
        # pdb.set_trace()
        # Load as PIL, pad (pad_image's extra return values are unused here),
        # then resize to the fixed target_shape.
        img1 = load_image(img1_path) # PIL
        img1,_,_,_,_ = pad_image(img1)
        img1 = img1.resize(target_shape)

        ## depth(img1)
        # img1_depth = processor(img1)[0].convert("RGB")
        # del processor

        ## emb(img1)
        # prompt 1 feeds the CLIP branch -> pooled_prompt_embeds;
        # prompt 2 feeds the T5 branch   -> prompt_embeds.
        # prompt=None leaves the CLIP branch image-only; only the fixed text
        # prompt goes through T5. No grad needed: inference only.
        with torch.no_grad():
            main_condition_prompt = pipe_prior_redux(img1,
                                                    prompt=None,
                                                     prompt_2=fixed_prompt) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
        # del pipe_prior_redux

        # Pipeline output exposes (per the author's inspection):
        # dict_keys(['prompt_embeds', 'pooled_prompt_embeds'])
        # torch.Size([1, 1241, 4096]) 
        # torch.Size([1, 768])
        # pdb.set_trace()
        # Persist the output's attribute dict so downstream training can load
        # the cached embeddings without re-running the prior.
        torch.save(main_condition_prompt.__dict__,
                redux_save_path)