import os

# Must be set BEFORE torch / any CUDA initialization, otherwise the process
# may still see every GPU on the machine.
os.environ['CUDA_VISIBLE_DEVICES']='0'

# (The bare string below is a no-op note; translation: "GPU usage < 4.5 GB".)
''' GPU 占用 < 4.5GB '''

# Optionally route Hugging Face downloads through a mirror endpoint.
# os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
# SECURITY NOTE(review): a real Hugging Face access token was previously
# committed on the line below; it is redacted here. Revoke that token and
# supply credentials via the environment or a secret store -- never hard-code.
# os.environ['HF_TOKEN'] = "<REDACTED>"
import util_for_huggingface  # project-local module; presumably performs HF setup side effects on import -- verify

import torch,pdb,shutil
# pdb.set_trace()
import warnings
warnings.filterwarnings('ignore')  # silence library warnings for cleaner batch logs
from diffusers import FluxPriorReduxPipeline
from diffusers.utils import load_image
from itertools import product
# from image_gen_aux import DepthPreprocessor

from util_flux import pad_image,process_img_1024
# from util_flux import horizontal_concat_images

# from util_sam import get_mask_by_sam
# from util_mask import get_erosed_mask_by_radtio,add_random_holes

from util_for_clip import get_clip_model

import argparse

# CLI: -t / --choose_index selects which garment part to process (an index
# into the `types` list defined below; out-of-range values make the script
# exit early as a no-op). Help strings are kept verbatim (Chinese).
parser = argparse.ArgumentParser(description="输入提取的类型")
parser.add_argument('-t','--choose_index',required=False,default=0,type=int,help='类型 in [0,1,2]')
args = parser.parse_args()

# Checkpoint locations for the FLUX family of pipelines. Only FLUX_REDUX is
# actually loaded by this script; the rest are kept for reference.
FLUX_FILL = '/home/shengjie/ckp/FLUX.1-Fill-dev'
FLUX_REDUX = '/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_DEPTH = '/home/shengjie/ckp/FLUX.1-Depth-dev'
FLUX_DEPTH_LORA = '/home/shengjie/ckp/FLUX.1-Depth-dev-lora'
FLUX = '/data/models/FLUX___1-dev'

# Depth-estimation checkpoint (unused here -- the DepthPreprocessor import is
# commented out above). Name typo ("PREDCITION") preserved for compatibility.
DEPTH_PREDCITION = '/home/shengjie/ckp/depth-anything-large-hf'


target_shape = (1024, 1024)

# Garment part chosen via the -t/--choose_index CLI flag. Indices past the
# end of the list turn the run into a no-op (negative indices intentionally
# still resolve via Python's negative indexing, as before).
types = ['collar', 'sleeve', 'pockets']
if args.choose_index >= len(types):
    exit(0)
choose_type = types[int(args.choose_index)]

examples_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_localimg'
ori_img_dir  = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced'
# depth_dir = '/data/shengjie/style_zhenzhi_depth/'

# The embedding output directory is rebuilt from scratch on every run.
embding_redux_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_localimg_emb'
# os.makedirs(depth_dir,exist_ok=True)
if os.path.exists(embding_redux_dir):
    shutil.rmtree(embding_redux_dir)
os.makedirs(embding_redux_dir)

# imagefiles = os.listdir(examples_dir)

# processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)

# Load the FLUX Redux prior pipeline in bfloat16 and move it onto the GPU.
# It produces the prompt embeddings that get cached to disk below.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
    FLUX_REDUX,
    torch_dtype=torch.bfloat16,
)
pipe_prior_redux = pipe_prior_redux.to("cuda")

# CLIP processor/model pair used for the pooled image embeddings.
clip_processor, clip_model, maxtokens = get_clip_model()


# Shared text prompt for every Redux call. It describes a three-image layout:
# [IMAGE1] the synthesized garment, [IMAGE2] a depth map, [IMAGE3] a local
# detail crop. The value is a constant (the original used f-strings with no
# placeholders); adjacent literals are concatenated by the parser.
fixed_prompt = (
    "The pair of images highlights first clothing showing second image's depth with third clothing's local feature, high resolution, 4K, 8K; "
    "[IMAGE1] Synthesis clothing with second's depth and third's local feature."
    "[IMAGE2] Depth image of a clothing."
    "[IMAGE3] Detailed local feature shot of a clothing."
)
# from tqdm import tqdm
# Main extraction loop: for every .jpg local crop in examples_dir, compute the
# Redux prompt embeddings plus image-encoder embeddings for both the local
# crop and its same-named counterpart in ori_img_dir, and cache them all in a
# single .pth file under embding_redux_dir.
count = 0
for entry in os.scandir(examples_dir):
    t1 = entry.name
# for t1 in tqdm(imagefiles):
    if not t1.endswith('.jpg'): continue  # only .jpg crops are processed
    count += 1
    # Lightweight progress log: print the count every 5 files, newline every 1000.
    if count % 5== 0:
        print(count , end=' ',flush=True) 
    if count % 1000 == 0:
        print()
    img1_path = os.path.join(examples_dir,t1)
    ori_path = os.path.join(ori_img_dir , t1)
    # Every local crop must have a same-named original image; fail fast otherwise.
    assert os.path.exists(img1_path),img1_path
    assert os.path.exists(ori_path),ori_path

    # depth_save_path = os.path.join(depth_dir,t1.replace('.jpg','.png'))
    # (The .png branch below is redundant given the .jpg filter above, but harmless.)
    redux_save_path = os.path.join(embding_redux_dir,
                                   t1.replace('.jpg','.pth')\
                                    .replace('.png','.pth'))
    
    # pdb.set_trace()
    # PIL
    # Load both images (presumably as PIL images normalized/padded to 1024px
    # by util_flux.process_img_1024 -- TODO confirm against that helper).
    img1 = process_img_1024(img1_path)
    ori_img = process_img_1024(ori_path)

    ## depth(img1)
    # img1_depth = processor(img1)[0].convert("RGB")
    # del processor

    with torch.no_grad():
        ## emb(img1)
        # Redux prior embeddings conditioned on the LOCAL crop plus the fixed
        # three-image prompt.
        # NOTE(review): despite the 'ori_' variable names, these embeddings
        # are computed from img1 (the local crop), not ori_img -- confirm this
        # is intentional before relying on the naming.
        ori_prompt_embeds,ori_pooled_prompt_embeds = pipe_prior_redux(img1,
                                                                    prompt_2=fixed_prompt,
                                                                    return_dict=False) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
        
        # Image-encoder latents of the local crop (per the inline note below,
        # a SigLIP encoder producing [1, 729, 1152] -- verify).
        local_image_latents = pipe_prior_redux.encode_image( img1 , pipe_prior_redux.device , 1  )
        # del pipe_prior_redux
        # Pooled CLIP image feature of the local crop.
        local_image_feature = clip_processor(images=img1)['pixel_values'][0]
        local_image_feature = torch.tensor(local_image_feature).unsqueeze(0).cuda()
        # pdb.set_trace()
        local_pooled_prompt_embeds = clip_model.get_image_features(pixel_values=local_image_feature) # 1 768

        # Same two encoders applied to the ORIGINAL full garment image.
        ori_image_latents = pipe_prior_redux.encode_image( ori_img , pipe_prior_redux.device , 1  )
        ori_image_feature = clip_processor(images=ori_img)['pixel_values'][0]
        ori_image_feature = torch.tensor(ori_image_feature).unsqueeze(0).cuda()
        # pdb.set_trace()
        ori_img_pooled_prompt_embeds = clip_model.get_image_features(pixel_values=ori_image_feature) # 1 768

    ## extra: additionally cache embeddings of the original (full) garment image


    # main_condition_prompt ==> 
    # dict_keys(['prompt_embeds', 'pooled_prompt_embeds'])
    # torch.Size([1, 1241, 4096]) 
    # torch.Size([1, 768])
    # pdb.set_trace()
    # One .pth file per input image; the keys below are the contract read by
    # the downstream consumer -- do not rename them.
    will_save =  {
                'ori_prompt_embeds':ori_prompt_embeds,   # 1 512+729 4096 = 1 1241 4096
                'ori_pooled_prompt_embeds':ori_pooled_prompt_embeds, #  1 768
                'local_image_latents':local_image_latents,  # siglip image encoder for embeder torch.Size([1, 729, 1152])
                'local_pooled_prompt_embeds':local_pooled_prompt_embeds,  # 1 768

                # extra
                'ori_image_latents':ori_image_latents,
                'ori_img_pooled_prompt_embeds':ori_img_pooled_prompt_embeds,
                }
    torch.save(will_save,
               redux_save_path)

# Final summary line after the inline progress counters.
print()
print(f'{choose_type} process count : ',count)    