
import os
os.environ['CUDA_VISIBLE_DEVICES']='4'

'''
Use a fixed prompt.
Use Kontext to extract the clothing from the input image (try-off).

Loads LoRA weights for each checkpoint being evaluated.
'''

from diffusers import FluxKontextPipeline

import torch,pdb,gc
from util_flux import process_img_1024,vertical_concat_images,horizontal_concat_images
from PIL import Image
# from image_gen_aux import DepthPreprocessor
from itertools import product
from util_for_os import osj,ose

from MODEL_CKP import FLUX_KONTEXT,FLUX_REDUX


# Inference dtype / device for the Flux pipeline.
# NOTE(review): currently unused below — the pipeline is constructed with
# torch.bfloat16 / "cuda" literals directly; kept so external references
# (if any) keep working.
dtype = torch.bfloat16
device = 'cuda'


def get_lora_path(ckp_id):
    """Return the LoRA weights path for checkpoint `ckp_id` (in thousands of steps).

    e.g. ckp_id=12 -> .../checkpoint-12000/pytorch_lora_weights.safetensors
    """
    return (f'/mnt/nas/shengjie/tryoff_output_upper0822/'
            f'checkpoint-{ckp_id}000/pytorch_lora_weights.safetensors')


# All comparison grids produced by the sweep are written here.
save_dir = './tmp_tryoff0825'
os.makedirs(save_dir, exist_ok=True)
# ---------------------------------------------------------------------------
# Loop-invariant configuration (hoisted out of the checkpoint loop; the
# original rebuilt all of this on every iteration).
# ---------------------------------------------------------------------------
types = ['upper', 'lower', 'dresses']   # DressCode garment subsets to sample
categorys = ['upper']                   # prompt categories rendered per image


def examples_dir(t):
    """Directory of sample person images for garment type `t`."""
    return f'/mnt/nas/shengjie/datasets/DressCode_1024/{t}/image'


def prompt(t):
    """Primary prompt: just the bare category name (matches training)."""
    return f'{t}'


def prompt2(t):
    """Secondary prompt (`prompt_2`): same bare category name."""
    return f'{t}'


# Number of sample images taken from each garment type.
num = 2

# ---------------------------------------------------------------------------
# Sweep LoRA checkpoints: reload the base pipeline, apply the LoRA weights,
# run try-off inference on a few sample images per garment type, and save a
# concatenated comparison grid per (checkpoint, type).
# ---------------------------------------------------------------------------
for ckp_id in range(2, 100, 2):
    # Release the previous pipeline before loading the next checkpoint:
    # drop the reference FIRST, then garbage-collect, then return cached
    # CUDA blocks to the driver — empty_cache() can only free memory that
    # is no longer referenced, so clearing before `del pipe` does nothing.
    if 'pipe' in locals():
        del pipe
    gc.collect()
    torch.cuda.empty_cache()

    lora_path = get_lora_path(ckp_id)
    if not os.path.exists(lora_path):
        break  # checkpoints are sequential: the first missing one ends the sweep

    pipe = FluxKontextPipeline.from_pretrained(
        FLUX_KONTEXT, torch_dtype=torch.bfloat16).to("cuda")
    pipe.load_lora_weights(lora_path)

    for t1 in types:
        src_dir = examples_dir(t1)

        # Collect the first `num` .jpg files from the type's image folder.
        # NOTE(review): os.scandir() order is filesystem-dependent, so the
        # sampled images may differ across machines — confirm if the sweep
        # needs to be reproducible.
        clo_list1 = []
        for entry in os.scandir(src_dir):
            if not entry.name.endswith('.jpg'):
                continue
            if len(clo_list1) == num:
                break
            clo_list1.append(osj(src_dir, entry.name))

        # One row per source image; column 0 is the (resized) input itself.
        res_imgs = [[process_img_1024(c1)] for c1 in clo_list1]

        for idx, c1 in enumerate(clo_list1):
            for category in categorys:
                control_image = process_img_1024(c1)

                # Fixed seed so every checkpoint is compared on identical noise.
                with torch.no_grad():
                    image = pipe(
                        prompt=prompt(category),
                        prompt_2=prompt2(category),
                        image=control_image,
                        height=control_image.height,
                        width=control_image.width,
                        num_inference_steps=20,
                        guidance_scale=4.5,
                        generator=torch.Generator().manual_seed(20250718),
                    ).images[0]

                res_imgs[idx].append(image)

                # Quick side-by-side preview; overwritten on every iteration.
                horizontal_concat_images([control_image, image]).save('tmp2.jpg')

        # Stack all rows into one grid image per (checkpoint, garment type).
        rows = [horizontal_concat_images(ri) for ri in res_imgs]
        vertical_concat_images(rows).save(
            osj(save_dir, f'tmp_tryoff_ckp{ckp_id}_{t1}.jpg'))