
import os
os.environ['CUDA_VISIBLE_DEVICES']='3'


'''
Uses a fixed prompt.
Uses FLUX Kontext to extract the clothing from an input image.
'''

from diffusers import FluxKontextPipeline

import torch,pdb
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline
from util_flux import process_img_1024,vertical_concat_images,horizontal_concat_images
from util_flux import resize_with_aspect
from PIL import Image
# from image_gen_aux import DepthPreprocessor
from controlnet_aux import CannyDetector
from itertools import product
from util_for_os import osj,ose

from MODEL_CKP import FLUX_KONTEXT,FLUX_REDUX

# Load the FLUX Kontext editing pipeline in bfloat16 and place it on the GPU.
# DiffusionPipeline.to() returns the pipeline itself, so the call can be chained.
pipe = FluxKontextPipeline.from_pretrained(
    FLUX_KONTEXT,
    torch_dtype=torch.bfloat16,
).to("cuda")


# Garment categories matching the DressCode dataset directory layout.
types = ['upper', 'lower', 'dresses']


# PEP 8 (E731): use `def` instead of assigning lambdas to names — gives the
# callables real names in tracebacks and room for docstrings.
def examples_dir(t):
    """Return the image directory for garment type *t* in DressCode_1024."""
    return f'/mnt/nas/shengjie/datasets/DressCode_1024/{t}/image'


def prompt(t):
    """Primary Kontext prompt: isolate the *t* garment the person is wearing."""
    return f'An image of a garment. Focus on the {t} garment the human wearing, and isolate it from the rest.'


def prompt2(t):
    """Secondary prompt: same isolation request plus product-photo styling (white background, no model)."""
    return f'An image of a garment. Focus on the {t} garment the human wearing, and isolate it from the rest. A high-quality product photo of the {t} garment on a white background, centered, no model, studio lighting.'

# pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
#                                     FLUX_REDUX, 
#                                     torch_dtype=torch.bfloat16).to("cuda")
# from lotus.app_infer_depth import load_pipe_g,get_depth_by_lotus_g

# input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")


# Sample `num` images per garment type and run the extraction on each.
num = 2  # number of example images taken from each category

# Same-type extraction: for every sampled image, ask Kontext to isolate the
# garment of that image's own category.
for t1 in types:
    src_dir = examples_dir(t1)

    # Take the first `num` .jpg files. NOTE(review): os.scandir yields entries
    # in arbitrary, filesystem-dependent order; sorting makes the sampled
    # subset deterministic across runs.
    jpg_names = sorted(e.name for e in os.scandir(src_dir) if e.name.endswith('.jpg'))
    clo_list1 = [osj(src_dir, name) for name in jpg_names[:num]]

    for control_image_path in clo_list1:
        # Resize/pad the source photo to the pipeline's working resolution.
        control_image = process_img_1024(control_image_path)

        with torch.no_grad():
            image = pipe(
                prompt=prompt(t1),
                prompt_2=prompt2(t1),
                image=control_image,
                height=control_image.height,
                width=control_image.width,
                num_inference_steps=20,
                guidance_scale=2.5,
                # Fixed seed so reruns reproduce the same sample.
                generator=torch.Generator().manual_seed(42),
            ).images[0]

        # BUG FIX: the original saved every pair to 'tmp.jpg', overwriting the
        # previous result on each iteration so only the final image survived.
        # Save one side-by-side comparison per (type, source image) instead.
        out_name = f'tmp_{t1}_{os.path.basename(control_image_path)}'
        horizontal_concat_images([control_image, image]).save(out_name)

