import torch,os,pdb
from diffusers import FluxPriorReduxPipeline, FluxPipeline
from diffusers.utils import load_image

# Local checkpoint paths for the Redux prior and the base Flux pipeline.
FLUX_REDUX = '/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX = '/data/models/FLUX___1-dev'

# Redux prior: encodes a reference image into Flux prompt embeddings.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                    FLUX_REDUX,
                                    torch_dtype=torch.bfloat16).to("cuda")
# Base Flux pipeline; both text encoders are dropped because the prompt
# embeddings are supplied entirely by the Redux prior above.
pipe = FluxPipeline.from_pretrained(
    FLUX,
    text_encoder=None,
    text_encoder_2=None,
    torch_dtype=torch.bfloat16
).to("cuda")

examples_dir = '/data/shengjie/style1/'
save_dir = '/data/shengjie/synthesis1/'

# Create the output directory up front so the later .save() cannot fail
# with FileNotFoundError on a fresh machine.
os.makedirs(save_dir, exist_ok=True)

# sorted() makes the choice of the two blended source images deterministic;
# os.listdir() order is arbitrary and varies across filesystems.
imagefiles = sorted(os.listdir(examples_dir))

# The first two files in the style directory are the interpolation endpoints.
test_img = os.path.join(examples_dir, imagefiles[0])
test_img2 = os.path.join(examples_dir, imagefiles[1])


# The two source images and their Redux embeddings do not depend on the
# blend weight: load and encode them ONCE instead of on every iteration
# (the original re-ran two GPU prior passes per step of the sweep).
image = load_image(test_img)
image2 = load_image(test_img2)
prior_out1 = pipe_prior_redux(image)   # attr: prompt_embeds torch.Size([1, 1241, 4096])
prior_out2 = pipe_prior_redux(image2)  # attr: prompt_embeds torch.Size([1, 1241, 4096])

test_steps = 20

# Sweep the blend weight s1 over 0.1 .. 1.0; s2 = 1 - s1.
for s1 in [i / 10 for i in range(1, 11)]:
    s2 = 1 - s1

    # Output name encodes both source stems, their weights, and the step count.
    save_test_img = os.path.join(save_dir,
                                os.path.splitext(imagefiles[0])[0]+f'_{s1:.1f}'+\
                                    os.path.splitext(imagefiles[1])[0]+f'_{s2:.1f}'\
                                        +f'_step={test_steps}'+\
                                    os.path.splitext(imagefiles[0])[1]
                                )

    # Interpolate BOTH embedding tensors.  The original blended only
    # prompt_embeds, leaving pooled_prompt_embeds at 100% of image 1,
    # which made the sweep asymmetric between the two images.  A fresh
    # dict is built each iteration so the cached prior outputs above are
    # never mutated in place.
    blended = {
        'prompt_embeds': s1 * prior_out1['prompt_embeds']
                         + s2 * prior_out2['prompt_embeds'],
        'pooled_prompt_embeds': s1 * prior_out1['pooled_prompt_embeds']
                                + s2 * prior_out2['pooled_prompt_embeds'],
    }

    images = pipe(
        guidance_scale=2.5,
        num_inference_steps=test_steps,
        # Fixed CPU seed so every weight setting starts from the same
        # latent noise and the outputs are directly comparable.
        generator=torch.Generator("cpu").manual_seed(0),
        **blended,
    ).images
    images[0].save(save_test_img)

# for test_steps in [10,15,20,30,40,50]:
#     # test_steps = 10
#     save_test_img = os.path.join(save_dir,
#                                 os.path.splitext(imagefiles[0])[0]+'_'+\
#                                     os.path.splitext(imagefiles[1])[0]\
#                                         +f'_step={test_steps}'+\
#                                     os.path.splitext(imagefiles[0])[1]
#                                 )
#     # pdb.set_trace()

#     image = load_image( test_img )
#     image2 = load_image( test_img2 )
#     pipe_prior_output = pipe_prior_redux(image) # attr: prompt_embeds torch.Size([1, 1241, 4096])
#     pipe_prior_output2 = pipe_prior_redux(image2) # attr: prompt_embeds torch.Size([1, 1241, 4096])

#     pdb.set_trace()

#     pipe_prior_output['prompt_embeds'] = 0.6 * pipe_prior_output['prompt_embeds'] + 0.4 * pipe_prior_output2['prompt_embeds']

#     images = pipe(
#         guidance_scale=2.5,
#         num_inference_steps=test_steps,
#         generator=torch.Generator("cpu").manual_seed(0),
#         # ip_adapter_image=image2,
#         **pipe_prior_output,
#     ).images
#     images[0].save(save_test_img)

#     break