import torch,pdb,os
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from controlnet_aux import CannyDetector
from util_flux import pad_image
# from image_gen_aux import DepthPreprocessor

# pdb.set_trace()

# Local model checkpoint paths.
FLUX_REDUX = '/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_DEPTH = '/home/shengjie/ckp/FLUX.1-Depth-dev'
FLUX = '/data/models/FLUX___1-dev'

# NOTE(review): name has a typo ("PREDCITION"); kept unchanged because the
# commented-out DepthPreprocessor code further down still references it.
DEPTH_PREDCITION = '/home/shengjie/ckp/depth-anything-large-hf'

# Input style examples and output directory.
examples_dir = '/data/shengjie/style_zhenzhi/'
save_dir = '/data/shengjie/synthesis_zhenzhi/'

target_shape = (1024, 1024)

# BUG FIX: os.listdir order is arbitrary and platform-dependent; sort so the
# same two example images are selected on every run.
imagefiles = sorted(os.listdir(examples_dir))

test_img = os.path.join(examples_dir, imagefiles[0])
test_img2 = os.path.join(examples_dir, imagefiles[1])

# Output filename: "<stem of img0><stem of img1><extension of img0>".
stem0, ext0 = os.path.splitext(imagefiles[0])
stem1, _ext1 = os.path.splitext(imagefiles[1])
save_test_img = os.path.join(save_dir, stem0 + stem1 + ext0)

# BUG FIX: ensure the output directory exists before anything tries to save.
os.makedirs(save_dir, exist_ok=True)
# Load the two example images, square-pad them (pad_image returns the padded
# image plus four padding offsets, which are discarded here), and extract
# Canny edge maps for both.
control_image = load_image(test_img)
image2 = load_image(test_img2)

control_image, *_ = pad_image(control_image)
image2, *_ = pad_image(image2)
control_image = control_image.resize(target_shape)

processor = CannyDetector()
canny_kwargs = dict(
    low_threshold=50,
    high_threshold=200,
    detect_resolution=1024,
    image_resolution=1024,
)
control_image_canny = processor(control_image, **canny_kwargs)
image2_canny = processor(image2, **canny_kwargs)

pdb.set_trace()


# Working resolution (re-declared with the same value as above).
target_shape = (1024, 1024)

# Imported here, not at the top, so the heavy Lotus depth model is only
# loaded when this stage runs.
from lotus.app_infer_depth import get_depth_by_lotus

output_d, output_g = get_depth_by_lotus(test_img, 0)    # two PIL images, ~922x1050
output_d2, output_g2 = get_depth_by_lotus(test_img2, 0) # two PIL images, ~922x1050

base_image, *_ = pad_image(output_d2)      # appearance reference for Redux
control_image, *_ = pad_image(output_d)    # provides the pattern/structure
base_image = base_image.resize(target_shape)
control_image = control_image.resize(target_shape)

# Encode the appearance reference with the FLUX Redux prior, then free the
# model immediately to reclaim GPU memory for the main pipeline.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
    FLUX_REDUX,
    torch_dtype=torch.bfloat16,
).to("cuda")
pipe_prior_output2 = pipe_prior_redux(base_image)  # has .prompt_embeds, shape [1, 1241, 4096]
del pipe_prior_redux
torch.cuda.empty_cache()

# pdb.set_trace()

# processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)
# control_image = processor(control_image)[0].convert("RGB") # PIL 768 1024
# w,h = control_image.size
# del processor

# Depth-conditioned FLUX control pipeline: structure comes from control_image,
# appearance comes from the Redux prior embeddings (no text prompt is given).
pipe = FluxControlPipeline.from_pretrained(FLUX_DEPTH, torch_dtype=torch.bfloat16).to("cuda")

image = pipe(
    # prompt=prompt,
    control_image=control_image,
    height=target_shape[1],
    width=target_shape[0],
    num_inference_steps=8,  # NOTE(review): very low for FLUX-dev — confirm intentional
    guidance_scale=10.0,
    # generator=torch.Generator().manual_seed(42),
    **pipe_prior_output2,
).images[0]

from util_flux import horizontal_concat_images

# Debug sheet: appearance ref | depth control | generated result.
# BUG FIX: the original saved to '/tmp.png' (filesystem root, normally not
# writable → PermissionError); write into /tmp instead.
concat_img = horizontal_concat_images([base_image, control_image, image])
concat_img.save('/tmp/tmp.png')
# pdb.set_trace()  # BUG FIX: leftover active breakpoint removed so the script runs unattended.

os.makedirs(save_dir, exist_ok=True)  # BUG FIX: guard against a missing output dir
image.save(save_test_img)

# pipe = FluxControlPipeline.from_pretrained(FLUX, torch_dtype=torch.bfloat16).to("cuda")
# pipe.load_lora_weights("black-forest-labs/FLUX.1-Depth-dev-lora", adapter_name="depth")
# pipe.set_adapters("depth", 0.85)

# prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
# control_image = load_image("https://hf-mirror.com/datasets/huggingface/documentation-images/resolve/main/robot.png")

# processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
# control_image = processor(control_image)[0].convert("RGB")

# image = pipe(
#     prompt=prompt,
#     control_image=control_image,
#     height=1024,
#     width=1024,
#     num_inference_steps=30,
#     guidance_scale=10.0,
#     generator=torch.Generator().manual_seed(42),
# ).images[0]
# image.save("output.png")
