import torch,pdb,os
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
# from controlnet_aux import CannyDetector
from util_flux import pad_image
from image_gen_aux import DepthPreprocessor

# pdb.set_trace()

# Local checkpoint paths for the FLUX model family.
FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'   # FLUX.1 Redux (image-prompt adapter) weights
FLUX_DEPTH='/home/shengjie/ckp/FLUX.1-Depth-dev'   # FLUX.1 depth-conditioned control model weights
FLUX='/data/models/FLUX___1-dev'                   # base FLUX.1-dev weights

# Depth estimation model checkpoint (Depth-Anything large).
# NOTE(review): name is misspelled ("PREDCITION"); kept as-is because it is
# referenced below — rename everywhere in one pass if cleaned up.
DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'

# Input style examples and output directory for synthesized results.
examples_dir = '/data/shengjie/style_zhenzhi/'
save_dir = '/data/shengjie/synthesis_zhenzhi/'

# All images are normalized to this (width, height) before processing.
target_shape = (1024,1024)

# Use a sorted listing so the chosen example pair is deterministic across runs
# (os.listdir order is arbitrary and filesystem-dependent).
imagefiles = sorted(os.listdir(examples_dir))

# Make sure the output directory exists before any result is written there.
os.makedirs(save_dir, exist_ok=True)

test_img = os.path.join(examples_dir, imagefiles[0])
test_img2 = os.path.join(examples_dir, imagefiles[1])
# Output name: "<stem0><stem1><ext0>" — the two source stems concatenated,
# keeping the first image's extension.
save_test_img = os.path.join(
    save_dir,
    os.path.splitext(imagefiles[0])[0]
    + os.path.splitext(imagefiles[1])[0]
    + os.path.splitext(imagefiles[0])[1],
)

control_image = load_image(test_img)
image2 = load_image(test_img2)
# pad_image returns (padded_image, ...); only the image itself is used here.
control_image, _, _, _, _ = pad_image(control_image)
image2, _, _, _, _ = pad_image(image2)
# Normalize both images to the working resolution.
control_image = control_image.resize(target_shape)
image2 = image2.resize(target_shape)

# Split the control image into its four quadrants, each resized to 1024x1024.
# Each quadrant is depth-processed separately below; the plan (per the original
# notes) is to stitch the results back into one image and resize it to 1024.
half_w = control_image.size[0] // 2
half_h = control_image.size[1] // 2
croped_img = []
for left in (0, half_w):          # left column first, then right column
    for top in (0, half_h):       # within a column: top quadrant, then bottom
        quadrant = control_image.crop((left, top, left + half_w, top + half_h))
        croped_img.append(quadrant.resize(target_shape))

# pdb.set_trace()

# Depth estimator (Depth-Anything) loaded from the local checkpoint.
processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)

# Depth map for each 1024x1024 quadrant; processor(...) returns a sequence
# whose first element is the predicted depth image.
croped_depth = [
    processor(c_i)[0].convert("RGB")
    for c_i in croped_img
]
# pdb.set_trace()  # debug breakpoint disabled — was left active and halted every run

# Depth map for the full control image.
# NOTE(review): original comment said "PIL 768 1024", but the image was resized
# to 1024x1024 above — the size note looks stale; confirm.
control_image = processor(control_image)[0].convert("RGB")

# pdb.set_trace()  # debug breakpoint disabled — was left active and halted every run
