import os
# Pin all CUDA work to GPU 0; must be set before torch initializes CUDA.
os.environ['CUDA_VISIBLE_DEVICES']='0'
import util_for_huggingface
import torch,pdb
# from diffusers import  FluxPipeline
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline,ReduxImageEncoder
from diffusers.utils import load_image
from util_flux import horizontal_concat_images,process_img_1024
from PIL import Image
from image_gen_aux import DepthPreprocessor

'''
2025-07-07
Evaluate the training results of train_flux_depthredux.py.
Required data:
    1. control image (depth source)
    2. redux reference (local image crop)
'''

# Local checkpoint directories for the FLUX model variants used below.
FLUX_FILL='/home/shengjie/ckp/FLUX.1-Fill-dev'
# NOTE(review): "PREDCITION" is a typo for "PREDICTION"; kept as-is because
# the name is referenced later when building the DepthPreprocessor.
DEPTH_PREDCITION='/home/shengjie/ckp/depth-anything-large-hf'
FLUX_REDUX='/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_DEPTH='/home/shengjie/ckp/FLUX.1-Depth-dev'
FLUX_ADAPTER = '/home/shengjie/ckp/flux-ip-adapter-v2'
FLUX_ADAPTER_ENCODER = '/home/shengjie/ckp/clip-vit-large-patch14'
FLUX='/data/models/FLUX___1-dev'

##############  CLIP: used to obtain pooled prompt embeds ##########
from util_for_clip import get_clip_model

## CLIP processor + model; used below to derive pooled_prompt_embeds from the
## reference image instead of a text prompt.  maxtokens is unused here.
clip_processor,clip_model,maxtokens = get_clip_model()

# Dataset locations: local crops (pockets/sleeve/collar) and the images the
# depth control maps are derived from.
local_img_dir = '/mnt/nas/shengjie/datasets/cloth_collar_localimg'
depth_img_dir = '/mnt/nas/shengjie/datasets/cloth_collar_balanced'

dtype = torch.bfloat16
device = 'cuda'

ckp_id = 1000  # default checkpoint id; overridden interactively in the loop below


def get_embeder_path(ckp_id):
    """Return the image-encoder checkpoint path for training step *ckp_id*."""
    return f'/mnt/nas/shengjie/depth_local_output/checkpoint-{ckp_id}/image_encoder.bin'


def get_lora_path(ckp_id):
    """Return the LoRA weights path for training step *ckp_id*."""
    return f'/mnt/nas/shengjie/depth_local_output/checkpoint-{ckp_id}/pytorch_lora_weights.safetensors'


img_embder_ckp_path = get_embeder_path(ckp_id)
lora_path = get_lora_path(ckp_id)

# NOTE(review): os.listdir order is arbitrary/filesystem-dependent, so which
# files end up as indices [0] and [1] is nondeterministic — sorted() would make
# the pick reproducible; TODO confirm intent.
imagefiles = os.listdir(local_img_dir)

# Reference (local crop) image for redux conditioning.
local_img_path = os.path.join(local_img_dir,imagefiles[0])
# NOTE(review): imagefiles was listed from local_img_dir but imagefiles[1] is
# joined onto depth_img_dir here — this assumes the same filename exists in
# both directories; verify against the dataset layout.
depth_img_path = os.path.join(depth_img_dir,imagefiles[1])
# save_test_img = os.path.join(save_dir,
#                             os.path.splitext(imagefiles[0])[0]+\
#                                 os.path.splitext(imagefiles[1])[0] +\
#                                 os.path.splitext(imagefiles[0])[1]
#                             )
# prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
# Both images are resized/processed to the 1024 working resolution.
local_img = process_img_1024(load_image(local_img_path))
depth_img = process_img_1024(load_image( depth_img_path ))

## local pockets/sleeve/collar
# local_img_path = ''
# local_img = process_img_1024(local_img_path)

# Redux prior pipeline: used only once, to encode the local reference image
# into image latents, then deleted to free GPU memory for the main pipeline.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
                                    FLUX_REDUX, 
                                    torch_dtype=dtype).to(device)
# pipe_prior_output2 = pipe_prior_redux(image2) # attr 'prompt_embeds' torch.Size([1, 1241, 4096]) 
# encode_image(image, device, num_images_per_prompt=1)
image_latents = pipe_prior_redux.encode_image( local_img , pipe_prior_redux.device , 1  )
del pipe_prior_redux
torch.cuda.empty_cache()

# Output resolution passed to the pipeline as (width, height).
target_shape = (1024,1024)
# Depth-Anything preprocessor: depth_img -> single-channel depth map -> RGB
# control image for FluxControlPipeline.
processor = DepthPreprocessor.from_pretrained(DEPTH_PREDCITION)
control_image = processor(depth_img)[0].convert("RGB")
# from lotus.app_infer_depth import get_depth_by_lotus
# output_d,output_g = get_depth_by_lotus(depth_img,0) # PIL*2    922*1050
# control_image = process_img_1024(output_g)


torch.cuda.empty_cache()

# Fail fast with an explicit error (assert would be stripped under -O) before
# paying for the full model load.
if not os.path.exists(lora_path):
    raise FileNotFoundError(lora_path)
# Main depth-control pipeline; LoRA weights are attached per-checkpoint in the
# interactive loop below.
pipe = FluxControlPipeline.from_pretrained(FLUX_DEPTH, torch_dtype=dtype).to(device)

# CLIP image features of the reference image stand in for the pooled text
# embedding the pipeline normally expects (no text prompt is used).
image_feature = clip_processor(images=local_img)['pixel_values'][0]
image_feature = torch.tensor(image_feature).unsqueeze(0).cuda()

pooled_prompt_embeds = clip_model.get_image_features(pixel_values=image_feature)  # shape (1, 768)
pooled_prompt_embeds = pooled_prompt_embeds.to(device, dtype=dtype)

# Freshly constructed Redux image encoder; the trained weights are loaded from
# the selected checkpoint inside the loop below.
img_embder = ReduxImageEncoder()




while True:
    try:
        ckp_id = int(input('input ckp id:'))

        # Resolve both checkpoint paths from the *requested* id before loading
        # anything.  (Previously the paths were recomputed only after
        # torch.load, so each iteration loaded the embedder weights of the
        # previously entered checkpoint while the LoRA used the new one.)
        img_embder_ckp_path = get_embeder_path(ckp_id)
        lora_path = get_lora_path(ckp_id)

        # Load the trained Redux image-encoder weights for this checkpoint.
        img_embder_state_dict = torch.load(img_embder_ckp_path)
        img_embder.load_state_dict(img_embder_state_dict)
        img_embder = img_embder.to(device, dtype=dtype)

        # Swap in this checkpoint's LoRA.  Unload the previous adapter first so
        # repeated iterations do not stack LoRA weights on the pipeline.
        pipe.unload_lora_weights()
        pipe.load_lora_weights(lora_path)

        torch.cuda.empty_cache()

        # Project the redux image latents into prompt-embedding space.
        image_embeds = img_embder(image_latents).image_embeds.to(device, dtype=dtype)
        # Prepend a block of 512 zero embeddings ahead of the image-derived
        # tokens (per the original layout used at training time — confirm
        # against train_flux_depthredux.py).
        prompt_embeds = torch.zeros((1, 512, 4096)).to(device, dtype=dtype)
        prompt_embeds = torch.cat([prompt_embeds, image_embeds], dim=1)

        image = pipe(
            control_image=control_image,
            height=target_shape[1],
            width=target_shape[0],
            num_inference_steps=20,
            guidance_scale=10.0,
            generator=torch.Generator().manual_seed(42),  # fixed seed for comparability across checkpoints
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
        ).images[0]

        # Side-by-side preview: reference | depth control | generated result.
        concat_res = horizontal_concat_images([local_img, control_image, image])
        concat_res.save('tmp.jpg')
    except Exception as e:
        # Broad catch is the loop's exit path: any failure (bad input, EOF,
        # missing checkpoint, OOM) is printed and ends the interactive loop.
        print(e)
        break


# pdb.set_trace()
# image.save(image)