import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch,pdb
# from diffusers import  FluxFillPipeline,FluxPipeline
from pipeline_flux import FluxPipeline
from diffusers.utils import load_image
from util_flux import horizontal_concat_images
from PIL import Image

# --- Model checkpoint locations --------------------------------------------
FLUX_FILL = '/home/shengjie/ckp/FLUX.1-Fill-dev'
FLUX_REDUX = '/home/shengjie/ckp/FLUX.1-Redux-dev'
FLUX_ADAPTER = '/home/shengjie/ckp/flux-ip-adapter-v2'
FLUX_ADAPTER_ENCODER = '/home/shengjie/ckp/clip-vit-large-patch14'
FLUX = '/data/models/FLUX___1-dev'

# --- LoRA checkpoints produced by training ---------------------------------
FLUX_LORA_DIR = '/mnt/nas/shengjie/zhenzhi_output/'
# Training checkpoint directories are identified by 'checkpoint' in the name.
FLUX_LORA_PATHS = [d for d in os.listdir(FLUX_LORA_DIR) if 'checkpoint' in d]
# Sanity check: the first checkpoint must carry the expected weights file.
tmp_lora_path = os.path.join(FLUX_LORA_DIR,
                             FLUX_LORA_PATHS[0],
                             'pytorch_lora_weights.safetensors')
assert os.path.exists(tmp_lora_path)

# --- Inference hyper-parameters --------------------------------------------
jsonl_for_train = 'zhenzhi_data.json'
guidance_scale = 30.0
steps = 8
weight_dtype = torch.bfloat16

from caption_generate_from_img_by_joycaption import generate_caption_from_img
import caption_generate_from_img_by_joycaption

texture_path = '/data/shengjie/style_zhenzhi/img_3.jpg'
shape_path = '/data/shengjie/style_zhenzhi/img_2.jpg'

# Auto-caption both reference images with JoyCaption.
texture_prompt = generate_caption_from_img(texture_path)
shape_prompt = generate_caption_from_img(shape_path)

# Composite prompt describing the three-panel layout the model should render.
caption = (
    f"This is a composite image showing:"
    f"[IMAGE1] A complete character design with [IMAGE2]'s texture style and [IMAGE3]'s clothing pattern."
    f"[IMAGE2] Close-up texture reference showing '{texture_prompt}'."
    f"[IMAGE3] Garment reference featuring '{shape_prompt}'."
)

# Delete the JoyCaption model to free GPU memory before loading FLUX.
if hasattr(caption_generate_from_img_by_joycaption, 'llava_model'):
    del caption_generate_from_img_by_joycaption.llava_model
torch.cuda.empty_cache()

# pdb.set_trace()

# from flux_extract_from_fill import generate_emb_from_caption
# import flux_extract_from_fill
# prompt_embeds, pooled_prompt_embeds, text_ids = generate_emb_from_caption(caption)
# torch.Size([1, 512, 4096])  torch.Size([1, 768])
## Delete the Flux_fill pipeline (free its memory)
# if hasattr(flux_extract_from_fill,'pipe'):
#     del flux_extract_from_fill.pipe
# torch.cuda.empty_cache()

# pdb.set_trace()

# pdb.set_trace()
# pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
#                                     FLUX_REDUX, 
#                                     torch_dtype=torch.bfloat16).to("cuda")
# Load the base FLUX text-to-image pipeline onto the GPU.
pipe = FluxPipeline.from_pretrained(
    FLUX,
    torch_dtype=weight_dtype,
).to("cuda")

# Pre-compute prompt embeddings once, outside of autograd. The first text
# encoder gets only the first sentence of the caption while the second gets
# the full text — presumably the CLIP vs. T5 branches; confirm against
# pipeline_flux.FluxPipeline.encode_prompt.
with torch.no_grad():
    prompt_embeds, pooled_prompt_embeds, text_ids = pipe.encode_prompt(
        prompt=[caption.split('.')[0]],
        prompt_2=[caption],
    )
# pdb.set_trace()
# pipe = FluxFillPipeline.from_pretrained(FLUX_FILL, 
#                                         torch_dtype=torch.bfloat16).to("cuda")

# pipe.load_lora_weights(tmp_lora_path)

# import json
# with open(jsonl_for_train) as f:
#     data = json.load(f)
# pdb.set_trace()
# emb_path = data['data'][0]['concat_emb']
# emb_ckp = torch.load(emb_path,map_location=pipe.device)
# prompt_embeds = emb_ckp['prompt_embeds']
# pooled_prompt_embeds = emb_ckp['pooled_prompt_embeds']
# prompt_embeds_path = data['caption']['prompt_embeds']   
# pooled_prompt_embeds_path = data['caption']['pooled_prompt_embeds']   
# text_ids_path = data['caption']['text_ids']   
# del data
# prompt_embeds = torch.load(prompt_embeds_path, 
#                             map_location=pipe.device)
# pooled_prompt_embeds = torch.load(pooled_prompt_embeds_path, 
#                                     map_location=pipe.device)

# pdb.set_trace()

def encode_images(pixels: torch.Tensor, vae: torch.nn.Module, weight_dtype):
    """Encode pixel tensors into the VAE's scaled latent space.

    Moves `pixels` onto the VAE's device/dtype, samples from the encoder's
    latent distribution, then applies the model's shift/scale normalization.
    Returns the latents cast to `weight_dtype`.
    """
    moved = pixels.to(device=vae.device, dtype=vae.dtype)
    latents = vae.encode(moved).latent_dist.sample()
    normalized = (latents - vae.config.shift_factor) * vae.config.scaling_factor
    return normalized.to(weight_dtype)

# depth_path = data['data'][0]['img1_depth']
# texture_path = data['data'][0]['img2_texture']
# from PIL import Image
# depth_pil = Image.open(depth_path).convert('RGB')
# texture_pil = Image.open(texture_path).convert('RGB')
# Output canvas: three 1024x1024 panels side by side, as (width, height).
target_shape = (1024 * 3, 1024)
# NOTE(review): when the depth/texture condition path is enabled, the packed
# condition latents appear to have shape [1, 576, 64] — confirm if re-enabled.
# pdb.set_trace()
def test_one(ckp_id=10000):
    """Generate one image with the LoRA from checkpoint `ckp_id` and save a
    side-by-side comparison (generated | texture ref | shape ref) to tmp.jpg.

    Args:
        ckp_id: integer identifying the training checkpoint, matched by
            substring against the checkpoint directory names.

    Raises:
        ValueError: if no checkpoint directory matches `ckp_id`
            (previously an opaque IndexError).
    """
    # Pick the checkpoint directory whose name contains the requested id.
    matches = [p for p in FLUX_LORA_PATHS if str(ckp_id) in p]
    if not matches:
        raise ValueError(f'no checkpoint directory matches id {ckp_id}')
    lora_path = os.path.join(FLUX_LORA_DIR,
                             matches[0],
                             'pytorch_lora_weights.safetensors')
    assert os.path.exists(lora_path)

    pipe.load_lora_weights(lora_path)
    torch.cuda.empty_cache()

    image = pipe(
        height=target_shape[1],
        width=target_shape[0],
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
    ).images[0]

    # Save the generated image next to its two reference images for review.
    # (The earlier standalone save + pdb breakpoint were removed: the concat
    # save below immediately overwrote that same 'tmp.jpg' anyway.)
    concat_img = horizontal_concat_images(
        [image, Image.open(texture_path), Image.open(shape_path)])
    concat_img.save('tmp.jpg')
def test_all():
    """Generate one comparison image per available LoRA checkpoint.

    Fixes over the previous version: each result is saved under a distinct
    name derived from the checkpoint directory (previously every iteration
    clobbered 'tmp.jpg', so only the last image survived), and the per-
    iteration pdb breakpoint is removed so the loop runs unattended.
    """
    for ckp_name in FLUX_LORA_PATHS:
        lora_path = os.path.join(FLUX_LORA_DIR,
                                 ckp_name,
                                 'pytorch_lora_weights.safetensors')
        # Some checkpoint directories may lack the weights file; skip those.
        if not os.path.exists(lora_path):
            continue

        pipe.load_lora_weights(lora_path)
        torch.cuda.empty_cache()

        image = pipe(
            height=target_shape[1],
            width=target_shape[0],
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
        ).images[0]

        print(f'generate {ckp_name}')
        image.save(f'tmp_{ckp_name}.jpg')

# pdb.set_trace()
if __name__ == '__main__':
    # Default entry point: evaluate the 10000-step checkpoint.
    test_one(ckp_id=10000)