'''
Depth-restoration inference with FLUX Kontext.

Pipeline overview:
1. Prompt: "[Erase the Depth Content]"
2. Input: a depth image
3. Load LoRA weights (per checkpoint) into the Kontext pipeline
4. Run kontext(prompt, depth_img) and save a side-by-side preview
'''
import os
import sys

# Shorthand aliases for path helpers (kept at module level for reuse below).
osa = os.path.abspath
osd = os.path.dirname

# Prepend the parent directory to sys.path so the sibling `utils` package resolves.
cur_dir = osd(osa(__file__))
par_dir = osd(cur_dir)
sys.path.insert(0, par_dir)

# Pin the script to GPU 7; must be set before torch creates any CUDA context.
os.environ['CUDA_VISIBLE_DEVICES'] = '7'


from utils.util_for_os import ose,osj

# Evaluation dataset (list of caption + concatenated-image pairs, see format below).
data_path = 'data_restore_depth_150.json'
# Root directory containing `checkpoint-<steps>/pytorch_lora_weights.safetensors`.
lora_dir = '/mnt/nas/shengjie/depth_restore_output_0829/'


def get_loar_path(ckp_id):
    """Return the LoRA safetensors path for checkpoint id `ckp_id`.

    Checkpoints are saved every 100 steps, so id N maps to `checkpoint-N00`
    (e.g. 3 -> checkpoint-300). Name kept as-is (historical typo) because
    callers below reference it.
    """
    return os.path.join(lora_dir, f'checkpoint-{ckp_id}00',
                        'pytorch_lora_weights.safetensors')
''' format
{
  "data": [
    {
      "captions": "[Erase the Depth Content]",
      "img_path": "/mnt/nas/shengjie/datasets/dataset_depth_control_depth/1756281228634.jpg"
    },...
'''
import torch
from diffusers import FluxKontextPipeline
from diffusers import FluxControlPipeline, FluxPriorReduxPipeline

import gc

from utils.MODEL_CKP import FLUX_KONTEXT,FLUX_REDUX
from utils.util_flux import process_img_1024,horizontal_concat_images

# Main Kontext editing pipeline, bf16, moved to GPU (`.to` returns the pipeline).
pipe = FluxKontextPipeline.from_pretrained(
    FLUX_KONTEXT,
    torch_dtype=torch.bfloat16,
).to("cuda")

# Text-encoder-only copy for prompt embedding: transformer and VAE are stripped
# (set to None) so this pipeline costs far less VRAM.
text_encoding_pipeline = FluxControlPipeline.from_pretrained(
    FLUX_KONTEXT,
    transformer=None,
    vae=None,
    torch_dtype=torch.bfloat16,
).to("cuda")

import json
import pdb
from PIL import Image

with open(data_path, encoding='utf-8') as f:
    data = json.load(f)

# Valid checkpoint ids are "1".."10" (each maps to steps id*100, see get_loar_path).
ckp_id = input('input ckp id:').strip()
if ckp_id in {str(i) for i in range(1, 11)}:
    # Release cached allocations before loading LoRA weights onto the GPU.
    torch.cuda.empty_cache()
    gc.collect()

    print(f'loading lora {ckp_id}')
    lora_path = get_loar_path(ckp_id)
    pipe.load_lora_weights(lora_path)

    torch.cuda.empty_cache()
    gc.collect()

    for item in data['data']:
        captions = item['captions']  # edit instruction, e.g. "[Erase the Depth Content]"
        img_path = item['img_path']  # path to a horizontally concatenated image

        concat_img = Image.open(img_path)
        # Split the concatenated image: left 1024x1024 is the depth map,
        # the rightmost 1024 px column is the ground-truth restored image.
        w, h = concat_img.size
        depth_img = concat_img.crop((0, 0, 1024, 1024))
        restored_img = concat_img.crop((w - 1024, 0, w, h))
        depth_img = process_img_1024('', img_pil=depth_img, pad_color=(0, 0, 0))

        with torch.no_grad():
            # Encode the prompt with the lightweight text-encoder-only pipeline.
            prompt_embeds, pooled_prompt_embeds, text_ids = text_encoding_pipeline.encode_prompt(
                captions, prompt_2=None
            )
            image = pipe(
                prompt_embeds=prompt_embeds,
                pooled_prompt_embeds=pooled_prompt_embeds,
                image=depth_img,
                height=depth_img.height,
                width=depth_img.width,
                num_inference_steps=10,
                guidance_scale=4.5,
            ).images[0]

        # Side-by-side preview [input depth | ground truth | model output];
        # deliberately overwritten each iteration — inspected at the breakpoint.
        horizontal_concat_images([depth_img, restored_img, image]).save('tmp_restore_depth.jpg')
        pdb.set_trace()  # pause after each sample for manual inspection
else:
    # Previously an invalid id exited silently; make the failure explicit.
    print(f'invalid ckp id {ckp_id!r}; expected an integer 1-10')