'''
Output manifest schema (sketch — values elided):
{
    "caption": {
        "prompt_embeds": ...,
        "pooled_prompt_embeds": ...,
        "text_ids": ...
    },
    "data": [
        {
            "img1": ...,
            "img1_depth": ...,
            "img2": ...
        }
    ]
}
'''
import os,pdb,shutil,json
from os.path import exists as ope
from os.path import join as osj
from PIL import Image,ImageDraw
from util_flux import process_img_1024
from tqdm import tqdm
# from caption_generate_from_img_by_joycaption import generate_caption_from_img
# from flux_extract_from_fill import generate_emb_from_caption,save_emb
def horizontal_concat_images(images):
    """Concatenate images side by side on a white canvas.

    Args:
        images: non-empty list of PIL.Image objects.

    Returns:
        A new RGB PIL.Image whose width is the sum of the input widths and
        whose height is the tallest input. Shorter images are top-aligned;
        uncovered area below them stays white. No gaps or separator lines
        are drawn between images.

    Raises:
        ValueError: if *images* is empty.
    """
    if not images:
        # The original zip-unpack would also raise ValueError here, but with
        # an opaque message; fail fast with an explicit one instead.
        raise ValueError('horizontal_concat_images requires at least one image')

    widths, heights = zip(*(img.size for img in images))
    canvas = Image.new('RGB', (sum(widths), max(heights)), color='white')

    # Paste each image flush against the previous one, left to right.
    x_offset = 0
    for img in images:
        canvas.paste(img, (x_offset, 0))
        x_offset += img.width

    return canvas
# Filenames of the cached text-embedding tensors produced by an upstream
# extraction step.
# NOTE(review): PROMPT / POOL_PROMPT / TEXT_ID and caption_dir are not
# referenced anywhere in this chunk — confirm they are used elsewhere.
PROMPT = 'prompt_embeds.pth'
POOL_PROMPT = 'pooled_prompt_embeds.pth'
TEXT_ID = 'text_ids.pth'
caption_dir = '/data/shengjie/style_zhenzhi_caption/'

# Path of the JSON manifest written at the end of the script.
save_path = 'local_data_for_concat.json'


# Accumulates one record per source image, across all garment types.
will_save = {'data':[]}

# Garment regions to process; each has its own set of dataset directories.
types = ['collar','sleeve','pockets']

for choose_type in types:
    # Per-type dataset directories: source images, depth maps, local-feature
    # crops, the destination for concatenated triptychs, and cached prompt
    # embeddings (one .pth per source .jpg).
    img_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced'
    depth_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced_depth'
    localimg_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_localimg'
    concat_save_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_concat'
    img_emb_dir = f'/mnt/nas/shengjie/datasets/cloth_{choose_type}_balanced_embed'
    # Fail fast with a real exception — `assert` is stripped under `python -O`.
    for required_dir in (img_dir, depth_dir, localimg_dir, img_emb_dir):
        if not ope(required_dir):
            raise FileNotFoundError(required_dir)

    # Rebuild the output directory from scratch on every run.
    if ope(concat_save_dir):
        shutil.rmtree(concat_save_dir)
    os.makedirs(concat_save_dir)

    # sorted() makes the manifest order deterministic across filesystems/runs.
    for filename in tqdm(sorted(os.listdir(img_dir))):
        if not filename.endswith('.jpg'):
            continue
        imgname = filename
        embname = filename.replace('.jpg', '.pth')
        depthname = filename.replace('.jpg', '.png')
        localimg_name = filename

        img_path = osj(img_dir, imgname)
        img_emb_path = osj(img_emb_dir, embname)
        depth_path = osj(depth_dir, depthname)
        localimg_path = osj(localimg_dir, localimg_name)
        # Every companion file must exist for this sample to be usable.
        for required_file in (img_path, img_emb_path, depth_path, localimg_path):
            if not ope(required_file):
                raise FileNotFoundError(required_file)

        # Normalize each component to the shared 1024 format used downstream.
        img = process_img_1024(img_path)
        depth = process_img_1024(depth_path)
        local_img = process_img_1024(localimg_path)

        # Triptych layout: [source | depth | local feature], left to right.
        concat_img = horizontal_concat_images([img, depth, local_img])

        # e.g. "x_and_x_and_x.jpg" — all three stems derive from one filename.
        save_name = '_and_'.join(
            os.path.splitext(n)[0] for n in (imgname, depthname, localimg_name)
        ) + '.jpg'

        # NOTE(review): save_emb_name is never used below — confirm whether an
        # embedding copy step was intended here.
        save_emb_name = save_name.replace('.jpg', '.pth')
        concat_img_path = os.path.join(concat_save_dir, save_name)

        concat_img.save(concat_img_path)

        will_save['data'].append({
            'img_path': img_path,
            'depth_path': depth_path,
            # NOTE(review): key says "emb" but the value is the local *image*
            # path — confirm downstream readers expect exactly this.
            'localimg_emb_path': localimg_path,
            'concat_img_path': concat_img_path,

            'prompt_embed_path': img_emb_path,
        })

# Write the manifest; ensure_ascii=False keeps any non-ASCII path characters
# human-readable. (`json` is already imported at the top of the file.)
with open(save_path, 'w', encoding='utf-8') as f:
    json.dump(will_save, f, indent=4, ensure_ascii=False)

