'''
Output JSON layout produced by this script:
{
    caption : {
        prompt_embeds: ...
        pooled_prompt_embeds: ...
        text_ids: ...
    },
    data : [
        {
            img1: ...
            img2_texture: ...
            shape_path: ...
            concat_img: ...
        }
    ]
}
'''
import os,pdb,shutil,json
from PIL import Image,ImageDraw
# from caption_generate_from_img_by_joycaption import generate_caption_from_img
# from flux_extract_from_fill import generate_emb_from_caption,save_emb
def horizontal_concat_images(images):
    """Concatenate images horizontally on a white canvas.

    Args:
        images: non-empty list of PIL.Image objects.

    Returns:
        A new RGB PIL.Image whose width is the sum of all input widths and
        whose height is the tallest input's height. Images are pasted left
        to right, top-aligned, with no gap between them.

    Raises:
        ValueError: if ``images`` is empty.
    """
    if not images:
        # Fail clearly instead of an opaque zip() unpacking error.
        raise ValueError('images must be a non-empty list')

    # Canvas size: total width across all images, height of the tallest.
    widths, heights = zip(*(img.size for img in images))
    total_width = sum(widths)
    max_height = max(heights)

    # White background, so shorter images leave a white strip below them.
    new_img = Image.new('RGB', (total_width, max_height), color='white')

    # Paste each image immediately after the previous one (no spacing,
    # no separator lines).
    x_offset = 0
    for img in images:
        new_img.paste(img, (x_offset, 0))
        x_offset += img.width

    return new_img
# File names of the precomputed caption-embedding tensors.
PROMPT = 'prompt_embeds.pth'
POOL_PROMPT = 'pooled_prompt_embeds.pth'
TEXT_ID = 'text_ids.pth'

# Directory holding the shared caption embeddings.
caption_dir = '/data/shengjie/style_zhenzhi_caption/'

# Available dataset variants; pick one by index.
types = ['niukou','niukou-pockets','yinhua']
choose_type = types[2]
# Number of '-'-separated parts in the chosen type name, plus one.
index = len(choose_type.split('-')) + 1

# Per-variant input directories (originals / depth maps / shape masks).
_DATASET_ROOT = '/mnt/nas/shengjie/datasets_zhenzhi'
img_dir = f'{_DATASET_ROOT}/zhenzhi-{choose_type}-ori/'
depth_dir = f'{_DATASET_ROOT}/zhenzhi-{choose_type}-depth/'
shape_dir = f'{_DATASET_ROOT}/zhenzhi-{choose_type}-shape/'

# Every file in the originals directory for the chosen variant.
image_files = os.listdir(img_dir)

# Output directory for the concatenated images; wiped and recreated on
# every run so stale results from a previous run cannot survive.
concat_save_dir = f'/mnt/nas/shengjie/datasets_zhenzhi/zhenzhi-{choose_type}-concat'
# emb_save_dir = '/data/shengjie/concat_zhenzhi_emb/'
if os.path.exists(concat_save_dir):
    shutil.rmtree(concat_save_dir)
# if os.path.exists(emb_save_dir):
#     shutil.rmtree(emb_save_dir)
os.makedirs(concat_save_dir)
# os.makedirs(emb_save_dir)

# NOTE(review): the redundant `import json` that used to live here was
# removed — `json` is already imported at the top of the file.
save_path = f'zhenzhi_{choose_type}_data.json'

# Skeleton of the output JSON: shared caption-embedding paths plus a
# list of per-sample records appended to in the main loop below.
dic = {
    'caption': {
        'prompt_embeds': os.path.join(caption_dir, PROMPT),
        'pooled_prompt_embeds': os.path.join(caption_dir, POOL_PROMPT),
        'text_ids': os.path.join(caption_dir, TEXT_ID),
    },
    'data': [],
}

# Per-sample record template: fields are overwritten and deep-copied for
# every generated concat image. Keys never assigned stay None in the output.
data_template = {
    'img1': None,
    'img1_depth': None,
    'img2_texture': None,
    'shape_path': None,
    'concat_img': None,
    'concat_caption': None,
    'concat_emb': None,
}

# Select the "original" base files among everything in img_dir.
# product()
from itertools import product
from tqdm import tqdm
from copy import deepcopy
image_files = sorted(image_files)
# NOTE(review): the part-count filter (2 extra '-'-separated parts beyond
# the type name) presumably isolates base images without a variant suffix —
# confirm against the actual file naming scheme.
ori_files = [f for f in image_files if len(f.split('-'))==2+len(choose_type.split('-'))]
shape_files = os.listdir(shape_dir)

# pdb.set_trace()

total_save_num_list = []
for ori_f in ori_files:
    # All files sharing this original's basename prefix (the original
    # itself plus any derived variants).
    sifted_file_with_same_prefix = [
        f
        for f in image_files if f.startswith(
            os.path.splitext(ori_f)[0]
        )
    ]

    # pdb.set_trace()

    # Ordered pairs (t1, t2) of distinct files within the prefix group;
    # product(a, a) yields both (x, y) and (y, x).
    for t1,t2 in tqdm(product(sifted_file_with_same_prefix,
                              sifted_file_with_same_prefix)):
        if t1 == t2:continue
        t1_path = os.path.join(img_dir,t1)
        # NOTE(review): t2 is a filename listed from img_dir but resolved
        # inside depth_dir, and later recorded as 'img2_texture' — confirm
        # the depth map really is the intended texture source here.
        t2_path = os.path.join(depth_dir,t2)
        # t1_depth = os.path.join(
        #     depth_dir,
        #     t1.replace('.jpg','.png')
        # )
        # Collect every shape file whose stem (before the first '_')
        # matches t1's stem.
        t1_shape_files = [f  for f in shape_files 
                            if f.split('_')[0]==os.path.splitext(t1)[0]]
        
        print('shape nums : ',len(t1_shape_files))
        # Counted before the emptiness check below, so pairs with no shape
        # files contribute 0 to the final total.
        total_save_num_list.append(len(t1_shape_files))
        # pdb.set_trace()
        if len(t1_shape_files)==0:continue

        for shape_f in t1_shape_files:
            shape_path = os.path.join(shape_dir,shape_f)

            # NOTE(review): asserts are stripped under `python -O`; raise
            # explicitly if these path checks must always run.
            assert os.path.exists(t1_path),'img1 path 不存在'
            # assert os.path.exists(t1_depth),'img1 depth path 不存在'
            assert os.path.exists(t2_path),'img2 path 不存在'
            assert os.path.exists(shape_path),'shape path 不存在'

            t1_pil = Image.open(t1_path)
            t2_texture = Image.open(t2_path)
            t1_shape = Image.open(shape_path)

            # Side-by-side layout: original | depth/texture | shape.
            concat_img = horizontal_concat_images([t1_pil,t2_texture,t1_shape])
            
            # Output name encodes all three source stems.
            save_name = '_and_'.join([
                os.path.splitext(t1)[0],
                os.path.splitext(t2)[0],
                os.path.splitext(shape_f)[0],
            ])+'.jpg'
            save_emb_name = save_name.replace('.jpg','.pth')

            concat_img_path = os.path.join(concat_save_dir,save_name)
            # concat_emb_path = os.path.join(emb_save_dir,save_emb_name)

            concat_img.save( concat_img_path )
            # assert save_emb(
            #     *(generate_emb_from_caption(caption)),
            #                 concat_emb_path )

            # Fill the shared template, then deep-copy it so the appended
            # record is independent of later iterations.
            data_template['img1'] = t1_path
            # data_template['img1_depth'] = t1_depth
            data_template['img2_texture'] = t2_path
            data_template['shape_path'] = shape_path
            data_template['concat_img'] = concat_img_path
            # data_template['concat_caption'] = caption
            # data_template['concat_emb'] = concat_emb_path

            # pdb.set_trace()
            dic['data'].append(
                deepcopy(data_template)
            )

# Report how many records were counted, then write the index JSON.
total = sum(total_save_num_list)
print('save total num : ', total)


# pdb.set_trace()

with open(save_path, 'w', encoding='utf-8') as fh:
    json.dump(dic, fh, indent=4, ensure_ascii=False)

