import os
# Pin this process to GPU 1; must be set before torch/diffusers initialize CUDA.
os.environ['CUDA_VISIBLE_DEVICES']='1'
# NOTE(review): `ose`/`osj` are presumably aliases for os.path.exists /
# os.path.join (osj is clearly used as a join below) — confirm in util_for_os.
from util_for_os import ose,osj
# First, build the fixed prompt embeddings for each garment type {t}.
# The sketch below documents the on-disk layout this script produces.
'''
caption:{
    'fullbody':.....bin   => {prompt_emb:.. , pooled_prompt_emb:... }
    'lower':....bin
    'upper':...bin
}
data:{
    {
        t : caption[t],
        clo_path : '',
        human_path : '', 
    }
}
'''
# Garment categories for which prompt embeddings are precomputed.
types = ['upper','lower','fullbody']


def get_fixed_prompt_for_clip(t):
    """Return the fixed CLIP prompt for garment type *t* (currently just the type name)."""
    # Earlier, more descriptive variants kept for reference:
    # f'An image of a garment. Focus on the {t} garment the human wearing, and isolate it from the rest.'
    # f'An in-shop display of a human {t}.'
    return f'{t}'


def get_fixed_prompt_for_t5(t):
    """Return the fixed T5 prompt for garment type *t* (currently just the type name)."""
    # Earlier, more descriptive variants kept for reference:
    # f'An image of a garment. Focus on the {t} garment the human wearing, and isolate it from the rest. A high-quality product photo of the {t} garment on a white background, centered, no model, studio lighting.'
    # f'[Image1] shows the {t} as in a product display. [Image2] shows a person wearing the {t} from Image1'
    return f'{t}'

# Project-local constant holding the FLUX Fill checkpoint path/name.
from MODEL_CKP import FLUX_FILL
from diffusers import FluxFillPipeline
import torch,shutil,pdb
# Load the Flux Fill pipeline in bfloat16 on the single visible GPU
# (CUDA_VISIBLE_DEVICES was restricted to device 1 at the top of the file).
# Only its text encoders are used below, via pipe.encode_prompt.
pipe = FluxFillPipeline.from_pretrained(
                                    FLUX_FILL, 
                                    torch_dtype=torch.bfloat16).to("cuda")


# Directory where the per-type prompt-embedding .bin files are written.
save_prompt_dir = '/mnt/nas/shengjie/datasets/caption_for_tryoff_simple'

# Rebuild the output directory from scratch on every run so stale
# embeddings from a previous configuration cannot leak into this one.
if ose(save_prompt_dir):
    shutil.rmtree(save_prompt_dir)
os.makedirs(save_prompt_dir)

# Precompute prompt embeddings once per garment type and cache them on disk,
# so the downstream data pipeline can load tensors instead of re-running the
# text encoders for every sample.
caption = {}  # garment type -> path of its saved embedding .bin file
for t in types:
    fixed_prompt_for_clip = get_fixed_prompt_for_clip(t)
    fixed_prompt_for_t5 = get_fixed_prompt_for_t5(t)


    # pdb.set_trace()
    # Encode with both text encoders (CLIP via `prompt`, T5 via `prompt_2`);
    # no gradients are needed for this offline preprocessing step.
    with torch.no_grad():
        prompt_embeds, pooled_prompt_embeds,text_ids = \
                        pipe.encode_prompt(
                            prompt=fixed_prompt_for_clip,
                            prompt_2=fixed_prompt_for_t5,
                        )
    

    # Collapse the leading (batch) dimension by summing, keeping it as size 1.
    # NOTE(review): labelled "weighted sum" originally, but this is an
    # unweighted torch.sum; with a single prompt per call it is effectively a
    # no-op — confirm the intent before changing.
    prompt_embeds = torch.sum(prompt_embeds, dim=0, keepdim=True)
    pooled_prompt_embeds = torch.sum(pooled_prompt_embeds, dim=0, keepdim=True)

    save_path = osj(save_prompt_dir,f'prompt_{t}.bin')

    # Persist everything needed to reconstruct the text conditioning later.
    torch.save({
        'prompt_embeds':prompt_embeds,
        'pooled_prompt_embeds':pooled_prompt_embeds,
        'text_ids':text_ids,
    }, save_path  )

    caption[t] = save_path

    # pdb.set_trace()
    print('save to ',save_path)
# exit(0)
# pdb.set_trace()

# Output path for the dataset index built below.
save_json = 'tryoff_data_simple.json'
import json

# ---------------- datasets ----------------
# Per-dataset layout: root directory, garment types handled, cloth/image
# sub-directories (parallel lists, zipped per type), and a rule mapping a
# cloth filename to its paired human-image filename.
datasets_map = {
    'dc': {
        'ori_dir': '/mnt/nas/shengjie/datasets/DressCode_1024',
        'types': [types[0], types[1], types[2]],
        'clo_dir': ['upper/cloth', 'lower/cloth', 'dresses/cloth'],
        'human_dir': ['upper/image', 'lower/image', 'dresses/image'],
        # DressCode pairing: cloth '*_1.jpg' <-> human '*_0.jpg'.
        'get_human_name_by_clo': lambda clo: clo.replace('_1', '_0'),
    },
    'viton': {
        'ori_dir': '/mnt/nas/shengjie/datasets/VITON-HD_ori',
        'types': [types[0]],
        'clo_dir': ['train/cloth'],
        'human_dir': ['train/image'],
        # VITON-HD uses the same filename for the cloth and the human image.
        'get_human_name_by_clo': lambda clo: clo,
    },
}

# Accumulator for the JSON index; each entry carries
# {'type', 'emb_path', 'clo_path', 'human_path'}.
will_save = {'data': []}

# Count how many files were indexed per garment type.
from collections import defaultdict
record = defaultdict(int)

# Walk every configured dataset and collect (embedding, cloth, human) triples.
for dataset, detail_item in datasets_map.items():
    print(dataset)
    ori_dir = detail_item['ori_dir']
    detail_types = detail_item['types']
    clo_dir = detail_item['clo_dir']
    human_dir = detail_item['human_dir']

    get_human_name_by_clo = detail_item['get_human_name_by_clo']
    for t, c_dir, h_dir in zip(detail_types, clo_dir, human_dir):

        print('\t', c_dir, '\t', h_dir)
        emb_path = caption[t]
        # emb_path is constant for this (dataset, type) pair — validate once
        # here instead of once per file. Raise instead of `assert` so the
        # check still runs under `python -O`.
        if not ose(emb_path):
            raise FileNotFoundError(emb_path)

        detail_c_dir = osj(ori_dir, c_dir)
        detail_h_dir = osj(ori_dir, h_dir)

        count = 0
        for entry in os.scandir(detail_c_dir):
            # Only plain .jpg files are dataset samples.
            if not entry.is_file():
                continue
            filename = entry.name
            if not filename.endswith('.jpg'):
                continue

            count += 1
            print('\rprocess idx: ', count, end='', flush=True)

            human_name = get_human_name_by_clo(filename)
            clo_path = osj(detail_c_dir, filename)
            human_path = osj(detail_h_dir, human_name)

            # Fail fast on broken cloth/human pairings.
            if not ose(clo_path):
                raise FileNotFoundError(clo_path)
            if not ose(human_path):
                raise FileNotFoundError(human_path)

            will_save['data'].append({
                'type': t,
                'emb_path': emb_path,
                'clo_path': clo_path,
                'human_path': human_path,
            })
        print(f'\tprocess_num: {count}')

        record[t] += count

# Count files per category (legacy variant that re-read the saved JSON):
# from collections import defaultdict
# record = defaultdict(int)
# with open(save_json) as f:
#     data = json.load(f)
# for d in data['data']:
#     emb_path = d['emb_path']
#     t = os.path.splitext(os.path.basename(emb_path))[0].split('_')[1]
#     d['type']=t
#     record[t]+=1

# # Extra step: balance the number of samples across categories.
# # NOTE(review): this disabled loop removes items from the list while
# # iterating over it, which skips elements — fix before re-enabling.
# tar_count = min( record.values() )
# record = defaultdict(int)
# for item in will_save['data']:
#     record[item['type']] += 1
#     if record[item['type']] > tar_count:
#         will_save['data'].remove( item )
#         record[item['type']] -= 1
# Persist the assembled dataset index as pretty-printed UTF-8 JSON.
with open(save_json, 'w', encoding='utf-8') as f:
    f.write(json.dumps(will_save, ensure_ascii=False, indent=4))