import jsonlines
from torch import eig, le
from torchvision.transforms.functional import convert_image_dtype
from PIL import Image,ImageDraw


# Local filesystem roots holding the JSONL metadata files that describe each dataset.
# NOTE: 'trainning' is the actual on-disk directory spelling — do not "fix" it.
JSONL_PREFIX = '/mnt/afs/luotianhang/trainning_data/image2image/jsonl_data/'
JSONL_PREFIX2 = '/mnt/afs/luotianhang/jsonlines_file/'
JSONL_PREFIX_TEXT = '/mnt/afs/luotianhang/trainning_data/text2image/'

# Object-storage (s3-style) root under which the image payloads referenced below live.
AOSS_PREFIX = 'cluster_lth_yun:s3://cabin-aigc/AIGC/'
# --- Pet datasets generated with SDXL inpainting ---
# ('inpanting' spelling below matches the real on-disk directory names.)
# Each entry: meta_file = local JSONL metadata; raw_prefix / edited_prefix /
# mask_prefix = storage roots for the source, edited and mask images;
# can_use_mask = whether the mask images may be consumed (False everywhere here).
data_5_pet2_0 = dict(
    type='Image2ImageDataset',
    meta_file     =        JSONL_PREFIX +'made_by_sdxl_inpanting/pet/image2image_metadata2_0_fix_loc_addmask_add_vqa_loc_x1y1x2y2_fixed_blip_vqa.jsonl',
    raw_prefix    =       AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =       AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_0/',
    mask_prefix   =       AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_0_mask/',
    can_use_mask = False,
)
data_5_pet2_1 = dict(
    type='Image2ImageDataset',
    meta_file =       JSONL_PREFIX +'made_by_sdxl_inpanting/pet/image2image_metadata2_1_fix_loc_addmask_add_vqa_loc_x1y1x2y2_fixed_blip_vqa.jsonl',
    raw_prefix =      AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =   AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_1/',
    mask_prefix=      AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_1_mask/',
    can_use_mask = False,
)
data_5_pet2_2 = dict(
    type='Image2ImageDataset',
    meta_file =       JSONL_PREFIX +'made_by_sdxl_inpanting/pet/image2image_metadata2_2_fix_loc_addmask_add_vqa_loc_x1y1x2y2_fixed_blip_vqa.jsonl',
    raw_prefix =      AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =   AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_2/',
    mask_prefix=      AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_2_mask/',
    can_use_mask = False,
)
data_5_pet2_3 = dict(
    type='Image2ImageDataset',
    # NOTE(review): 'temp_blip_vqa.jsonl' looks like a temporary/scratch metadata
    # file, unlike the versioned names of its siblings — confirm it is the
    # intended final metadata for split 2_3.
    meta_file =        JSONL_PREFIX +'made_by_sdxl_inpanting/pet/temp_blip_vqa.jsonl',
    raw_prefix =       AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =    AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_3/',
    mask_prefix=       AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_3_mask/',
    can_use_mask = False,
)
data_5_good = dict(
    type='Image2ImageDataset',
    meta_file =        JSONL_PREFIX +'made_by_sdxl_inpanting/pet/make_data10_amendtext_fixed_fix_loc_addmask_add_vqa_loc_x1y1x2y2_fixed_blip_vqa.jsonl',
    # NOTE(review): AOSS_PREFIX + an absolute local path ('/mnt/afs2d01/...')
    # concatenates to 'cluster_lth_yun:s3://cabin-aigc/AIGC//mnt/afs2d01/...',
    # which is almost certainly not a valid location — confirm whether this
    # should be a plain local path or an s3 key relative to AOSS_PREFIX.
    raw_prefix=        AOSS_PREFIX +'/mnt/afs2d01/luotianhang/diffusion_data/empty_cabin/xuxiaocheng/main_test/Empty',
    edited_prefix=     AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data10/',
    mask_prefix=       AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data10_mask/',
    can_use_mask = False,
)

# --- Left-object / pet cabin datasets (tdata1..tdata13) ---
# raw_prefix and mask_prefix are deliberately blanked; the original
# 'cabin-perception-v2' storage locations are kept as trailing comments so
# they can be restored if the erased/mask images are needed again.
tdata1=dict(
    type='Image2ImageDataset',
    meta_file=      JSONL_PREFIX2+'0/pad_back_shell_filter_person.pkl_addmask.jsonl',
    edited_prefix=  AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    can_use_mask = False,
)
tdata2=dict(
    type='Image2ImageDataset',
    meta_file=     JSONL_PREFIX2+'4/laptop_back_close_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    can_use_mask = False,
)
tdata3=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'5/BYDuxe_back_phone_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)
tdata4=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'8/laptop_back_open_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    can_use_mask = False,
)
tdata5=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'12/BYDsg_back_wallet_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)
tdata6=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'13/phone_back_shell_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    can_use_mask = False,
)
tdata7=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'16/DFM57_front_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)
tdata8=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'20/DFM57_back_phone_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)
tdata9=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'21/GQA02_front_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)
tdata10=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'24/DFM57_back_wallet_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)
tdata11=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'25/wallet_back_close_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/Sensebee_2632/frames_fr1_max5',
    can_use_mask = False,
)
tdata12=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'26/GQA02_back_phone_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)
tdata13=dict(
    type='Image2ImageDataset',
    meta_file= JSONL_PREFIX2+'28/GQA02_back_wallet_filter_person.pkl_addmask.jsonl',
    edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
    raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
    can_use_mask = False,
)


# --- Left-object datasets, continued (tdata14..tdata22) ---
# Same shape as tdata1..tdata13: blanked raw/mask prefixes with the original
# 'cabin-perception-v2' locations preserved as trailing comments.
tdata14=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'32/0115_back_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0115',
        raw_prefix='',# 'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0115',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0115',
        can_use_mask = False,
)
tdata15=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'33/0116_back_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0116',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0116',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0116',
        can_use_mask = False,
)
tdata16=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'34/0119_back_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0119',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0119',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0119',
        can_use_mask = False,
)
tdata17=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'35/0204_back_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0204',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0204',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/xuxiaocheng/projects/FAW/E001_blend_2024/frames_bld_0204',
        can_use_mask = False,
)
tdata18=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'37/HiPhi_back_phone_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
        can_use_mask = False,
)
tdata19=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'58/HiPhi_front_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
        can_use_mask = False,
)
tdata20=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'60/SC_20240417and18_front_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/data/projects/LEVC/XE08/frames',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/data/projects/LEVC/XE08/frames',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/data/projects/LEVC/XE08/frames',
        can_use_mask = False,
)
tdata21=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'64/NIO_202111_phone_back_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/zhangruixuan/NIO/force_train/1109/shouji',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/zhangruixuan/NIO/force_train/1109/shouji',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/zhangruixuan/NIO/force_train/1109/shouji',
        can_use_mask = False,
)
tdata22=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX2+'65/BYDuxe_back_filter_person.pkl_addmask.jsonl',
        edited_prefix= AOSS_PREFIX +'ObjPet/Image/train/data/leftObj/lipu2',
        raw_prefix= '',#'cabin-perception-v2:s3://ObjPet/Image_erased/train/data/leftObj/lipu2',
        mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/leftObj/lipu2',
        can_use_mask = False,
)


# --- Safety-seat datasets annotated via SenseBee (tdata23..tdata41) ---
# These are edited-image-only entries: raw_prefix and mask_prefix are blank
# and can_use_mask is False throughout.
tdata23=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/alps_train_240913.jsonl',
        # Note: this set lives in a different bucket ('sdc_ytd_v2') than the rest.
        edited_prefix= 'sdc_ytd_v2:s3://ytd-bucket-v2/data/alps_seat_train_data/240913/extract_frames',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata24=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_out_blip_vqa.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/guzaiwang/G59_frame_0411/',
        raw_prefix= '',#'',
        mask_prefix='',#'',
        can_use_mask = False,
)
tdata25=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_59246_seatbody_20220222_ignoreinvalidseat_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/shenzan/Safetyseat/sensebee_datalist_59246/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata26=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_62150_seatbody_20220307_ignoreinvalidseat_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/shenzan/Safetyseat/sensebee_datalist_62150/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata27=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_all_person_ignore_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seat_data/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata28=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_all_seat_ignore_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seat_data/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata29=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_child_and_seat_byzhangruixuan_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seatV4/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata30=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_child_and_seat_ignore_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seat_data_V3/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata31=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_child_over_seat_byzhangruixuan_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seatV4/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata32=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_child_over_seat_ignore_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seat_data_V3/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata33=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_person_ignore_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seat_data_V2/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)

# NOTE(review): there is no tdata34 — presumably removed at some point; confirm
# the numbering gap is intentional before relying on a contiguous range.
tdata35=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/30_percent_train_seat.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/share_data/zhangruixuan/Data/pics/seat/seatV4/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata36=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/A58_seat_0526_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/tmp_seat_datas/guzaiwang/A58/frames/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata37=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/A58_seat_0527_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/tmp_seat_datas/guzaiwang/A58/frames/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata38=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/A88_seat_part1_0726_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/xiyunlong/data/pic/20220711/20220711_safetyseat/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata39=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/fake_mosaic_data_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/data_mosaic/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata40=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/nio2_blip.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/td_caiji/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)
tdata41=dict(
        type='Image2ImageDataset',
        meta_file= JSONL_PREFIX+'made_by_sensebee/safeseat/train_det_data_data.jsonl',
        edited_prefix= AOSS_PREFIX +'SafetySeat/Image/Train/data_xc_cj/',
        raw_prefix= '',
        mask_prefix='',
        can_use_mask = False,
)



# --- Open-source image2image benchmark sets ---
# For both entries the raw, edited and mask images all live under one storage
# root, so the three prefixes are intentionally identical.
tdata_oabench = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/OABench_image2image_vqa_temp_add_locx1y1x2y2.jsonl',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/OABench/',
    'raw_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/OABench/',
    'mask_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/OABench/',
    'can_use_mask': False,
}

tdata_foodseg103 = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/image2image/EduardoPacheco_FoodSeg103_strip_background_blip_add_locx1y1x2y2.jsonl',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/image2image/EduardoPacheco_FoodSeg103/',
    'raw_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/image2image/EduardoPacheco_FoodSeg103/',
    'mask_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/image2image/EduardoPacheco_FoodSeg103/',
    'can_use_mask': False,
}




# --- Pet (cat/dog) datasets from the left-object pipeline and SenseBee ---
# The *_text* variants reuse the same image storage root but pair it with
# text2image metadata instead of image2image metadata.
tdata_pet=dict(
    type='Image2ImageDataset',
    meta_file = JSONL_PREFIX+'data_from_left_obj/pet_base_select_cdt1_no_human_codetr_filter_person.pkl_addmask.jsonl',
    edited_prefix=AOSS_PREFIX +'ObjPet/Image/train/data/pet/sh1424ssd_zyh/',
    raw_prefix='',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/pet/sh1424ssd_zyh/',
    can_use_mask = False,
)
tdata_pet_cat=dict(
    type='Image2ImageDataset',
    meta_file = JSONL_PREFIX+'made_by_sensebee/pet/pet_base_select_cdt1_no_human_codetr_high_cat.jsonl',
    edited_prefix=AOSS_PREFIX +'ObjPet/Image/train/data/pet/sh1424ssd_zyh/',
    raw_prefix='',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/pet/sh1424ssd_zyh/',
    can_use_mask = False,
)
tdata_pet_dog=dict(
    type='Image2ImageDataset',
    meta_file = JSONL_PREFIX+'made_by_sensebee/pet/pet_base_select_cdt1_no_human_codetr_high_dog.jsonl',
    edited_prefix=AOSS_PREFIX +'ObjPet/Image/train/data/pet/sh1424ssd_zyh/',
    raw_prefix='',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/pet/sh1424ssd_zyh/',
    can_use_mask = False,
)

tdata_pet_cat_text=dict(
    type='Text2ImageDataset',
    meta_file = JSONL_PREFIX+'made_by_sensebee/text2image/pet/pet_base_select_cdt1_no_human_codetr_high_cat.jsonl',
    edited_prefix=AOSS_PREFIX +'ObjPet/Image/train/data/pet/sh1424ssd_zyh/',
    raw_prefix='',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/pet/sh1424ssd_zyh/',
    can_use_mask = False,
)
tdata_pet_dog_text=dict(
    type='Text2ImageDataset',
    meta_file = JSONL_PREFIX+'made_by_sensebee/text2image/pet/pet_base_select_cdt1_no_human_codetr_high_dog.jsonl',
    edited_prefix=AOSS_PREFIX +'ObjPet/Image/train/data/pet/sh1424ssd_zyh/',
    raw_prefix='',
    mask_prefix='',#'cabin-perception-v2:s3://ObjPet/Image_mask/train/data/pet/sh1424ssd_zyh/',
    can_use_mask = False,
)


# --- Text2image datasets (internal hand-off + open-source captioned sets) ---
tdata_from_text2image = dict(
    type='Text2ImageDataset',
    meta_file = JSONL_PREFIX+'data_from_text2image/data_from_text2image_fix_add_prefix.jsonl',
    edited_prefix= AOSS_PREFIX +'text2image/from_yutingdong/',
    raw_prefix='',
    mask_prefix='',
    can_use_mask = False,
)

tdata_from_text2image_visual_genome = dict(
    type='Text2ImageDataset',
    # Only dataset whose metadata lives under JSONL_PREFIX_TEXT rather than JSONL_PREFIX.
    meta_file = JSONL_PREFIX_TEXT+'open_source/visual_genome/meta_text2image.jsonl',
    edited_prefix =    AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/visual_genome',
    raw_prefix='',
    mask_prefix='',
    can_use_mask = False,
)

# jackyhate text-to-image-2M: a 10k subset at 1024px and the full 2M set at 512px.
tdata_from_text2image_jackyhate_2M1024_10k = dict(
    type='Text2ImageDataset',
    meta_file = JSONL_PREFIX+'open_source_data/text2image/jackyhate_text2image_2M_1024_10k.jsonl',
    edited_prefix =    AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/text2image/jackyhate_text2image',
    raw_prefix='',
    mask_prefix='',
    can_use_mask = False,
)
tdata_from_text2image_jackyhate_2M_512_2M = dict(
    type='Text2ImageDataset',
    meta_file = JSONL_PREFIX+'open_source_data/text2image/jackyhate_text2image_2M_512_2M.jsonl',
    edited_prefix =   AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/text2image/jackyhate_text2image',
    raw_prefix='',
    mask_prefix='',
    can_use_mask = False,
)




tdata_from_text2image_CortexLM_midjourney_v6 = dict(
    type='Text2ImageDataset',
    meta_file = JSONL_PREFIX+'open_source_data/text2image/CortexLM--midjourney-v6_part.jsonl',
    edited_prefix =   AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/text2image/CortexLM_midjourney_v6',
    raw_prefix='',
    mask_prefix='',
    can_use_mask = False,
)



# --- Text2image variants of the SDXL-inpainting pet splits (data_5_pet2_*) ---
# Same edited images as the image2image versions above; raw/mask prefixes are
# blanked, with the old values kept as trailing comments.
data_5_pet2_0_text2image = dict(
    type='Text2ImageDataset',
    meta_file     =       JSONL_PREFIX+'made_by_sdxl_inpanting/pet/text2image/image2image_metadata2_0_fix_loc_addmask_add_vqa.jsonl',
    raw_prefix    =       '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =      AOSS_PREFIX + 'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_0/',
    mask_prefix   =       '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_0_mask/',
    can_use_mask = False,
)
data_5_pet2_1_text2image = dict(
    type='Text2ImageDataset',
    meta_file =       JSONL_PREFIX+'made_by_sdxl_inpanting/pet/text2image/image2image_metadata2_1_fix_loc_addmask_add_vqa.jsonl',
    raw_prefix =      '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =   AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_1/',
    mask_prefix=      '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_1_mask/',
    can_use_mask = False,
)
data_5_pet2_2_text2image = dict(
    type='Text2ImageDataset',
    meta_file =       JSONL_PREFIX+'made_by_sdxl_inpanting/pet/text2image/image2image_metadata2_2_fix_loc_addmask_add_vqa.jsonl',
    raw_prefix =      '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =   AOSS_PREFIX +'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_2/',
    mask_prefix=      '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_2_mask/',
    can_use_mask = False,
)
data_5_pet2_3_text2image = dict(
    type='Text2ImageDataset',
    meta_file =        JSONL_PREFIX+'made_by_sdxl_inpanting/pet/text2image/image2image_metadata2_3_fixed_fix_loc_addmask_add_vqa.jsonl',
    raw_prefix =       '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/data_gligen/map/',
    edited_prefix =   AOSS_PREFIX + 'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_3/',
    mask_prefix=       '',#'diffusion_data/luotianhang/AIGC/sdxl_inpainting/make_data2_3_mask/',
    can_use_mask = False,
)

# --- COCO2017 and ImageNet derived datasets ---
# COCO2017 (msdkhairi mirror): one caption set and two editing sets (boxes / masks).
data_coco2017_text2image = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/coco2017_text2image.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/coco2017/msdkhairi_coco2017/coco2017',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_coco2017_image2image_box = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/image2image/coco2017_image2image_boxs_size_filter_blip.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/coco2017/msdkhairi_coco2017/coco2017',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_coco2017_image2image_mask = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/image2image/coco_image2image_mask.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/coco2017/msdkhairi_coco2017/coco2017',
    # Mask storage is disabled; the previous location was
    # '.../coco2017/msdkhairi_coco2017/coco2017/mask' if it needs restoring.
    'mask_prefix': '',
    'can_use_mask': False,
}


# ImageNet: class-name captions, BLIP captions, and a box-based editing set.
data_imagenet_text2image = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/imagenet_text2image.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/imagenet/cls',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_imagenet_text2image_blip = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/imagenet_text2image_blip_fix.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/imagenet/cls',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_imagenet_image2image_box = {
    'type': 'Image2ImageDataset',
    # NOTE(review): metadata sits under the text2image/ folder despite being an
    # image2image dataset — presumably just where the file was written; confirm.
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/imagenet_image2image_box_fix_size_filter.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/imagenet/cls',
    'mask_prefix': '',
    'can_use_mask': False,
}
# --- Objects365 / mixed-grounding (local disk) and VOC / pet-seg (AOSS) sets ---
data_object365_image2image=dict(
    type='Image2ImageDataset',
    # These two datasets read directly from local mounts, not from AOSS.
    meta_file = '/mnt/afs2d01/luotianhang/laion-coco/object365/object365.jsonl',
    raw_prefix= '',
    edited_prefix='/mnt/afs2d/xiyunlong/data/public/Objects365_v1/',
    mask_prefix='',
    can_use_mask=False,
)

data_grounding_text2image = dict(
    type='Text2ImageDataset',
    meta_file = '/mnt/afs2d01/luotianhang/laion-coco/grounding/grounding_text2image.jsonl',
    raw_prefix= '',
    edited_prefix='/mnt/afs2d/xiyunlong/data/public/mixed_grounding/images',
    mask_prefix='',
    can_use_mask=False,
)
data_voc_image2image = dict(
    type='Image2ImageDataset',
    meta_file =JSONL_PREFIX+'open_source_data/image2image/voc_2007_2012_image2image_size_filter.jsonl',
    raw_prefix= '',
    edited_prefix=AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/voc/',
    mask_prefix='',
    can_use_mask=False,
)

data_voc_text2image = dict(
    type='Text2ImageDataset',
    # NOTE(review): typed as Text2ImageDataset but points at the image2image
    # mask metadata (same file as data_voc_image2image_mask below) — confirm
    # this pairing is intentional.
    meta_file =JSONL_PREFIX+'open_source_data/image2image/voc_2007_2012_image2image_mask_blip_add_locx1y1x2y2.jsonl',
    raw_prefix= '',
    edited_prefix=AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/voc/',
    mask_prefix='',
    can_use_mask=False,
)
data_voc_image2image_mask = dict(
    type='Image2ImageDataset',
    meta_file =JSONL_PREFIX+'open_source_data/image2image/voc_2007_2012_image2image_mask_blip_add_locx1y1x2y2.jsonl',
    raw_prefix= '',
    edited_prefix=AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/voc/',
    mask_prefix='',#'diffusion_data/luotianhang/open_source_data/from_huggingface/voc/',
    can_use_mask=False,
)

data_pet_seg_category_face = dict(
    type = 'Text2ImageDataset',
    meta_file = JSONL_PREFIX+'open_source_data/image2image/pet_seg_category_mask_blip_loc_x1y1x2y2.jsonl',
    raw_prefix='',
    edited_prefix=AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/pet_seg_category/',
    mask_prefix='',
    can_use_mask=False
)
data_pet_seg_category_mask = dict(
    type = 'Image2ImageDataset',
    meta_file = JSONL_PREFIX+'open_source_data/image2image/pet_seg_category_mask.jsonl',
    raw_prefix='',
    edited_prefix=AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/pet_seg_category/',
    mask_prefix='',#'diffusion_data/luotianhang/open_source_data/from_huggingface/pet_seg_category/',
    can_use_mask=False
)


# --- SAM text2image shards ---
# All entries are identical except for the JSONL shard number; the images for
# every shard live under the same from_huggingface/SAM root.
data_sam_text2image_a20 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/20.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_sam_text2image_a21 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/21.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_sam_text2image_a22 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/22.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_sam_text2image_a23 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/23.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_sam_text2image_a24 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/24.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_sam_text2image_a25 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/25.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_sam_text2image_a28 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/28.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}

data_sam_text2image_a29 = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/29.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    'mask_prefix': '',
    'can_use_mask': False,
}
data_sam_text2image_a131 = dict(
    type = 'Text2ImageDataset',
    meta_file = JSONL_PREFIX+'open_source_data/text2image/sam/131.jsonl',
    raw_prefix = '',
    edited_prefix = AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    mask_prefix = '',
    can_use_mask = False,
)
data_sam_text2image_a132 = dict(
    type = 'Text2ImageDataset',
    meta_file = JSONL_PREFIX+'open_source_data/text2image/sam/132.jsonl',
    raw_prefix = '',
    edited_prefix = AOSS_PREFIX +'diffusion_data/luotianhang/open_source_data/from_huggingface/SAM',
    mask_prefix = '',
    can_use_mask = False,
)
# HuggingFace fusing/dog_captions — captioned dog photos for text2image.
data_text2image_fusion_dog = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/fusing_dog_captions.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/fusing-dog_captions',
    'mask_prefix': '',
    'can_use_mask': False,
}


# Flickr30k captions for text2image.
data_text2image_flickr30k = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/flickr30k.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/filck',
    'mask_prefix': '',
    'can_use_mask': False,
}

# Visual Genome region descriptions (size-filtered) as image2image pairs.
data_image2image_visual_genome_region = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/visual_genome_region_size_filter.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/visual_genome/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# Visual Genome object annotations as image2image pairs.
data_image2image_visual_genome_object = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/visual_genome_object.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/visual_genome/',
    'mask_prefix': '',
    'can_use_mask': False,
}



# In-house person frames (SenseBee batch 2960) used as image2image targets;
# note this one lives on a different bucket than the AOSS_PREFIX datasets.
data_image2image_person = {
    'type': 'Image2ImageDataset',
    'meta_file': '/mnt/afs2d01/luotianhang/diffusion_data/process_detect/frames.jsonl',
    'raw_prefix': '',
    'edited_prefix': 'cabin-perception-v2:s3://BodyShape/Image/train/sample_sensebee_2960/frames/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# Crawled wallpaper collection with BLIP captions.
data_wallpaper_text2image = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/sam/wall_paper.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/wallpaper_crawled_blip/wallpaper_collected/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# CUB-200-2011 birds, image2image pairs.
data_cub_200_image2image = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/image2image/cub-200_image2image.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/CUB_200_2011/images',
    'mask_prefix': '',
    'can_use_mask': False,
}

# ADE20K scenes for text2image.
data_aed20k_text2image = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/ade20k_text2image.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/ADE20K/data/image_data/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# Open Images V6 — text2image view.
data_openimagev6_text2image = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/open_image_text2image.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/open-image/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# Open Images V6 — image2image view with box annotations.
data_openimagev6_image2image = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/open-image_image2image_box_all_occ_temp.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/open-image/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# krishnakalyan3 FLUX-dev prompt set (photo faces), shards 9-10.
data_flux_prompt_photo_face_text2image = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/krishnakalyan3_flux_prompt_9_10.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/krishnakalyan3_fluxdev_prompt9_10/image_data/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# kadirnar fluxdev_controlnet_16k prompt/image set.
data_fluxdev_controlnet_16k_text2image = {
    'type': 'Text2ImageDataset',
    'meta_file': JSONL_PREFIX + 'open_source_data/text2image/fluxdev_controlnet_16k.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'diffusion_data/luotianhang/open_source_data/from_huggingface/kadirnar_fluxdev_controlnet_16k/image_data/',
    'mask_prefix': '',
    'can_use_mask': False,
}

# SafeSeat frames annotated via SenseBee task 2328.
data_ALPS_safeseat_sensebee_2328 = {
    'type': 'Image2ImageDataset',
    'meta_file': JSONL_PREFIX + 'made_by_sensebee/ALPS_safeseat_sensebee_2328.jsonl',
    'raw_prefix': '',
    'edited_prefix': AOSS_PREFIX + 'SafeSeat/Image/train/lth_trainningdata/sensebee_2328/frames/',
    'mask_prefix': '',
    'can_use_mask': False,
}

from aoss_client.client import Client
import jsonlines
from PIL import Image,ImageFont
import numpy as np
import cv2
import random
import os


def read_image_url(url):
    """Load an image as an RGB ``PIL.Image`` from an s3-style URL or a local path.

    For URLs containing ``s3://`` the bytes are fetched through the
    module-level ``client`` (set up in ``__main__``) and decoded with OpenCV;
    any other string is treated as a local file path.

    Args:
        url: s3-style object URL or local filesystem path.

    Returns:
        ``PIL.Image`` in RGB mode.

    Raises:
        ValueError: if downloaded bytes cannot be decoded as an image
            (``cv2.imdecode`` signals failure by returning None, which would
            otherwise crash opaquely inside ``cvtColor``).
    """
    if 's3://' in url:
        raw = client.get(url)
        # np.frombuffer accepts bytes directly; no memoryview wrapper needed.
        arr = np.frombuffer(raw, np.uint8)
        decoded = cv2.imdecode(arr, cv2.IMREAD_COLOR)
        if decoded is None:
            raise ValueError(f'failed to decode image bytes from {url}')
        # OpenCV decodes as BGR; convert before handing to PIL.
        decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
        return Image.fromarray(decoded).convert('RGB')
    return Image.open(url).convert('RGB')


def generate_mask_resized(x1y1x2y2, width, height, out_size=(640 * 2, 360 * 2)):
    """Rasterize a box into a binary mask at a fixed output resolution.

    Args:
        x1y1x2y2: box ``(x1, y1, x2, y2)`` in the source image's pixel
            coordinates.
        width: source image width the box coordinates refer to.
        height: source image height.
        out_size: ``(out_w, out_h)`` of the produced mask. Defaults to
            1280x720, the resolution the visualization in ``__main__`` uses
            (previously hard-coded inside the function).

    Returns:
        Single-channel ('L') PIL image: 255 inside the scaled box, 0 elsewhere.
    """
    out_w, out_h = out_size
    x1, y1, x2, y2 = x1y1x2y2
    # Scale box corners from source resolution into mask resolution.
    x1 = int(x1 / width * out_w)
    x2 = int(x2 / width * out_w)
    y1 = int(y1 / height * out_h)
    y2 = int(y2 / height * out_h)

    mask = np.zeros((out_h, out_w), dtype=np.uint8)
    mask[y1:y2, x1:x2] = 255
    return Image.fromarray(mask).convert('L')

if __name__ == "__main__":
    # Visual sanity check for the dataset configs: sample records from each
    # selected dataset and overlay caption, box and mask on the edited image.
    conf_path = '/mnt/afs2d/luotianhang/aoss.conf'
    client = Client(conf_path)
    from datalist_left_obj.datalist_back import *
    from datalist_left_obj.datalist_front import *

    # (dataset config, repeat weight) pairs. Only the configs are used below;
    # the weights and the commented-out entries are kept as a record of the
    # training mixture.
    TDATA = [
        (data_fluxdev_controlnet_16k_text2image,1),
        (data_flux_prompt_photo_face_text2image,1),
        # (data_wallpaper_text2image,1),
        # (data_openimagev6_text2image,1),
        # # image2image
        #         # pet
        #         (data_5_pet2_0,4),# using blip
        #         (data_5_pet2_1,4),# using blip
        #         (data_5_pet2_2,4),# using blip
        #         # (data_5_pet2_3,4),# not used: generated images look a bit like watercolor
        #         (data_5_good,2*4),# pet cat dog # using blip
        #         # (tdata_pet_cat,1),# not used: pets came out badly deformed
        #         # (tdata_pet_dog,1),# not used: pets came out badly deformed
        #         # safeseat
        #         (tdata23,4),# repeat 23
        #         (tdata24,5), # designed for the long text and people sit on the seat
        #         (tdata35,4),
        #         (tdata41,4),
        #         # (tdata23,1),# repeat 23
        #         # (tdata35,1),
        #         # (tdata41,1),
        #         # opensource
        #         (tdata_oabench,60), # inpainting
        #         (tdata_foodseg103,2*2), # inpainting # using blip
        #         # (data_image2image_visual_genome_object,6),
        #         (data_image2image_visual_genome_region,10),
        #         (data_pet_seg_category_mask,1*6),# using blip
        #         # (data_imagenet_image2image_box,4),
        #         (data_cub_200_image2image,3),
        #         # (data_coco2017_image2image_box,1),# using blip # must never be used
        #         (data_pet_seg_category_face,2),# using blip
        #         # text2image
        #         (tdata_from_text2image,2), # text2image
        #         (tdata_from_text2image_jackyhate_2M_512_2M,2), # text2image
        #         (tdata_from_text2image_jackyhate_2M1024_10k,2), # text2image
        #         (tdata_from_text2image_CortexLM_midjourney_v6,2), # text2image
        #         (data_coco2017_text2image,1*2), # text2image high quality
        #         (data_sam_text2image_a20,1*1),
        #         (data_sam_text2image_a21,1*1),
        #         (data_sam_text2image_a22,1),
        #         (data_sam_text2image_a23,1),
        #         (data_sam_text2image_a24,1),
        #         (data_sam_text2image_a25,1),
        #         (data_sam_text2image_a28,1),
        #         (data_sam_text2image_a29,1),
        #         (data_sam_text2image_a131,1),
        #         (data_sam_text2image_a132,1),
        #         (data_imagenet_text2image_blip,1),
        #         (data_wallpaper_text2image,2),
        #         (data_text2image_flickr30k,1),
        #         # (data_voc_text2image,2),
        #         (data_aed20k_text2image,4),
        #         (data_openimagev6_text2image,5),
        #         # (data_coco2017_image2image_mask,4),
        #         # # (data_imagenet_text2image,1),# text2image
        #         # (data_voc_image2image,1*6),
        #         # (data_voc_image2image_mask,1*6),

        #         # # # ###################################### tdata left_obj
        #         (tdata_left_obj_back_1,1),
        #         (tdata_left_obj_back_2,1),
        #         (tdata_left_obj_back_3,1),
        #         (tdata_left_obj_back_4,1),
        #         (tdata_left_obj_back_7,1),
        #         (tdata_left_obj_back_8,1),
        #         (tdata_left_obj_back_9,1),
        #         (tdata_left_obj_back_10,1),
        #         (tdata_left_obj_back_13,1),
        #         (tdata_left_obj_back_15,1),
        #         (tdata_left_obj_back_16,1),
        #         (tdata_left_obj_back_17,1),
        #         (tdata_left_obj_back_18,1),
        #         (tdata_left_obj_back_19,1),
        #         (tdata_left_obj_back_20,1),
        #         (tdata_left_obj_back_21,1),
        #         (tdata_left_obj_back_26,1),
        #         (tdata_left_obj_back_27,1),
        #         (tdata_left_obj_back_28,1),
        #         (tdata_left_obj_back_29,1),
        #         (tdata_left_obj_back_31,1),
        #         (tdata_left_obj_back_32,1),
        #         (tdata_left_obj_back_33,1),
        #         (tdata_left_obj_back_35,1),
        #         (tdata_left_obj_back_36,1),
        #         (tdata_left_obj_back_37,1),
        #         (tdata_left_obj_back_38,1),
        #         (tdata_left_obj_back_41,1),
        #         (tdata_left_obj_back_42,1),
        #         (tdata_left_obj_back_44,1),
        #         (tdata_left_obj_back_45,1),
        #         (tdata_left_obj_back_46,1),
        #         (tdata_left_obj_back_47,1),
        #         (tdata_left_obj_back_48,1),
        #         (tdata_left_obj_back_49,1),
        #         (tdata_left_obj_back_52,1),
        #         (tdata_left_obj_back_53,1),
        #         (tdata_left_obj_back_54,1),
        #         (tdata_left_obj_front_1,1),
        #         (tdata_left_obj_front_2,1),
        #         (tdata_left_obj_front_3,1),
        #         (tdata_left_obj_front_4,1),
        #         (tdata_left_obj_front_5,1),
        #         (tdata_left_obj_front_6,1),
        #         (tdata_left_obj_front_7,1),
        #         (tdata_left_obj_front_8,1),
        #         (tdata_left_obj_front_9,1),
        #         (tdata_left_obj_front_10,1),
        #         (tdata_left_obj_front_11,1),
        #         (tdata_left_obj_front_12,1),
        #         (tdata_left_obj_front_13,1),
        #         (tdata_left_obj_front_14,1),
        #         (tdata_left_obj_front_15,1),
        #         (tdata_left_obj_front_16,1),
        #         (tdata_left_obj_front_17,1),
        #         (tdata_left_obj_front_18,1),
        #         (tdata_left_obj_front_19,1),
        #         (tdata_left_obj_front_20,1),
        #         (tdata_left_obj_front_21,1),
        #         (tdata_left_obj_front_22,1),
        #         (tdata_left_obj_front_23,1),
        #         (tdata_left_obj_front_24,1),
        #         (tdata_left_obj_front_25,1),
        #         (tdata_left_obj_front_26,1),
        #         (tdata_left_obj_front_27,1),
        #         (tdata_left_obj_front_28,1),
        #         (tdata_left_obj_front_29,1),
        #         (tdata_left_obj_front_30,1),
        #         (tdata_left_obj_front_31,1),
        #         (tdata_left_obj_front_32,1),
        #         (tdata_left_obj_front_33,1),
        #         (tdata_left_obj_front_34,1),
        #         (tdata_left_obj_front_35,1),
        #         (tdata_left_obj_front_36,1),
        #         (tdata_left_obj_front_37,1),
        #         (tdata_left_obj_front_38,1),
        #         (tdata_left_obj_front_39,1),
        #         (tdata_left_obj_front_40,1),
        #         (tdata_left_obj_front_41,1),
        #         (tdata_left_obj_front_42,1),
        #         (tdata_left_obj_front_43,1),
        #         (tdata_left_obj_front_44,1),
        #         (tdata_left_obj_front_45,1),
        #         (tdata_left_obj_front_46,1),
        #         (tdata_left_obj_front_47,1),
        #         (tdata_left_obj_front_48,1),
        #         (tdata_left_obj_front_49,1),
        #         (tdata_left_obj_front_50,1),
        #         (tdata_left_obj_front_52,1),
        #         (tdata_left_obj_front_53,1),
        #         (tdata_left_obj_front_54,1),
        #         (tdata_left_obj_front_55,1),
        #         (tdata_left_obj_front_56,1),
        #         (tdata_left_obj_front_57,1),
        #         (tdata_left_obj_front_60,1),
        #         (tdata_left_obj_front_61,1),
        #         (tdata_left_obj_front_62,1),
    ]
    # Drop the repeat weights — the visualization below only needs the configs.
    TDATA = [td[0] for td in TDATA]

    # --- commented-out maintenance script: rewrite each meta file keeping only
    # --- records whose edited image is actually readable ("*_fix.jsonl").
    # import os
    # from tqdm import tqdm
    # for t in TDATA:
    #     meta_file = t['meta_file']
    #     f = jsonlines.open(meta_file,'r')
    #     fw = jsonlines.open(meta_file.replace('.jsonl','_fix.jsonl'),'w')
    #     for line in tqdm(list(f)):
    #         raw_prefix = t['raw_prefix']
    #         edited_prefix = t['edited_prefix']
    #         mask_prefix = t['mask_prefix']
    #         try:
    #             # if len(raw_prefix)>0:
    #             #     read_image_url(os.path.join(raw_prefix,line['raw_image']))
    #             if len(edited_prefix)>0:
    #                 read_image_url(os.path.join(edited_prefix,line['edited_image']))
    #                 fw.write(line)
    #             # if len(mask_prefix)>0:
    #             #     read_image_url(os.path.join(mask_prefix,line['mask_image']))
    #         except:
    #             print(meta_file,line)
    #     fw.close()
    #     f.close()

    # --- commented-out check: flag meta files where no box is at least 96x96.
    # from tqdm import tqdm
    # for t in TDATA:
    #     meta_file = t['meta_file']
    #     f = jsonlines.open(meta_file,'r')
    #     right = []
    #     not_right = []
    #     for line in tqdm(list(f)):
    #         loc_x1y1x2y2 = line['loc_x1y1x2y2']
    #         x1 = loc_x1y1x2y2[0]
    #         y1 = loc_x1y1x2y2[1]
    #         x2 = loc_x1y1x2y2[2]
    #         y2 = loc_x1y1x2y2[3]

    #         if y2-y1 < 3*32 or x2-x1 < 3*32:
    #             not_right.append(line)
    #         else:
    #             right.append(line)
    #     if len(right)==0:
    #         print(meta_file)

    # Show that label and image line up: draw caption + box + blended mask.
    for t in TDATA:
        meta_file = t['meta_file']
        edited_prefix = t['edited_prefix']
        mask_prefix = t['mask_prefix']
        raw_prefix = t['raw_prefix']
        print(meta_file)
        with jsonlines.open(meta_file, 'r') as f:
            lines = list(f)
            # 30 random samples per dataset.
            for i in range(30):
                line = random.choice(lines)

                edited_image = read_image_url(os.path.join(edited_prefix, line['edited_image']))
                edited_image_draw = ImageDraw.Draw(edited_image)
                if t['can_use_mask']:
                    mask_image = read_image_url(os.path.join(mask_prefix, line['mask_image'])).convert('RGB').resize((640*2, 360*2))
                else:
                    if t['type'] == 'Text2ImageDataset':
                        # text2image records carry no box — use an empty one.
                        loc_x1y1x2y2 = [0, 0, 0, 0]
                    else:
                        try:
                            loc_x1y1x2y2 = line['loc_x1y1x2y2']
                        except KeyError:
                            # image2image record without a box cannot be visualized.
                            # (was a bare except followed by unreachable code)
                            raise ValueError(f'this is a inpainting dataset     {meta_file}')
                    mask_image = generate_mask_resized(loc_x1y1x2y2, edited_image.width, edited_image.height).convert('RGB').resize((640*2, 360*2))
                    lefttop = (loc_x1y1x2y2[0], loc_x1y1x2y2[1])
                    rightbottom = (loc_x1y1x2y2[2], loc_x1y1x2y2[3])
                    edited_image_draw.rectangle((lefttop, rightbottom), fill=None, outline='red', width=3)
                height = edited_image.height

                text = line['text']
                if 'vqa_text' in line:
                    # Prefer the VQA caption when present.
                    text = line['vqa_text']

                print(i, ':', text)

                # text = line['loc_x1y1x2y2_blip']
                font = ImageFont.truetype('/mnt/afs2d01/luotianhang/Arial.ttf', (height // 20), encoding='utf-8')

                # ImageDraw.text colors via 'fill'; the original also passed an
                # invalid color='red' kwarg that Pillow ignored — dropped here.
                edited_image_draw.text((30, 30), text, font=font, fill='red')
                edited_image = edited_image.convert('RGB').resize((640*2, 360*2))
                width = edited_image.width
                height = edited_image.height
                # Both images are now 1280x720, as Image.blend requires.
                composite_image = Image.blend(edited_image, mask_image, 0.5)

                # composite_image.save(f"./show_data/{meta_file.split('/')[-1]}__{i}.png")

 