from typing import List
from torch.utils.data import Dataset
import torch
from torchvision import transforms
import random
import os
import jsonlines
from aoss_client.client import Client
import numpy as np
from PIL import Image
import cv2
import pickle
from PIL import Image,ImageFilter,ImageOps
# AOSS object-storage client, configured from a cluster-specific credentials file.
conf_path = '/mnt/afs2d/luotianhang/aoss.conf'
client = Client(conf_path)
# S3-style root prefix of the irregular-mask image collection used by the
# image-restore task.
MASK_ROOT = 'cluster_aosslth_v2:s3://face-frames-v2/diffusion_data/luotianhang/open_source_data/irregular_mask'
# Pickled index of mask files; entries are joined onto MASK_ROOT in
# random_irregular_mask_image().  NOTE(review): presumably a list of relative
# file names — confirm against the pickle contents.
MASK_DATA = pickle.load(open('/mnt/afs2d01/luotianhang/diffusion_data/mask_data/mask_data.pkl','rb'))
# Keys a metadata record is expected to carry.  NOTE(review): not referenced
# anywhere in this file.
ASSERT_KEYS = (
    'raw_image','edited_image','text'
)
# Generic car-interior captions; one is sampled as (the base of) the prompt
# for text2image samples.
translations = [
    "This is an interior space of a car.",
    "This is a car interior space.",
    "This is a space within a vehicle's interior.",
    "This is a space inside the car's interior.",
    "This refers to an automotive interior space."
]
def strip_feature(texts,feature):
    """Return a copy of ``texts`` with every entry containing ``feature`` removed."""
    return [entry for entry in texts if feature not in entry]


def crop_and_resize(image,x1y1x2y2,th,tw,pad=16):
    """Crop an aspect-preserving window around a padded bbox and paste it at
    (0, 0) of a white th x tw canvas.

    NOTE(review): despite the name, the cropped pixels are never resized; the
    crop is pasted as-is.  The function appears unused in this file.

    Args:
        image: source PIL image.
        x1y1x2y2: [x1, y1, x2, y2] box in pixel coordinates.
        th, tw: target canvas height / width.
        pad: padding in pixels added around the box before cropping.
    """
    # Expand by `pad` and clamp to the image bounds.  The original used
    # min/max the wrong way round, which always grew the box to (at least)
    # the full image.
    x1 = max(x1y1x2y2[0]-pad,0)
    y1 = max(x1y1x2y2[1]-pad,0)
    x2 = min(x1y1x2y2[2]+pad,image.width)
    y2 = min(x1y1x2y2[3]+pad,image.height)
    xc = (x1+x2)//2
    yc = (y1+y2)//2  # fixed: was (y1 + y1) // 2

    width = x2-x1
    height = y2-y1

    # Fit the region into (tw, th) keeping its aspect ratio: the longer side
    # takes the full target extent and the other side scales by the same
    # factor (the original mixed th/tw across the two branches).
    if height > width:
        new_height = th
        new_width = int(width/height*th)
    else:
        new_width = tw
        new_height = int(height/width*tw)

    new_x1 = int(xc-new_width//2)
    new_y1 = int(yc-new_height//2)
    new_x2 = int(xc+new_width//2)
    new_y2 = int(yc+new_height//2)

    # PIL's crop() takes a single 4-tuple; four positional ints raise TypeError.
    crop_image = image.crop((new_x1,new_y1,new_x2,new_y2))
    new_image = Image.new('RGB', (tw,th), (255,255,255))
    new_image.paste(crop_image,(0,0))
    return new_image





# English quality descriptors; add_aux_world() appends one at random to a
# prompt to nudge the model toward higher-quality generations.  (The original
# per-line Chinese comments were literal translations of each string and have
# been dropped as redundant.)
high_quality_descriptors = [
    "high resolution",
    "sharp details",
    "vivid colors",
    "realistic textures",
    "intricate design",
    "smooth gradients",
    "perfect lighting",
    "fine-tuned contrasts",
    "dynamic range",
    "crisp edges",
    "photorealistic quality",
    "balanced composition",
    "depth of field",
    "rich shading",
    "well-defined objects",
    "refined edges",
    "clean rendering",
    "precise detailing",
    "smooth transitions",
    "enhanced clarity",
    "vibrant tones",
    "subtle highlights",
    "no visible noise",
    "perfect symmetry",
    "lifelike portrayal",
    "masterful execution",
    "rich textures",
    "well-preserved structure",
    "optimal focus",
    "color accuracy",
    "natural appearance",
    "detailed background",
    "harmonious proportions",
    "excellent sharpness",
    "minimal artifacts",
    "flawless execution",
    "full-spectrum light rendering",
    "rich depth",
    "subtle shadows",
    "high fidelity",
    "detailed textures",
    "smooth surface",
    "refined details",
    "true-to-life proportions",
    "perfectly balanced lighting",
    "high dynamic range",
    "crystal clear clarity",
    "vibrant color palette",
    "subtle tonal variations",
    "precise rendering",
    "seamless blending",
    "perfectly controlled light",
    "high contrast",
    "well-defined highlights",
    "accurate reflections",
    "lifelike lighting",
    "detailed reflections",
    "balanced shadowing",
    "flawless textures",
    "finely-tuned shading",
    "perfect proportions",
    "high realism",
    "subtle color variations",
    "intricate patterns",
    "ultra-high detail",
    "flawless geometry",
    "natural lighting",
    "complex textures",
    "realistic depth",
    "well-balanced tones",
    "high-level accuracy",
    "minimal distortion",
    "vivid light contrasts",
    "superb rendering",
    "high-precision focus",
    "soft transitions",
    "rich color depth",
    "well-defined forms",
    "perfectly aligned",
    "intricately designed",
    "detailed rendering",
    "accurate depth cues",
    "naturalistic colors",
    "detailed layering",
    "smooth texture transitions",
    "flawless resolution",
    "precise color rendering",
    "hyperrealistic textures",
    "perfect lighting balance",
    "balanced composition of light",
    "refined lighting effects",
    "masterful color control",
    "seamless texture blending",
    "sophisticated color grading",
    "elegant design patterns",
    "well-executed contrasts",
    "precisely rendered details",
    "subtle gradient shifts",
    "lifelike material rendering",
    "delicate light shadows",
    "vivid and true-to-life",
    "remarkably fine detailing",
    "seamless material transitions"
]
# MaxFilter kernel size used to dilate segmentation masks.
MASK_SIZE_FILTER = 3
# Global resolution multiplier: all images/masks are resized to
# (640*SCALE_INDEX, 360*SCALE_INDEX).
SCALE_INDEX  = 1
# prompt
###############################################
def output_edit_prompt(item,simplify_prob=True,in_the_car=False):
    """Pick and post-process a training prompt for one sample.

    When alternative captions exist, one of them replaces the default edit
    prompt with probability 0.8.  'pet' samples get a cat/dog/pet template
    instead.  The result is comma-shuffled and, when ``simplify_prob`` is
    true, collapsed to a canonical object word via simplify().
    """
    prompt = item['edit_prompt']
    captions = item['texts']
    edited_url = item['edited_image_url']

    # Prefer a richer alternative caption 80% of the time.
    if len(captions) > 0 and random.random() > 0.2:
        prompt = random.choice(captions)

    if item['edit_prompt'] == 'pet':  # pet samples: dog / cat / generic pet
        if 'cat' in edited_url:
            prompt = random.choice(output_texts('cat') + output_texts('pet'))
        elif 'dog' in edited_url:
            prompt = random.choice(output_texts('dog') + output_texts('pet'))
        else:
            prompt = random.choice(output_texts('pet'))

    prompt = prompt_shuffle(prompt)
    if simplify_prob:
        prompt = simplify(prompt, in_the_car)
    return prompt

def output_texts(text):
    """Return eight template sentences describing ``text`` being inside the car."""
    templates = (
        'a {0} is located in the car',
        'Inside the car, there is a {0}',
        'find a {0} in the car',
        'The car has a {0} inside',
        'A {0} can be found in the car,',
        'A {0} is stored in the car',
        'There’s a {0} kept in the car',
        'a {0} is in the vehicle',
    )
    return [template.format(text) for template in templates]

def prompt_shuffle(prompt):
    """With probability 0.5, randomly reorder the comma-separated parts of
    ``prompt``; prompts without commas always pass through unchanged."""
    # random.random() is drawn first (even without a comma) to keep the RNG
    # stream identical to the original implementation.
    should_shuffle = random.random() > 0.5 and ',' in prompt
    if not should_shuffle:
        return prompt
    parts = prompt.split(',')
    random.shuffle(parts)
    return ','.join(parts)

def add_aux_world(edited_prompt):
    """With probability 0.5, append a random quality descriptor to the prompt."""
    if random.random() > 0.5:
        suffix = random.choice(high_quality_descriptors)
        return edited_prompt + ',' + suffix
    return edited_prompt
def simplify(edit_prompt,in_the_car=False):
    """Collapse a prompt that mentions a known object into its canonical word.

    The first matching category wins (dog > cat > pad > wallet > phone >
    laptop); unmatched prompts pass through unchanged.  When ``in_the_car``
    is true, a random 'in the car' style suffix is appended.
    """
    categories = (
        (('dog', 'Dog', 'DOG'), ['dog']),
        (('cat', 'Cat', 'CAT'), ['cat']),
        (('pad', 'Pad', 'PAD'), ['tablet']),
        (('wallet', 'WALLET', 'Wallet'), ['wallet']),
        (('phone', 'Phone', 'PHONE'), ['phone']),
        (('laptop', 'Laptop', 'LAPTOP'), ['laptop']),
    )
    for spellings, canonical in categories:
        if any(s in edit_prompt for s in spellings):
            # random.choice kept (even on 1-element lists) to preserve the
            # original RNG consumption.
            edit_prompt = random.choice(canonical)
            break

    if in_the_car:
        edit_prompt = edit_prompt + random.choice([' in the car',' in the cabin',' in the car cabin'])

    return edit_prompt
###############################################
# mask
###############################################
def random_irregular_mask_image():
    # Pick a random entry from the pickled mask index and load the mask image
    # from AOSS.  The mask is inverted with ImageOps.invert before being
    # returned.  NOTE(review): inversion presumably aligns the polarity with
    # the rest of the pipeline (white = repaint) — confirm against the assets.
    mask_data =  random.choice(MASK_DATA)
    return ImageOps.invert(read_image_url(os.path.join(MASK_ROOT,mask_data)))

def generate_mask_for_outpainting(width,height,revert=False):
    """Build an outpainting mask: white (255) border, black (0) centre.

    A centred rectangle covering 60-90% of each side (sampled uniformly in
    steps of 10%) is kept (0); the surrounding border is marked for
    generation (255).

    Args:
        width, height: mask size in pixels.
        revert: if True, invert the mask (centre 255, border 0).

    Returns:
        A PIL 'L' (8-bit grayscale) image of size (width, height).
    """
    center_keep_ratio = random.randint(6,9)/10
    keep_width = int(center_keep_ratio*width)
    keep_height = int(center_keep_ratio*height)

    top = (height - keep_height) // 2
    left = (width - keep_width) // 2

    # uint8 is required: Image.fromarray() cannot handle the float64 array
    # that the original np.ones(...)*255 produced.  Explicit start:start+size
    # slicing also avoids the empty [0:-0] slice the old negative indexing
    # produced whenever the border rounded down to zero pixels.
    mask = np.full((height, width), 255, dtype=np.uint8)
    mask[top:top + keep_height, left:left + keep_width] = 0
    if revert:
        mask = 255 - mask
    return Image.fromarray(mask).convert('L')

def generate_mask_resized(x1y1x2y2,width,height):
    """Rasterise a bounding box into a binary mask at the working resolution.

    The box, given in (width, height) source coordinates, is rescaled onto
    the fixed (640*SCALE_INDEX, 360*SCALE_INDEX) grid; pixels inside it are
    255, everything else 0.  Returned as a PIL 'L' image.
    """
    target_w = 640 * SCALE_INDEX
    target_h = 360 * SCALE_INDEX

    bx1, by1, bx2, by2 = x1y1x2y2
    sx1 = int(bx1 / width * target_w)
    sy1 = int(by1 / height * target_h)
    sx2 = int(bx2 / width * target_w)
    sy2 = int(by2 / height * target_h)

    canvas = np.zeros((target_h, target_w), dtype=np.uint8)
    canvas[sy1:sy2, sx1:sx2] = 255
    return Image.fromarray(canvas).convert('L')

def prepare_mask_and_masked_image(image, mask):
    """Convert a PIL image/mask pair into the tensors the inpainting model expects.

    Returns:
        mask: float tensor (1, 1, H, W), hard-thresholded to {0, 1}.
        masked_image: float tensor (1, 3, H, W) in [-1, 1] with the masked
            (mask >= 0.5) region zeroed out.
    """
    rgb = np.array(image.convert("RGB"))
    rgb = rgb[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with batch dim
    pixels = torch.from_numpy(rgb).to(dtype=torch.float32) / 127.5 - 1.0

    gray = np.array(mask.convert("L")).astype(np.float32) / 255.0
    gray = gray[None, None]
    # Binarise: anything at or above 0.5 is treated as "repaint".
    gray[gray < 0.5] = 0
    gray[gray >= 0.5] = 1
    mask_tensor = torch.from_numpy(gray)

    # Keep only the pixels the mask leaves untouched.
    masked_image = pixels * (mask_tensor < 0.5)

    return mask_tensor, masked_image

def visualize_masked_image(masked_image,save_path):
    # Debug helper: with probability 0.5, dump the masked image tensor to disk
    # so training inputs can be spot-checked.  `masked_image` is the
    # (1, 3, H, W) tensor in [-1, 1] produced by prepare_mask_and_masked_image.
    if random.random()>0.5:
        # De-normalise the pixel values back to the [0, 255] range.
        masked_image = masked_image.squeeze(0).detach().cpu().numpy()  # drop the batch dimension
        masked_image = (masked_image + 1.0) * 127.5  # de-normalise
        masked_image = masked_image.transpose(1, 2, 0).astype(np.uint8)  # CHW -> HWC

        # Convert the NumPy array back into a PIL image and save it.
        masked_image_pil = Image.fromarray(masked_image)
        masked_image_pil.save(save_path)
###############################################

def image_pil_revert(image):
    """Return a horizontally mirrored copy of a PIL image."""
    flipped = image.transpose(Image.FLIP_LEFT_RIGHT)
    return flipped


def revers_prompt(prompt):
    """Turn an object-insertion prompt into a removal instruction.

    If the prompt mentions a known object category (toy/phone/dog/cat/wallet),
    a random synonym for the object is substituted into a random
    "take the X out of the car" template; otherwise the prompt is returned
    unchanged.  NOTE: the function name's spelling is kept for call-site
    compatibility.
    """
    synonyms_by_key = {
        'toy': ['doll','toy','Doll','Toy','DOLL','TOY'],
        'phone': ['mobile phone','ceil phone','phone','smart phone','Cellular phone','Mobile device','Handset','Mobile','Cell','Portable phone'],
        'dog': ['dog','cannie','Dog','Cannie','DOG','animal','pet','Pet', 'Animal companion','Furry friend','Domestic animal','Household pet','Companion animal','Four-legged friend','Creature','Beloved pet','Domesticated pet'],
        'cat': ['cat','Cat','CAT','animal','pet','Pet', 'Animal companion','Furry friend','Domestic animal','Household pet','Companion animal','Four-legged friend','Creature','Beloved pet','Domesticated pet'],
        'wallet': ['Wallet','wallet','Billfold','billfold','Purse','purse','Money clip','money clip','Cardholder','cardholder','Coin purse','coin purse','Pouch','pouch','Clutch','clutch','Money holder','money holder','Pocketbook','pocketbook'],
    }

    obj = ''
    # dict preserves insertion order, so 'toy' is matched before 'phone', etc.
    for key, synonyms in synonyms_by_key.items():
        if key in prompt:
            obj = random.choice(synonyms)
            break
    if not obj:
        return prompt

    removal_templates = [
    f'Take the {obj} out of the car',
    f'Get the {obj} out of the car',
    f'Remove the {obj} from the vehicle',
    f'Take the {obj} out of the car',
    f'Get the {obj} out of the vehicle',
    f'Remove the {obj} from the car',
    f'Take the {obj} out of the vehicle',
    f'Evacuate the {obj} from the car',
    f'Get the {obj} out of the car',
    f'Take the {obj} out of the automobile',
    f'Extract the {obj} from the car',
    f'Get the {obj} out of the automobile',
    f'Take the {obj} out of the SUV',
    f'Remove the {obj} from the automobile',
    f'Release the {obj} from the car',
    f'Unload the {obj} from the car',
    f'Take the {obj} out of the transportation',
    f'Remove the {obj} from the SUV',
    f'Get the {obj} out of the transport',
    f'Free the {obj} from the vehicle',
    ]
    return random.choice(removal_templates)

def read_image_url(url):
    """Load an RGB PIL image from an S3-style AOSS url or a local path.

    S3 objects are fetched through the module-level `client` and decoded with
    OpenCV; local files are opened directly with PIL.

    Raises:
        ValueError: with the offending url in the message (so bad samples are
            easy to trace in training logs), chained to the original error.
    """
    try:
        if 's3://' in url:
            image_bytes = client.get(url)
            image_mem_view = memoryview(image_bytes)
            image_array = np.frombuffer(image_mem_view,np.uint8)
            image_np = cv2.imdecode(image_array,cv2.IMREAD_COLOR)
            # cv2 decodes to BGR; convert before handing over to PIL.
            image_np = cv2.cvtColor(image_np,cv2.COLOR_BGR2RGB)
            image = Image.fromarray(image_np).convert('RGB')
        else:
            image = Image.open(url).convert('RGB')
        return image
    except Exception as e:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; chain the cause so the real IO/decode error survives.
        raise ValueError(f'{url}') from e




from concurrent.futures import ThreadPoolExecutor, as_completed


def check_len(prompt):
    """Accept prompts of at most 70 whitespace-separated words."""
    return len(prompt.split()) <= 70

def process_map(index, map):
    """Parse one metadata jsonl file into a list of training sample records.

    Args:
        index: position of this map in the dataset list; returned unchanged so
            thread-pool results can be reassembled in order.
        map: (config_dict, weight) pair; only config_dict (map[0]) is read
            here.  NOTE(review): the parameter shadows the builtin `map`;
            kept for call-site compatibility.

    Returns:
        (index, metadata) where metadata is a list of dicts with the keys
        consumed by InpaintingDataset.__getitem__.

    Raises:
        AssertionError: on malformed records or an empty metadata file.
    """
    def _clean(text):
        # Strip recurring BLIP caption artefacts (shared by both caption fields).
        return (text.replace('arafly', '')
                    .replace('araffex', '')
                    .replace('arafed', '')
                    .replace('araflane', 'airplane')
                    .replace('The image features', ''))

    metadata = []
    meta_file = map[0]['meta_file']
    raw_prefix = map[0]['raw_prefix']
    edited_prefix = map[0]['edited_prefix']
    mask_prefix = map[0]['mask_prefix']
    dataset_type = map[0]['type']
    can_use_mask = map[0]['can_use_mask']

    for line in list(jsonlines.open(meta_file, 'r')):
        assert 'text' in line.keys() and 'edited_image' in line.keys(), f'text and edited_image must be in {meta_file}'
        # Person samples are excluded from training.
        if line['text'] == 'person':
            continue

        edit_prompt = [line['text']]
        if 'loc_x1y1x2y2_blip' in line.keys():
            assert isinstance(line['loc_x1y1x2y2_blip'], str)
            edit_prompt += [_clean(line['loc_x1y1x2y2_blip'])]  # blip caption is a string
        if 'texts_bird' in line.keys():
            # isinstance against typing.List is deprecated; plain list is equivalent.
            assert isinstance(line['texts_bird'], list)
            edit_prompt += line['texts_bird']  # texts_bird is a list
        if 'vqa_text' in line.keys():
            assert isinstance(line['vqa_text'], str)
            edit_prompt += [_clean(line['vqa_text'])]

        # Drop over-long captions (>70 words); skip the sample entirely if
        # nothing survives.
        edit_prompt = [ep for ep in edit_prompt if check_len(ep)]
        if len(edit_prompt) == 0:
            continue

        edited_image_url = os.path.join(edited_prefix, line['edited_image'])
        raw_image_url = os.path.join(raw_prefix, line['raw_image']) if 'raw_image' in line.keys() else ''

        texts = line.get('texts', [])
        if 'text_openbmb_MiniCPM_v2' in line.keys():
            texts += line['text_openbmb_MiniCPM_v2']

        if 'mask_image' in line.keys():
            mask_image_url = os.path.join(mask_prefix, line['mask_image'])
        else:
            mask_image_url = ''

        if 'loc_x1y1x2y2' in line.keys():
            loc_x1y1x2y2 = line['loc_x1y1x2y2']
            x1, y1, x2, y2 = loc_x1y1x2y2
            # Skip boxes whose width AND height are both under 64 px —
            # too small to inpaint usefully.
            if (x2 - x1) < 32 * 2 and (y2 - y1) < 32 * 2:
                continue
        else:
            loc_x1y1x2y2 = [0, 0, 0, 0]

        # Captions mentioning 'store' are known-bad for this task.
        texts = strip_feature(texts, 'store')
        assert isinstance(edit_prompt, list)
        metadata.append({
            'type': dataset_type,
            'can_use_mask': can_use_mask,
            'edited_image_url': edited_image_url,
            'edit_prompt': edit_prompt,
            'raw_image_url': raw_image_url,
            'texts': texts,
            'loc_x1y1x2y2': loc_x1y1x2y2,
            'mask_image_url': mask_image_url,
        })

    assert len(metadata) > 0, f'{meta_file} is illegal'
    return index, metadata



class InpaintingDataset(Dataset):
    """Weighted multi-task dataset for diffusion inpainting training.

    Samples are drawn at random (the index is ignored) from several
    jsonl-backed metadata lists; each draw picks a task (inpainting /
    restore / text2image / outpainting / remove) and returns a dict with:
        input_ids            tokenised prompt
        edited_pixel_values  normalised target image tensor
        maked_images         masked input tensor (sic — typo for 'masked';
                             kept because downstream code keys on it)
        mask                 binary mask tensor
    """
    def __init__(self,image_maps,tokenize_captions) -> None:
        # image_maps: list of (config_dict, weight) pairs — see process_map.
        # tokenize_captions: callable mapping a prompt string to token ids.
        super(InpaintingDataset,self).__init__()
        # One identifier (the jsonl path) per source map.
        self.data_identy = [image_map[0]['meta_file'] for image_map in image_maps]
        self.metadata = self.get_maps(image_maps)
        # Map target images to [-1, 1] float tensors.
        self.T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ])
        # Cross-weighting: each Image2Image map's weight is scaled by the
        # total Text2Image weight (with a x3 emphasis) and vice versa, so the
        # two families stay balanced regardless of how many maps each side
        # contributes.
        self.weight = []
        i2i_weight = 0
        t2i_weight = 0
        for im in image_maps:
            if im[0]['type'] == 'Image2ImageDataset':
                i2i_weight+=im[1]
            elif im[0]['type'] == 'Text2ImageDataset':
                t2i_weight+=im[1]
            else:
                raise ValueError()
        for im in image_maps:
            if im[0]['type'] == 'Image2ImageDataset':
                self.weight.append(im[1]*t2i_weight*3)
            elif im[0]['type'] == 'Text2ImageDataset':
                self.weight.append(im[1]*i2i_weight)
            else:
                raise ValueError()
        
        self.tokenize_captions = tokenize_captions

    def __len__(self):
        # Effectively infinite: items are random draws, so this only bounds
        # how many iterations a DataLoader epoch can run.
        return 100000000

    @staticmethod
    def get_maps(maps):
        """Parse all metadata files in parallel; returns {index: sample_list}."""
        metadata = {}

        # Thread pool with a fixed maximum number of workers.
        with ThreadPoolExecutor(max_workers=8) as executor:
            # Submit one parsing job per metadata map.
            futures = [executor.submit(process_map, index, map) for index, map in enumerate(maps)]
            
            for future in as_completed(futures):
                index, data = future.result()
                metadata[index] = data

        return metadata

    def __getitem__(self, index):
        # `index` is ignored: every call is an independent weighted draw.
        # NOTE(review): self.metadata is a dict, but random.choices indexes
        # it with integers — this only works because get_maps keys it with
        # 0..n-1.
        metadata =random.choices(self.metadata,weights=self.weight)[0]
        item = random.choice(metadata)

        if item['type'] == 'Image2ImageDataset':
            return self._getitem_from_image2imagedataset(item)
        elif item['type'] == 'Text2ImageDataset':
            return self._getitem_from_text2imagedataset(item)
        else:
            # NOTE(review): metadata is a list here, so metadata["type"]
            # would itself fail while formatting this message.
            raise ValueError(f'type {metadata["type"]} is not supported yet')

    def _getitem_from_image2imagedataset(self,item):
        # Samples with a real segmentation mask always run the main
        # inpainting task; otherwise draw a task ('text' and 'remove' are
        # currently disabled via zero weights).
        if item['can_use_mask']: # for oabench , pet seg mask, food seg
            return self.image_inpainting_task(item)
    
        task = random.choices(['inpainting','restore','text','outpainting','remove'],
                        weights=[40,           30,      0,        30,          0])[0]
        # main task    
        if task == 'inpainting':
            return self.image_inpainting_task(item)
        # aux task
        elif task == 'restore':
            return self.image_restore_task(item)
        # aux task
        elif task == 'remove':
            return self.image_remove_task(item)
        # aux task
        elif task == 'text':
            return self.text2image_task(item)
        # aux task
        elif task == 'outpainting':
            return self.image_outpainting_task(item)
        else:
            raise ValueError(f"{task} is not supported")

        
    def text2image_task(self,item):
        '''
        Mask the whole image, converting the sample into a pure
        text-to-image generation task.
        '''
        # NOTE(review): several of these locals (loc_x1y1x2y2, texts,
        # mask_image_url, raw_image_url and the first edit_prompt) are
        # unused — edit_prompt is rebuilt from `translations` below.
        edit_prompt = item['edit_prompt'][0]
        edited_image_url = item['edited_image_url']
        loc_x1y1x2y2 = item['loc_x1y1x2y2']
        texts = item['texts']
        mask_image_url = item['mask_image_url']
        raw_image_url = item['raw_image_url']
        
        edited_image = read_image_url(edited_image_url)
        
        # Full-frame mask: everything is generated.
        mask = generate_mask_resized([0,0,edited_image.width,edited_image.height],edited_image.width,edited_image.height).convert('L')
        mask = mask.resize((640*SCALE_INDEX,360*SCALE_INDEX))
                
        edited_image = edited_image.resize((640*SCALE_INDEX,360*SCALE_INDEX))


        mask, masked_image = prepare_mask_and_masked_image(edited_image,mask)
        visualize_masked_image(masked_image,'./text2image.png')
        # Cabin caption; for pet samples the object phrase is appended.
        aux_world = output_edit_prompt(item,simplify_prob=True,in_the_car=True)
        if 'dog' in aux_world or 'cat' in aux_world or 'pet' in aux_world:

            edit_prompt =random.choice(translations) +aux_world

        else:
            edit_prompt = random.choice(translations) 
        # Debug trace (red ANSI escape).
        print('\033[31m('+' text2image:'+edit_prompt+')\033[0m')

        mem = {
                'input_ids':self.tokenize_captions(edit_prompt)[0],
                'edited_pixel_values':self.T(edited_image),
                'maked_images':masked_image,
                'mask':mask
                } 
        return mem

    def image_restore_task(self,item):
        '''
        Unconditional image restoration: a randomly chosen, dilated
        irregular mask with an empty prompt.
        '''
        # NOTE(review): most of these locals are unused; edit_prompt is
        # overwritten with '' (the unconditional marker) below.
        edit_prompt = item['edit_prompt']
        edited_image_url = item['edited_image_url']
        loc_x1y1x2y2 = item['loc_x1y1x2y2']
        texts = item['texts']
        mask_image_url = item['mask_image_url']
        raw_image_url = item['raw_image_url']
        
        edited_image = read_image_url(edited_image_url)
        
        # Random irregular mask, heavily dilated (MaxFilter) to widen strokes.
        mask = random_irregular_mask_image().convert('L')
        mask = mask.resize((640*SCALE_INDEX,360*SCALE_INDEX))
        mask = mask.filter(ImageFilter.MaxFilter(size=MASK_SIZE_FILTER*13))
        edited_image = edited_image.resize((640*SCALE_INDEX,360*SCALE_INDEX))
        mask, masked_image = prepare_mask_and_masked_image(edited_image,mask)
        visualize_masked_image(masked_image,'./image_restore.png')
        edit_prompt = ''#uncondition mark
        # edit_prompt = torch.tensor([random.randint(0,49407) for i in range(77)])
        # Debug trace (red ANSI escape).
        print('\033[31m('+' imagerestore:'+edit_prompt+')\033[0m')

        mem = {
                'input_ids':self.tokenize_captions(edit_prompt)[0],
                'edited_pixel_values':self.T(edited_image),
                'maked_images':masked_image,
                'mask':mask
                } 
        return mem

    def image_inpainting_task(self,item):
        '''
        Main task: inpaint the object region (bbox mask, or a real
        segmentation mask when can_use_mask is set) given a text prompt.
        '''
        edit_prompt = random.choice(item['edit_prompt'])
        edited_image_url = item['edited_image_url']
        loc_x1y1x2y2 = item['loc_x1y1x2y2']
        texts = item['texts']
        mask_image_url = item['mask_image_url']
        raw_image_url = item['raw_image_url']
        edited_image = read_image_url(edited_image_url)

        x1 = loc_x1y1x2y2[0]
        y1 = loc_x1y1x2y2[1]
        x2 = loc_x1y1x2y2[2]
        y2 = loc_x1y1x2y2[3]
        # pad = 0
        # x1 = max(x1-pad,0)
        # y1 = max(y1-pad,0)
        # x2 = min(x2+pad,edited_image.width)
        # y2 = min(y2+pad,edited_image.height)
        loc_x1y1x2y2=[x1,y1,x2,y2]
        
        # Bbox mask by default; replaced by the real segmentation mask
        # (slightly dilated) when one is available.
        mask = generate_mask_resized(loc_x1y1x2y2 ,edited_image.width,edited_image.height)
        if item['can_use_mask']:
            mask = read_image_url(mask_image_url).convert('L')
            mask = mask.resize((640*SCALE_INDEX,360*SCALE_INDEX))
            mask = mask.filter(ImageFilter.MaxFilter(size=MASK_SIZE_FILTER))
        edited_image = edited_image.resize((640*SCALE_INDEX,360*SCALE_INDEX))

        # composite_image =  Image.blend(edited_image.convert('RGB'), mask.convert('RGB'), 0.3)
        # composite_image.save('./1.png')
        mask, masked_image = prepare_mask_and_masked_image(edited_image,mask)
   
        visualize_masked_image(masked_image,'./inpainting.png')
        # edit_prompt = output_edit_prompt(item)
        # Debug trace; single-character prompts print without the red escape.
        if len(edit_prompt)==1:
            print('('+' imageinpainting:'+edit_prompt+')')
        else:
            print('\033[31m('+' imageinpainting:'+edit_prompt+')\033[0m')

        mem = {
                'input_ids':self.tokenize_captions(edit_prompt)[0],
                'edited_pixel_values':self.T(edited_image),
                'maked_images':masked_image,
                'mask':mask
                } 
        return mem

    def image_outpainting_task(self,item):
        '''
        Outpainting: mask the image border and regenerate it, with an empty
        (unconditional) prompt.
        '''
        # NOTE(review): most of these locals are unused for this task.
        edit_prompt = item['edit_prompt']
        edited_image_url = item['edited_image_url']
        loc_x1y1x2y2 = item['loc_x1y1x2y2']
        texts = item['texts']
        mask_image_url = item['mask_image_url']
        raw_image_url = item['raw_image_url']
        edited_image = read_image_url(edited_image_url)
        edited_image = edited_image.resize((640*SCALE_INDEX,360*SCALE_INDEX))
        mask = generate_mask_for_outpainting(width=640*SCALE_INDEX,height=360*SCALE_INDEX)
        mask,masked_image = prepare_mask_and_masked_image(edited_image,mask)
        visualize_masked_image(masked_image,'./outpainting.png')
        edit_prompt = '' # unconditional outpainting for now
        mem = {
            'input_ids':self.tokenize_captions(edit_prompt)[0],
            'edited_pixel_values':self.T(edited_image),
            'maked_images':masked_image,
            'mask':mask
        }

        return mem

    def image_remove_task(self,item):
        '''
        Object removal: the masked input comes from the edited image (object
        present) while the training target is the raw image (object absent).
        '''
        edit_prompt = item['edit_prompt']
        edited_image_url = item['edited_image_url']
        loc_x1y1x2y2 = item['loc_x1y1x2y2']
        texts = item['texts']
        mask_image_url = item['mask_image_url']
        raw_image_url = item['raw_image_url']
        edited_image = read_image_url(edited_image_url)
        raw_image = read_image_url(raw_image_url)
        

        # Bbox mask by default; 50% of the time use the real segmentation
        # mask (dilated) when one is available.
        mask = generate_mask_resized(loc_x1y1x2y2 ,edited_image.width,edited_image.height)
        if item['can_use_mask']:
            if random.random()>0.5:
                mask = read_image_url(mask_image_url).convert('L')
                mask = mask.filter(ImageFilter.MaxFilter(size=MASK_SIZE_FILTER))
                mask = mask.resize((640*SCALE_INDEX,360*SCALE_INDEX))
        edited_image = edited_image.resize((640*SCALE_INDEX,360*SCALE_INDEX))
        raw_image = raw_image.resize((640*SCALE_INDEX,360*SCALE_INDEX))
        mask, masked_image = prepare_mask_and_masked_image(edited_image,mask)
        visualize_masked_image(masked_image,'./image_remove.png')
        edit_prompt = ''
        # Debug trace (red ANSI escape).
        print('\033[31m('+' imageremove:'+edit_prompt+')\033[0m')

        mem = {
                'input_ids':self.tokenize_captions(edit_prompt)[0],
                'edited_pixel_values':self.T(raw_image),
                'maked_images':masked_image,
                'mask':mask
                } 
        return mem

    def _getitem_from_text2imagedataset(self,item):
        # 'remove' has zero weight, so the else branch below effectively
        # handles only the 'text' task (full-frame mask text-to-image).
        task = random.choices(['restore','text','outpainting','remove'],
                        weights=[ 30,      40,        30,          0])[0]

        if task == 'restore':
            return self.image_restore_task(item)
        elif task == 'outpainting':
            return self.image_outpainting_task(item)
        else:
            edit_prompt = item['edit_prompt'][0]

            edited_image_url = item['edited_image_url']
            loc_x1y1x2y2 = item['loc_x1y1x2y2']
            texts = item['texts']
            mask_image_url = item['mask_image_url']

            edited_image = read_image_url(edited_image_url)
            # Full-frame mask: everything is generated from the prompt.
            mask = generate_mask_resized([0,0,edited_image.width,edited_image.height],edited_image.width,edited_image.height).convert('L')

            # if random.random()>0.5:
            #     mask = generate_mask_resized([0,0,edited_image.width,edited_image.height],edited_image.width,edited_image.height).convert('L')
            # else:
            #     mask = generate_mask_for_outpainting(width=640*2,height=360*2,revert=True)

            
            edited_image = edited_image.resize((640*SCALE_INDEX,360*SCALE_INDEX))
            mask, masked_image = prepare_mask_and_masked_image(edited_image,mask)
            # Debug trace (red ANSI escape).
            print('\033[31m('+' text2image:'+edit_prompt+')\033[0m')

            mem = {
                    'input_ids':self.tokenize_captions(edit_prompt)[0],
                    'edited_pixel_values':self.T(edited_image),
                    'maked_images':masked_image,
                    'mask':mask
                    } 
            return mem













































if __name__ == "__main__":
    image = '/mnt/afs2d/luotianhang/smartvehicle_diffusion/diffusers/examples/inpainting/1.png'
    image = Image.open(image)
    # draw = ImageDraw.Draw(image)
    # top_left = (659, 304)
    # bottom_right = (1078, 752)  
    # outline_color = 'red'
    # line_width = 5 
    # draw.rectangle([top_left, bottom_right], outline=outline_color, width=line_width)
    # image.save('rec.png')
    
    # mask = generate_mask([659, 304, 1078, 752],image.width,image.height)
    # image = image.resize((640*2,360*2))
    # result_image = Image.composite(image, Image.new('RGB', image.size), mask)

    # result_image.save('temp_mask.jpg')

    # /mnt/afs2d01/luotianhang/diffusion_data/trainning_data/image2image/jsonl_data/made_by_sdxl_inpanting/pet/image2image_metadata2_3_fixed_fix_loc_addmask.jsonl