import pickle
import random
from pathlib import Path
import ast
import numpy as np
import re
import json
import time
from functools import partial
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os.path as osp
import torch
import torchvision.transforms as T
import torch.nn.functional as F
from torchvision.transforms import functional as TF
from torch.utils.data import Dataset

from IndexKits.index_kits import ArrowIndexV2, MultiResolutionBucketIndexV2, MultiIndexV2
from .mask import (random_bbox, bbox2mask_uncropping, random_cropping_bbox,bbox2mask, brush_stroke_mask)
from .mask_bg import (get_random_size_box_with_constraint, get_random_size_ellipse_with_constraint, brush_stroke_mask_with_constraint)


class TextImageArrowStreamCustom(Dataset):
    def __init__(self,
                 args,
                 resolution=512,
                 random_flip=None,
                 enable_CN=True,
                 log_fn=print,
                 index_file=None,
                 multireso=False,
                 batch_size=-1,
                 world_size=1,
                 random_shrink_size_cond=False,  # size-condition option; only stored here
                 merge_src_cond=False,  # size-condition option; only logged here
                 uncond_p=0.0,
                 text_ctx_len=77,
                 tokenizer=None,
                 ):
        """Text-image dataset streaming samples described by a JSON-Lines index.

        Args:
            args: run configuration; must provide ``is_word``, ``is_inpaint``,
                ``is_repaint``, ``is_textmask`` and ``masktype``.
            resolution (int): square training resolution used when ``multireso``
                is False.
            random_flip: enable random horizontal flipping of image+masks.
            enable_CN (bool): take prompts from ``text_zh`` when True, else
                ``text_en`` (whichever exists in the data).
            log_fn: logging callable; messages are prefixed with this module name.
            index_file: sequence whose first element is a JSON-Lines file with
                one record per line, e.g. ``{"imgpath": ..., "text_zh": ...}``.
            multireso (bool): use multi-resolution bucketing via
                ``MultiResolutionBucketIndexV2``.
            batch_size, world_size: passed to the bucket index when ``multireso``.
            uncond_p (float): probability of dropping the caption
                (unconditional training).
            text_ctx_len (int): tokenizer max length.
            tokenizer: HuggingFace-style tokenizer used by
                ``get_text_info_with_encoder``.
        """
        self.args = args
        self.resolution = resolution
        self.log_fn = lambda x: log_fn(f"    {Path(__file__).stem} | " + x)

        self.random_flip = random_flip
        # If true, the Chinese prompt from the `text_zh` column will be taken from the arrow file;
        # otherwise, the English prompt from the `text_en` column will be taken,
        # provided that `text_zh` or `text_en` exists in the arrow file.
        self.enable_CN = enable_CN
        self.index_file = index_file  # index_file[0] is a JSON-Lines file: {'imgpath': ..., 'text_zh': ...}
        self.multireso = multireso
        self.batch_size = batch_size
        self.world_size = world_size

        # Task flags from `args` select which sample-building path is used.
        self.is_word = args.is_word
        if self.is_word:
            print('Training word images!')

        # training inpainting task
        self.is_inpaint = args.is_inpaint
        if self.is_inpaint:
            print('Training inpainting task!')

        self.is_repaint = args.is_repaint
        if self.is_repaint:
            print('Training repainting task!')

        self.is_textmask = args.is_textmask
        if self.is_textmask:
            print('Training using textmask!')

        if self.multireso:
            # The bucket index is only built in multireso mode.
            self.index_manager = self.load_index()
            print("index_manager len!!!!!!!", len(self.index_manager))

        # Caption dropout / tokenizer settings.
        self.uncond_p = uncond_p
        self.text_ctx_len = text_ctx_len
        self.tokenizer = tokenizer

        # size condition
        self.random_shrink_size_cond = random_shrink_size_cond
        self.merge_src_cond = merge_src_cond

        self.data = self.load_data(self.index_file[0])
        print('train data num!!!!!!!', len(self.data))

        assert isinstance(resolution, int), f"resolution must be an integer, got {resolution}"
        # Flip+normalize transform (legacy path; the mask-aware builders use
        # `self.flip` + `self.image_transforms` so masks can flip in sync).
        self.flip_norm = T.Compose(
            [
                T.RandomHorizontalFlip() if self.random_flip else T.Lambda(lambda x: x),
                T.ToTensor(),
                T.Normalize([0.5], [0.5]),
            ]
        )

        # Deterministic flip, applied manually to image+mask pairs together.
        self.flip = T.RandomHorizontalFlip(p=1.0)

        # Normalization-only transform: maps [0, 1] to [-1, 1].
        self.image_transforms = T.Compose(
            [
                T.ToTensor(),
                T.Normalize([0.5], [0.5]),
            ]
        )

        self.masktype = args.masktype

        # show info
        if self.merge_src_cond:
            self.log_fn("Enable merging src condition: (oriW, oriH) --> ((WH)**0.5, (WH)**0.5)")

        self.log_fn("Enable image_meta_size condition (original_size, target_size, crop_coords)")
        self.log_fn(f"Image_transforms: {self.flip_norm}")


    def load_data(self, json_file):
        """Load a JSON-Lines file into a list of dicts.

        Args:
            json_file: path to a file with one JSON object per line.

        Returns:
            list[dict]: one parsed record per line.
        """
        datalist = []
        # `with` guarantees the handle is closed (the original leaked it).
        with open(json_file, 'r', encoding='utf-8') as f:
            for line in f:
                datalist.append(json.loads(line))
        return datalist


    def load_index(self):
        if self.multireso:
            index_manager = MultiResolutionBucketIndexV2(
                self.batch_size, self.world_size
            )
            self.log_fn(f"Using MultiResolutionBucketIndexV2: {len(index_manager):,}")

        return index_manager

    def shuffle(self, seed, fast=False):
        self.index_manager.shuffle(seed, fast=fast)

    def get_raw_image(self, index, image_key="image"):
        try:
            ret = self.index_manager.get_image(index, image_key)
        except Exception as e:
            self.log_fn(f'get_raw_image | Error: {e}')
            ret = Image.new("RGB", (256, 256), (255, 255, 255))
        return ret

    @staticmethod
    def random_crop_image(image, origin_size, target_size, name=None):
        """Resize so the shorter side fills the target, then take a random crop.

        Args:
            image: PIL image to crop.
            origin_size: original (width, height).
            target_size: desired (width, height) of the crop.
            name: optional identifier of the source image. Accepted for
                debugging by callers (one call site passes ``name=``, which
                raised TypeError against the original signature); unused here.

        Returns:
            (cropped image, (x_start, y_start), (new_width, new_height)).
        """
        aspect_ratio = float(origin_size[0]) / float(origin_size[1])
        # Scale so the shorter side exactly matches the target on that axis.
        if origin_size[0] < origin_size[1]:
            new_width = target_size[0]
            new_height = int(new_width / aspect_ratio)
        else:
            new_height = target_size[1]
            new_width = int(new_height * aspect_ratio)

        image = image.resize((new_width, new_height), Image.LANCZOS)

        # Random offset along the axis that overflows the target.
        if new_width > target_size[0]:
            x_start = random.randint(0, new_width - target_size[0])
            y_start = 0
        else:
            x_start = 0
            y_start = random.randint(0, new_height - target_size[1])
        image_crop = image.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))
        crops_coords_top_left = (x_start, y_start)

        return image_crop, crops_coords_top_left, (new_width, new_height)


    def get_style(self, index):
        """Return the style id for a sample.

        Always 0 for now: a single default learned-embedding slot, reserved
        for future extension.
        """
        default_style = 0
        return default_style


    def get_mask_from_key(self, index, size, image_key="textmaskpath"):
        """Load the mask image referenced by ``image_key`` in record ``index``.

        Returns an all-black RGB mask of the given (width, height) when the
        record has no such key or the path is the empty string.
        """
        record = self.data[index]
        if image_key in record and record[image_key] != "":
            return Image.open(record[image_key]).convert("RGB")
        return Image.new('RGB', (size[0], size[1]), (0, 0, 0))

    def process_mask_binary(self, mask, reverse=False):
        """Binarize a PIL mask into a float32 tensor of shape (1, H, W).

        Pixels >= 128 become 1, the rest 0. With ``reverse`` the result is
        inverted (1 - mask).
        """
        arr = np.asarray(mask.convert("L"), dtype=np.float32) / 255.0
        binary = np.where(arr < 0.5, 0.0, 1.0).astype(np.float32)[None]
        if reverse:
            binary = 1 - binary
        return torch.from_numpy(binary)


    def get_target_size_multireso(self, image_size):
        """Pick the bucket resolution (w, h) closest to the image aspect ratio.

        Buckets are keyed as "HxW" strings mapped to their h/w ratio; one set
        for landscape/square inputs, one for portrait inputs.
        """
        width, height = image_size
        landscape = {'768x1280':0.6, '960x1280':0.75, '1024x1024':1, '560x1280':0.4375, '704x1280':0.55, '848x1280':0.6625}
        portrait = {'1280x960':1.3333, '1280x768':1.6667, '1280x832':1.5385, '1280x800':1.6, '1280x576':2.2222}

        buckets = landscape if height <= width else portrait
        aspect = height / width
        best_key = min(buckets, key=lambda k: abs(aspect - buckets[k]))
        h_str, w_str = best_key.split("x")
        return int(w_str), int(h_str)  # (w, h)


    def get_image_with_hwxy_custom(self, index, image_key="image"):
        """Load sample ``index``'s image, resize+crop to the target resolution,
        and return (image_tensor, image_meta_size, kwargs).

        ``image_meta_size`` packs (resized_h, resized_w, crop_top, crop_left,
        target_h, target_w) for size conditioning.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size

        if self.multireso:
            # Bucketed resolution: pick the closest aspect-ratio bucket and let
            # the index manager do the resize + random crop.
            target_size = self.get_target_size_multireso(origin_size)
            image_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
        else:
            # Fixed square resolution with our own random crop.
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)

        image_tensor = self.flip_norm(image_crop)

        # (resized_h, resized_w, crop_top, crop_left, target_h, target_w)
        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, image_meta_size, kwargs


    def get_image_with_hwxy_custom_textmask_logmask(self, index, image_key="image"):
        """Load an image plus its text and logo masks, all put through the same
        resize/crop/flip, and return (image_tensor, textmask_crop,
        image_meta_size, kwargs).

        The returned ``textmask_crop`` is 0 on text/logo pixels and 1 elsewhere.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size
        textmask = self.get_mask_from_key(index, origin_size, image_key="textmaskpath")
        logomask = self.get_mask_from_key(index, origin_size, image_key="logomaskpath")

        if self.multireso:
            target_size = self.get_target_size_multireso(origin_size)
            image_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)

        # Apply the same resize + crop to both masks so they stay aligned.
        x_start, y_start = crops_coords_top_left
        textmask = textmask.resize(resize_width_height, Image.LANCZOS)
        logomask = logomask.resize(resize_width_height, Image.LANCZOS)
        textmask_crop = textmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))
        logomask_crop = logomask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))

        # Flip image and masks together so they stay aligned.
        if self.random_flip and random.random() < 0.5:
            image_crop = self.flip(image_crop)
            textmask_crop = self.flip(textmask_crop)
            logomask_crop = self.flip(logomask_crop)

        image_tensor = self.image_transforms(image_crop)
        textmask_crop = self.process_mask_binary(textmask_crop, reverse = False)
        logomask_crop = self.process_mask_binary(logomask_crop, reverse = False)
        # Treat logo pixels like text pixels before the inversion below.
        textmask_crop[logomask_crop == 1] = 1
        # Source textmasks have black text on white background; invert so that
        # text = 0, non-text = 1.
        textmask_crop = 1 - textmask_crop

        # (resized_h, resized_w, crop_top, crop_left, target_h, target_w)
        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, textmask_crop, image_meta_size, kwargs

    def get_outpaint_mask(self, image_size):
        """Random outpainting mask of shape (h, w, 1); 1 = unknown, 0 = known."""
        bbox = random_cropping_bbox(img_shape=image_size, mask_mode='bilateral')
        return bbox2mask_uncropping(image_size, bbox)
    
    def get_inpaint_mask(self, image_size):
        """Random inpainting mask; 1 = unknown region, 0 = known region.

        Union of a random rectangle (side capped at 30% of the larger image
        dimension) and random brush strokes.
        """
        side = int(max(image_size) * 0.3)
        box_mask = bbox2mask(image_size, random_bbox(img_shape=image_size, max_bbox_shape=(side, side)))
        stroke_mask = brush_stroke_mask(image_size, num_vertices=(4, 8), brush_width=(12, 30))
        return box_mask | stroke_mask
    
    def get_inpaint_mask_foreground_aware(self, image_size, mask_foreground):
        # NOTE(review): this definition is DEAD CODE — it is shadowed by the
        # redefinition of the same method further down in this class, which
        # scales the brush width to the image size. Kept only for reference.
        # Output mask: 1 = unknown/erase region, 0 = known region.
        # mask_foreground: 1 = foreground, 0 = background.
        draw_type = random.randint(0, 2)
        if draw_type == 0:
            # free-form brush strokes with a fixed width range
            mask = brush_stroke_mask_with_constraint(image_size, mask_foreground, num_vertices=(15, 18), brush_width=(150, 180),max_loops=1)
        elif draw_type == 1: 
            # random rectangle
            mask = get_random_size_box_with_constraint(image_size, mask_foreground, ratio=0.6, min_valid_ratio=0.6)
        else:
            # random ellipse
            mask = get_random_size_ellipse_with_constraint(image_size, mask_foreground, ratio=0.6, min_valid_ratio=0.6)
        # never erase foreground pixels
        mask[mask_foreground == 1] = 0

        return mask


    def get_image_with_hwxy_custom_textmask_logmask_inpainting(self, index, image_key="image"):
        """Build an inpaint/outpaint training sample with text protection.

        The unknown region comes from the record's ``commoditymaskpath`` when
        present; otherwise a random outpaint (p=0.6) or inpaint (p=0.4) mask is
        generated. Text regions (``textmaskpath``) are never masked out.

        Returns:
            (image_tensor, masked_image_tensor, mask, textmask_crop,
            image_meta_size, kwargs); ``mask`` is (1, h, w), 1 = unknown,
            0 = known.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size
        textmask = self.get_mask_from_key(index, origin_size, image_key="textmaskpath")
        inpaintmask = self.get_mask_from_key(index, origin_size, image_key="commoditymaskpath")

        if self.multireso:
            target_size = self.get_target_size_multireso(origin_size)
            image_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
        else:
            target_size = (self.resolution, self.resolution)
            # Fix: the original passed an unsupported `name=` kwarg here, which
            # raised TypeError whenever this non-multireso path was taken.
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)

        # Apply the same resize + crop to the masks so they stay aligned.
        x_start, y_start = crops_coords_top_left
        textmask = textmask.resize(resize_width_height, Image.LANCZOS)
        inpaintmask = inpaintmask.resize(resize_width_height, Image.LANCZOS)
        textmask_crop = textmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))
        inpaintmask_crop = inpaintmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))

        # Flip image and masks together so they stay aligned.
        if self.random_flip and random.random() < 0.5:
            image_crop = self.flip(image_crop)
            textmask_crop = self.flip(textmask_crop)
            inpaintmask_crop = self.flip(inpaintmask_crop)

        image_tensor = self.image_transforms(image_crop)
        textmask_crop = self.process_mask_binary(textmask_crop, reverse=False)
        # Commodity mask convention: 0 on the known commodity, 1 on the
        # unknown background (already matches mask semantics; no inversion).
        inpaintmask_crop = self.process_mask_binary(inpaintmask_crop, reverse=False)

        # Source textmasks have black text on white background; invert so that
        # text = 0, non-text = 1.
        textmask_crop = 1 - textmask_crop

        # Use the commodity mask when available; otherwise generate a random
        # outpaint/inpaint mask.
        if 'commoditymaskpath' in self.data[index].keys() and self.data[index]['commoditymaskpath'] != "":
            mask = inpaintmask_crop
        else:
            imgsize = [image_crop.size[1], image_crop.size[0]]  # [h, w]
            if np.random.uniform() > 0.4:
                mask = self.get_outpaint_mask(imgsize)  # (h, w, 1); 1 = unknown
            else:
                mask = self.get_inpaint_mask(imgsize)  # (h, w, 1); 1 = unknown
            mask[mask < 0.5] = 0
            mask[mask >= 0.5] = 1
            mask = mask.transpose(2, 0, 1).astype(np.float32)  # (1, h, w)
            mask = torch.from_numpy(mask)  # tensor in {0, 1}

        mask[textmask_crop == 0] = 0  # never mask out text regions
        masked_image_tensor = (1 - mask) * image_tensor

        # (resized_h, resized_w, crop_top, crop_left, target_h, target_w)
        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, masked_image_tensor, mask, textmask_crop, image_meta_size, kwargs


    def get_image_with_hwxy_custom_textmask_gasmask_inpainting(self, index, image_key="image"):
        """Build an inpaint/outpaint training sample using the gas-station mask.

        The unknown region comes from the record's ``gasmaskpath`` when
        present; otherwise a random outpaint (p=0.6) or inpaint (p=0.4) mask is
        generated. Text regions (``textmaskpath``) are never masked out.

        Returns:
            (image_tensor, masked_image_tensor, mask, textmask_crop,
            image_meta_size, kwargs); ``mask`` is (1, h, w), 1 = unknown,
            0 = known.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size
        textmask = self.get_mask_from_key(index, origin_size, image_key="textmaskpath")
        inpaintmask = self.get_mask_from_key(index, origin_size, image_key="gasmaskpath")

        if self.multireso:
            target_size = self.get_target_size_multireso(origin_size)
            image_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)

        # Apply the same resize + crop to the masks so they stay aligned.
        x_start, y_start = crops_coords_top_left
        textmask = textmask.resize(resize_width_height, Image.LANCZOS)
        inpaintmask = inpaintmask.resize(resize_width_height, Image.LANCZOS)
        textmask_crop = textmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))
        inpaintmask_crop = inpaintmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))

        # Flip image and masks together so they stay aligned.
        if self.random_flip and random.random() < 0.5:
            image_crop = self.flip(image_crop)
            textmask_crop = self.flip(textmask_crop)
            inpaintmask_crop = self.flip(inpaintmask_crop)

        image_tensor = self.image_transforms(image_crop)
        textmask_crop = self.process_mask_binary(textmask_crop, reverse=False)
        # Gas mask convention: 0 on the known gas-station region, 1 on the
        # unknown background (already matches mask semantics; no inversion).
        inpaintmask_crop = self.process_mask_binary(inpaintmask_crop, reverse=False)

        # Source textmasks have black text on white background; invert so that
        # text = 0, non-text = 1.
        textmask_crop = 1 - textmask_crop

        # Use the gas-station mask when available (the original comment wrongly
        # said "commoditymaskpath"); otherwise generate a random mask.
        if 'gasmaskpath' in self.data[index].keys() and self.data[index]['gasmaskpath'] != "":
            mask = inpaintmask_crop
        else:
            imgsize = [image_crop.size[1], image_crop.size[0]]  # [h, w]
            if np.random.uniform() > 0.4:
                mask = self.get_outpaint_mask(imgsize)  # (h, w, 1); 1 = unknown
            else:
                mask = self.get_inpaint_mask(imgsize)  # (h, w, 1); 1 = unknown
            mask[mask < 0.5] = 0
            mask[mask >= 0.5] = 1
            # Fix: cast to float32 like the logmask variant does, so both
            # branches return a float mask instead of an int/uint8 one.
            mask = mask.transpose(2, 0, 1).astype(np.float32)  # (1, h, w)
            mask = torch.from_numpy(mask)  # tensor in {0, 1}

        mask[textmask_crop == 0] = 0  # never mask out text regions
        masked_image_tensor = (1 - mask) * image_tensor

        # (resized_h, resized_w, crop_top, crop_left, target_h, target_w)
        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, masked_image_tensor, mask, textmask_crop, image_meta_size, kwargs


    def get_inpaint_mask_foreground_aware(self, image_size, mask_foreground):
        """Random erase mask that avoids the foreground.

        Output: 1 = unknown/erase region, 0 = known region.
        ``mask_foreground`` is 1 on foreground pixels, 0 on background.
        """
        shape_choice = random.randint(0, 2)
        if shape_choice == 0:
            # Free-form brush strokes, width scaled to the image size.
            width_lo = int(min(image_size) * 0.15)
            width_hi = int(min(image_size) * 0.2)
            mask = brush_stroke_mask_with_constraint(
                image_size, mask_foreground, num_vertices=(15, 18),
                brush_width=(width_lo, width_hi), max_loops=1)
        elif shape_choice == 1:
            # Random rectangle.
            mask = get_random_size_box_with_constraint(image_size, mask_foreground, ratio=0.6, min_valid_ratio=0.6)
        else:
            # Random ellipse.
            mask = get_random_size_ellipse_with_constraint(image_size, mask_foreground, ratio=0.6, min_valid_ratio=0.6)

        # Never erase foreground pixels.
        mask[mask_foreground == 1] = 0
        return mask


    def get_image_textmask_erasermask(self, index, image_key="image"):
        """Build an eraser-style inpainting sample.

        A random foreground-aware erase mask is generated (brush / box /
        ellipse) that avoids both the record's foreground masks and its text
        regions.

        Returns:
            (image_tensor, masked_image_tensor, mask, textmask_crop,
            image_meta_size, kwargs); ``mask`` is (1, h, w), 1 = erase,
            0 = keep.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size
        textmask = self.get_mask_from_key(index, origin_size, image_key="textmaskpath")

        # Union all foreground masks listed under 'maskpath' into one; fall
        # back to an all-black (no foreground) mask when missing or unreadable.
        try:
            if self.data[index].get('maskpath') is None or self.data[index].get('maskpath') == "":
                fgmask = Image.new('RGB', (image.size[0], image.size[1]), (0, 0, 0))
            else:
                maskpathlist = self.data[index].get('maskpath')
                fgmask = np.zeros((image.size[1], image.size[0], 3), dtype=np.uint8)
                for maskpath in maskpathlist:
                    mask = np.array(Image.open(maskpath).convert("RGB")) / 255
                    mask[mask < 0.5] = 0
                    mask[mask >= 0.5] = 1
                    fgmask[mask == 1] = 1
                fgmask = Image.fromarray((fgmask * 255).astype(np.uint8))
        # Fix: was a bare `except:` that silently swallowed everything,
        # including KeyboardInterrupt/SystemExit. Keep the best-effort
        # fallback but only for ordinary errors, and log them.
        except Exception as e:
            self.log_fn(f'get_image_textmask_erasermask | foreground mask error: {e}')
            fgmask = Image.new('RGB', (image.size[0], image.size[1]), (0, 0, 0))

        if self.multireso:
            target_size = self.get_target_size_multireso(origin_size)
            image_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)

        # Apply the same resize + crop to the masks so they stay aligned.
        x_start, y_start = crops_coords_top_left
        textmask = textmask.resize(resize_width_height, Image.LANCZOS)
        fgmask = fgmask.resize(resize_width_height, Image.LANCZOS)
        textmask_crop = textmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))
        fgmask_crop = fgmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))

        # Flip image and masks together so they stay aligned.
        if self.random_flip and random.random() < 0.5:
            image_crop = self.flip(image_crop)
            textmask_crop = self.flip(textmask_crop)
            fgmask_crop = self.flip(fgmask_crop)

        image_tensor = self.image_transforms(image_crop)
        textmask_crop = self.process_mask_binary(textmask_crop, reverse=False)
        fgmask_crop = self.process_mask_binary(fgmask_crop, reverse=False).squeeze()  # (h, w); 1 = foreground

        # Source textmasks have black text on white background; invert so that
        # text = 0, non-text = 1.
        textmask_crop = 1 - textmask_crop

        # Random erase mask that avoids the foreground: 1 = erase, 0 = keep.
        mask = self.get_inpaint_mask_foreground_aware((fgmask_crop.shape[0], fgmask_crop.shape[1]), np.array(fgmask_crop))
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask[None].astype(np.float32))  # (1, h, w) in {0, 1}

        mask[textmask_crop == 0] = 0  # never erase text regions
        masked_image_tensor = (1 - mask) * image_tensor

        # (resized_h, resized_w, crop_top, crop_left, target_h, target_w)
        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, masked_image_tensor, mask, textmask_crop, image_meta_size, kwargs

    def get_image_textmask_repaint(self, index, image_key="image"):
        """Build a repaint sample: mask one randomly chosen foreground object.

        One entry of the record's ``maskpath`` list is picked at random as the
        region to repaint, with the matching ``text_zh`` caption returned as
        ``fgtext``. Text regions are never masked out.

        Returns:
            (image_tensor, masked_image_tensor, fgmask_crop, textmask_crop,
            image_meta_size, fgtext, kwargs); ``fgmask_crop`` is (1, h, w),
            1 = repaint region, 0 = keep.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size
        textmask = self.get_mask_from_key(index, origin_size, image_key="textmaskpath")

        fgtext = ''
        # Pick one foreground mask (and its caption) at random; fall back to
        # an all-black (no foreground) mask when missing or unreadable.
        try:
            if self.data[index].get('maskpath') is None or self.data[index].get('maskpath') == "":
                fgmask = Image.new('RGB', (image.size[0], image.size[1]), (0, 0, 0))
            else:
                maskpathlist = self.data[index].get('maskpath')
                masktextlist = self.data[index].get('text_zh')
                i = random.randint(0, len(maskpathlist) - 1)
                fgmask = Image.open(maskpathlist[i]).convert("RGB")
                fgtext = masktextlist[i]
        # Fix: was a bare `except:` that silently swallowed everything,
        # including KeyboardInterrupt/SystemExit. Keep the best-effort
        # fallback but only for ordinary errors, and log them.
        except Exception as e:
            self.log_fn(f'get_image_textmask_repaint | foreground mask error: {e}')
            fgmask = Image.new('RGB', (image.size[0], image.size[1]), (0, 0, 0))

        if self.multireso:
            target_size = self.get_target_size_multireso(origin_size)
            image_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)

        # Apply the same resize + crop to the masks so they stay aligned.
        x_start, y_start = crops_coords_top_left
        textmask = textmask.resize(resize_width_height, Image.LANCZOS)
        fgmask = fgmask.resize(resize_width_height, Image.LANCZOS)
        textmask_crop = textmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))
        fgmask_crop = fgmask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))

        # Flip image and masks together so they stay aligned.
        if self.random_flip and random.random() < 0.5:
            image_crop = self.flip(image_crop)
            textmask_crop = self.flip(textmask_crop)
            fgmask_crop = self.flip(fgmask_crop)

        image_tensor = self.image_transforms(image_crop)
        textmask_crop = self.process_mask_binary(textmask_crop, reverse=False)
        fgmask_crop = self.process_mask_binary(fgmask_crop, reverse=False)  # (1, h, w); 1 = foreground

        # Source textmasks have black text on white background; invert so that
        # text = 0, non-text = 1.
        textmask_crop = 1 - textmask_crop

        fgmask_crop[textmask_crop == 0] = 0  # never mask out text regions
        masked_image_tensor = (1 - fgmask_crop) * image_tensor

        # (resized_h, resized_w, crop_top, crop_left, target_h, target_w)
        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, masked_image_tensor, fgmask_crop, textmask_crop, image_meta_size, fgtext, kwargs



    def prepare_mask_and_masked_image(self, image, mask):
        """Convert a PIL image/mask pair into (mask, masked_image) tensors.

        The image is scaled to [-1, 1] with shape (3, H, W); the mask is
        binarized to {0, 1} with shape (1, H, W). Pixels where mask == 1 are
        zeroed out in ``masked_image``.
        """
        rgb = np.array(image.convert("RGB")).transpose(2, 0, 1)
        image_tensor = torch.from_numpy(rgb).to(dtype=torch.float32) / 127.5 - 1.0

        gray = np.array(mask.convert("L")).astype(np.float32) / 255.0
        gray = gray[None]
        gray[gray < 0.5] = 0
        gray[gray >= 0.5] = 1
        mask_tensor = torch.from_numpy(gray)

        # Keep only the known (unmasked) pixels.
        masked_image = image_tensor * (mask_tensor < 0.5)

        return mask_tensor, masked_image

    def get_image_with_hwxy_custom_inpainting(self, index, image_key="image"):
        """Fixed-resolution inpainting sample built from the record's
        ``gasmaskpath`` mask (despite the local variable name, this path reads
        the gas-station mask, not a commodity mask).

        Always uses the square ``self.resolution`` crop (no multireso here).
        ``textmask_crop`` is all-ones: no text protection on this path.

        Returns:
            (image_tensor, masked_image_tensor, mask, textmask_crop,
            image_meta_size, kwargs).
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        commoditymask = Image.open(self.data[index]['gasmaskpath'])

        origin_size = image.size

        target_size = (self.resolution, self.resolution)
        image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)
        # Apply the same resize + crop to the mask so it stays aligned.
        x_start, y_start = crops_coords_top_left
        commoditymask = commoditymask.resize(resize_width_height, Image.LANCZOS)
        commoditymask_crop = commoditymask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))

        # (The original computed an unused `name` local here; removed.)
        image_tensor = self.image_transforms(image_crop)
        mask, masked_image_tensor = self.prepare_mask_and_masked_image(image_crop, commoditymask_crop)
        textmask_crop = torch.ones_like(mask)

        # (resized_h, resized_w, crop_top, crop_left, target_h, target_w)
        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, masked_image_tensor, mask, textmask_crop, image_meta_size, kwargs


    def get_text_info_with_encoder(self, description):
        pad_num = 0
        text_inputs = self.tokenizer(
            description,
            padding="max_length",
            max_length=self.text_ctx_len,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids[0]
        attention_mask = text_inputs.attention_mask[0]
        position_ids = text_inputs.position_ids[0]

        # if pad_num > 0:
        #     attention_mask[1:pad_num + 1] = False
        return text_input_ids, attention_mask, position_ids

    def get_original_text(self, ind):
        text = self.index_manager.get_attribute(ind, 'text_zh' if self.enable_CN else 'text_en')
        text = str(text).strip()
        return text

    def get_text(self, ind):
        text =  self.get_original_text(ind)
        if text == '':
            text = '随机生成一张图片'
        return text

    def get_word_pos(self, ind):
        """Map a 3x3-grid position index (0-8, row-major from top-left) to its
        Chinese label. Raises KeyError for any other index.
        """
        labels = ['左上角', '上方', '右上角',
                  '左侧', '中间', '右侧',
                  '左下方', '下方', '右下角']
        idx2position = {i: label for i, label in enumerate(labels)}
        return idx2position[ind]

    def __getitem__(self, ind):
        # Get text
        if random.random() < self.uncond_p:
            description = ""
        else:
            # description = self.data[ind]['text_zh']
            description = self.data[ind].get('text_zh', "")
            if self.is_word:
                word = self.data[ind]['word']
                # pos = self.get_word_pos(self.data[ind]['pos'])
                word_size = self.data[ind]['font_size']
                word_mode = '竖版' if self.data[ind]['vertical'] else '横版'
                word_font = self.data[ind]['font']

                # word_prompt = f'图像的{pos}写着“{word}”。'
                # word_prompt = f'图像上写着“{word}”。'
                # word_prompt = f'图像上写着“{word}”，版式为{word_mode}。'
                # word_prompt = f'图像上写着“{word}”，文字大小是{word_size}。'
                # word_prompt = f'图像上写着“{word}”，字体是{word_font}。'
                word_prompt = f'图像上写着“{word}”，字体是{word_font}，文字大小是{word_size}，版式为{word_mode}。'
                # word_prompt = f'图像上写着“{word}”，字体是{word_font}，版式为{word_mode}。'
                description = word_prompt + description
            description = description[0:256]

        if self.is_inpaint:
            description = "艺术海报,高质量"

        # Use encoder to embed tokens online
        text_embedding, text_embedding_mask, text_position_ids = self.get_text_info_with_encoder(description)

        if self.is_inpaint:
            if self.masktype == "eraser":
                original_pil_image, masked_image_tensor, mask, textmask, image_meta_size, kwargs = self.get_image_textmask_erasermask(ind)
            elif self.masktype == 'inpaint_outpaint':
                original_pil_image, masked_image_tensor, mask, textmask, image_meta_size, kwargs = self.get_image_with_hwxy_custom_textmask_logmask_inpainting(ind)
            
            return (
                original_pil_image,
                masked_image_tensor,
                mask,
                textmask,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

        if self.is_repaint:
            original_pil_image, masked_image_tensor, fgmask, textmask, image_meta_size, fgtext, kwargs = self.get_image_textmask_repaint(ind)
            text_embedding, text_embedding_mask, text_position_ids = self.get_text_info_with_encoder(fgtext)
            return (
                original_pil_image,
                masked_image_tensor,
                fgmask,
                textmask,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

        elif self.is_textmask:
            original_pil_image, textmask, image_meta_size, kwargs = self.get_image_with_hwxy_custom_textmask_logmask(ind)

            return (
                original_pil_image,
                textmask,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

        else:
            original_pil_image, image_meta_size, kwargs = self.get_image_with_hwxy_custom(ind)

            return (
                original_pil_image,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

    def __len__(self):
        if self.multireso:
            return len(self.index_manager)
        else:
            return len(self.data)

