import pickle
import random
from pathlib import Path
import ast
import numpy as np
import re
import json
import time
from functools import partial
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

import torch
import torchvision.transforms as T
import torch.nn.functional as F
from torchvision.transforms import functional as TF
from torch.utils.data import Dataset

from IndexKits.index_kits import ArrowIndexV2, MultiResolutionBucketIndexV2, MultiIndexV2
from .mask import (bbox2mask, brush_stroke_mask, get_irregular_mask, random_bbox, bbox2mask_uncropping, random_cropping_bbox)

class TextImageArrowStreamCustom(Dataset):
    def __init__(self,
                 args,
                 resolution=512,
                 random_flip=None,
                 enable_CN=True,
                 log_fn=print,
                 index_file=None,
                 multireso=False,
                 batch_size=-1,
                 world_size=1,
                 random_shrink_size_cond=False,
                 merge_src_cond=False,
                 uncond_p=0.0,
                 text_ctx_len=77,
                 tokenizer=None,
                 ):
        """Streaming text-image dataset backed by a JSON-lines record file and,
        optionally, a multi-resolution bucket index.

        Args:
            args: experiment config; must expose ``is_word``, ``is_inpaint``
                and ``is_textmask`` boolean flags.
            resolution: square crop size used when ``multireso`` is False.
            random_flip: truthy to enable random horizontal flips.
            enable_CN: read captions from 'text_zh' when True, else 'text_en'
                (arrow-index mode only).
            log_fn: logging callable; messages get prefixed with this file's stem.
            index_file: sequence whose FIRST element is the JSON-lines data file.
            multireso: enable multi-resolution bucket batching via IndexKits.
            batch_size: per-rank batch size (consumed by the bucket index).
            world_size: number of distributed ranks (consumed by the bucket index).
            random_shrink_size_cond: clamp the origin-size condition to >= 1024.
            merge_src_cond: collapse (W, H) condition to (sqrt(WH), sqrt(WH)).
            uncond_p: requested caption-dropout probability (currently ignored,
                see note below).
            text_ctx_len: tokenizer max length.
            tokenizer: HF-style tokenizer used by ``get_text_info_with_encoder``.
        """
        self.args = args
        self.resolution = resolution
        self.log_fn = lambda x: log_fn(f"    {Path(__file__).stem} | " + x)

        self.random_flip = random_flip
        # If true, the Chinese prompt from the `text_zh` column will be taken from the arrow file;
        # otherwise, the English prompt from the `text_en` column will be taken,
        # provided that `text_zh` or `text_en` exists in the arrow file.
        self.enable_CN = enable_CN
        self.index_file = index_file  # JSON-lines file, e.g. {'imgpath': ..., 'text_zh': ...}
        self.multireso = multireso
        self.batch_size = batch_size
        self.world_size = world_size

        # Task switches driven by the experiment config.
        self.is_word = args.is_word
        if self.is_word:
            print('Training word images!')

        # training inpainting task
        self.is_inpaint = args.is_inpaint
        if self.is_inpaint:
            print('Training inpainting task!')

        self.is_textmask = args.is_textmask
        if self.is_textmask:
            print('Training using textmask!')

        if self.multireso:
            self.index_manager = self.load_index()
            print("index_manager len!!!!!!!", len(self.index_manager))

        # NOTE(review): the `uncond_p` parameter is deliberately overridden here,
        # disabling unconditional caption dropout — confirm this is intended.
        self.uncond_p = 0
        self.text_ctx_len = text_ctx_len
        self.tokenizer = tokenizer

        # Size-condition options (both default to False).
        self.random_shrink_size_cond = random_shrink_size_cond
        self.merge_src_cond = merge_src_cond

        self.data = self.load_data(self.index_file[0])
        print('train data num!!!!!!!', len(self.data))

        assert isinstance(resolution, int), f"resolution must be an integer, got {resolution}"
        # Training transform: optional random horizontal flip, then scale to [-1, 1].
        self.flip_norm = T.Compose(
            [
                T.RandomHorizontalFlip() if self.random_flip else T.Lambda(lambda x: x),
                T.ToTensor(),
                T.Normalize([0.5], [0.5]),
            ]
        )

        # Same normalization without the flip (used by the inpainting path).
        self.image_transforms = T.Compose(
            [
                T.ToTensor(),
                T.Normalize([0.5], [0.5]),
            ]
        )

        # show info
        if self.merge_src_cond:
            self.log_fn("Enable merging src condition: (oriW, oriH) --> ((WH)**0.5, (WH)**0.5)")

        self.log_fn("Enable image_meta_size condition (original_size, target_size, crop_coords)")
        self.log_fn(f"Image_transforms: {self.flip_norm}")


    def load_data(self, json_file):
        datalist = []
        data = open(json_file, 'r', encoding='utf-8')
        for line in data.readlines():
            dic = json.loads(line)
            datalist.append(dic)
        return datalist


    def load_index(self):
        """Build the multi-resolution bucket index manager.

        Returns:
            MultiResolutionBucketIndexV2 built from ``self.batch_size`` and
            ``self.world_size``.

        Raises:
            ValueError: when ``self.multireso`` is False. (The original fell
            through to ``return index_manager`` with the name unbound, which
            raised a confusing UnboundLocalError instead.)
        """
        if not self.multireso:
            raise ValueError("load_index() is only supported when multireso=True")
        index_manager = MultiResolutionBucketIndexV2(
            self.batch_size, self.world_size
        )
        self.log_fn(f"Using MultiResolutionBucketIndexV2: {len(index_manager):,}")
        return index_manager

    def shuffle(self, seed, fast=False):
        """Delegate per-epoch shuffling to the underlying index manager."""
        manager = self.index_manager
        manager.shuffle(seed, fast=fast)

    def get_raw_image(self, index, image_key="image"):
        """Fetch the decoded image for *index* from the arrow index.

        Any failure is logged and replaced by a 256x256 white placeholder so a
        single corrupt record cannot kill the training loop.
        """
        try:
            return self.index_manager.get_image(index, image_key)
        except Exception as e:
            self.log_fn(f'get_raw_image | Error: {e}')
            return Image.new("RGB", (256, 256), (255, 255, 255))

    @staticmethod
    def random_crop_image(image, origin_size, target_size):
        """Resize *image* so it fully covers ``target_size``, then random-crop.

        Args:
            image: source PIL image.
            origin_size: (width, height) of the source image.
            target_size: (width, height) of the desired crop.

        Returns:
            (cropped_image, (x_start, y_start), (resized_w, resized_h)).
        """
        # BUGFIX: scale so BOTH dimensions cover the target. The original scaled
        # a single axis from the aspect ratio, so integer truncation could leave
        # the other axis a pixel short of the target, making
        # ``randint(0, negative)`` raise or the crop run past the image edge.
        scale = max(target_size[0] / float(origin_size[0]),
                    target_size[1] / float(origin_size[1]))
        new_width = max(target_size[0], int(round(origin_size[0] * scale)))
        new_height = max(target_size[1], int(round(origin_size[1] * scale)))

        image = image.resize((new_width, new_height), Image.LANCZOS)

        # Random offset along whichever axis has slack (randint(0, 0) == 0, so
        # the tight axis always starts at 0, as before).
        x_start = random.randint(0, new_width - target_size[0])
        y_start = random.randint(0, new_height - target_size[1])
        image_crop = image.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))
        crops_coords_top_left = (x_start, y_start)

        return image_crop, crops_coords_top_left, (new_width, new_height)

    def get_style(self, index):
        """Return the style id for sample *index*.

        Currently a constant 0; kept as a hook so a learned style embedder can
        be plugged in later without changing callers.
        """
        return 0

    def get_image_with_hwxy(self, index, image_key="image"):
        """Fetch sample *index* from the arrow index and build the image tensor.

        Returns:
            (image_tensor, image_meta_size, kwargs) where ``image_meta_size``
            is (resize_h, resize_w, crop_top, crop_left, target_h, target_w)
            and ``kwargs`` carries ``image_meta_size`` plus the ``style`` id.
        """
        image = self.get_raw_image(index, image_key=image_key)
        origin_size = image.size

        if self.multireso:
            # Resolution buckets keyed "HxW", value = aspect ratio h/w.
            targetdictv1 = {'768x1280':0.6, '960x1280':0.75, '1024x1024':1, '560x1280':0.4375, '704x1280':0.55, '848x1280':0.6625}  # landscape buckets (h/w <= 1)
            targetdictv2 = {'1280x960':1.3333, '1280x768':1.6667, '1280x832':1.5385, '1280x800':1.6, '1280x576':2.2222}  # portrait buckets (h/w > 1)

            w, h = image.size
            ratio = h / w
            # Pick the bucket whose aspect ratio is closest to the image's.
            if h <= w:
                res_key, res_val = min(
                    targetdictv1.items(), key=lambda x: abs(ratio - x[1])
                )
            else:
                res_key, res_val = min(
                    targetdictv2.items(), key=lambda x: abs(ratio - x[1])
                )
            target_size = int(res_key.split("x")[1]), int(res_key.split("x")[0])  # (w, h)

            image, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
            image_tensor = self.flip_norm(image)
        else:
            # Fixed square resolution: resize to cover, then random-crop.
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)
            image_tensor = self.flip_norm(image_crop)

        if self.random_shrink_size_cond:
            # Clamp the size condition so it never reports below 1024.
            origin_size = (1024 if origin_size[0] < 1024 else origin_size[0],
                           1024 if origin_size[1] < 1024 else origin_size[1])
        if self.merge_src_cond:
            # Collapse (W, H) into a single area-preserving square condition.
            val = (origin_size[0] * origin_size[1]) ** 0.5
            origin_size = (val, val)

        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        style = self.get_style(index)
        kwargs['style'] = style

        return image_tensor, image_meta_size, kwargs


    def get_image_with_hwxy_custom(self, index, image_key="image"):
        """Load sample *index* from ``self.data`` (JSON records on disk) and
        build the image tensor — the file-backed twin of ``get_image_with_hwxy``.

        Returns:
            (image_tensor, image_meta_size, kwargs) where ``image_meta_size``
            is (resize_h, resize_w, crop_top, crop_left, target_h, target_w)
            and ``kwargs`` carries ``image_meta_size`` plus the ``style`` id.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size

        if self.multireso:
            # Resolution buckets keyed "HxW", value = aspect ratio h/w.
            targetdictv1 = {'768x1280':0.6, '960x1280':0.75, '1024x1024':1, '560x1280':0.4375, '704x1280':0.55, '848x1280':0.6625}  # landscape buckets (h/w <= 1)
            targetdictv2 = {'1280x960':1.3333, '1280x768':1.6667, '1280x832':1.5385, '1280x800':1.6, '1280x576':2.2222}  # portrait buckets (h/w > 1)

            w, h = image.size
            ratio = h / w
            # Pick the bucket whose aspect ratio is closest to the image's.
            if h <= w:
                res_key, res_val = min(
                    targetdictv1.items(), key=lambda x: abs(ratio - x[1])
                )
            else:
                res_key, res_val = min(
                    targetdictv2.items(), key=lambda x: abs(ratio - x[1])
                )
            target_size = int(res_key.split("x")[1]), int(res_key.split("x")[0])  # (w, h)

            image, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
            image_tensor = self.flip_norm(image)

        else:
            # Fixed square resolution: resize to cover, then random-crop.
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)
            image_tensor = self.flip_norm(image_crop)

        if self.random_shrink_size_cond:
            # Clamp the size condition so it never reports below 1024.
            origin_size = (1024 if origin_size[0] < 1024 else origin_size[0],
                           1024 if origin_size[1] < 1024 else origin_size[1])
        if self.merge_src_cond:
            # Collapse (W, H) into a single area-preserving square condition.
            val = (origin_size[0] * origin_size[1]) ** 0.5
            origin_size = (val, val)

        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        style = self.get_style(index)
        kwargs['style'] = style

        return image_tensor, image_meta_size, kwargs



    def get_image_with_hwxy_custom_textmask_logmask(self, index, image_key="image"):
        """Load one text/logo-masked training sample (multireso only).

        Returns:
            image_tensor: (3, H, W) float tensor in [-1, 1].
            textmask_crop: (1, H, W) tensor, 0 over text/logo pixels so they
                can be excluded from the loss, 1 elsewhere.
            image_meta_size: tensor of
                (resize_h, resize_w, crop_top, crop_left, target_h, target_w).
            kwargs: dict carrying ``image_meta_size`` and ``style``.

        Returns ``-1`` (legacy sentinel) when ``multireso`` is False — the
        single-resolution path was never implemented.
        """
        record = self.data[index]
        image = Image.open(record['imgpath']).convert("RGB")

        def _load_mask(path_key):
            # Best-effort mask loading: missing key, empty path, or an
            # unreadable file all fall back to an all-black (empty) mask.
            # BUGFIX: the original used a bare ``except:`` that silently
            # swallowed everything; narrow it and log the failure.
            try:
                path = record.get(path_key, "")
                if path:
                    return Image.open(path).convert("RGB")
            except Exception as e:
                self.log_fn(f'{path_key} | Error: {e}')
            return Image.new('RGB', (image.size[0], image.size[1]), (0, 0, 0))

        textmask = _load_mask('textmaskpath')
        logomask = _load_mask('logomaskpath')

        origin_size = image.size

        if not self.multireso:
            # Never implemented; the original body after this point was
            # unreachable dead code and has been removed.
            print('logo not support single solution!')
            return -1

        # Resolution buckets keyed "HxW", value = aspect ratio h/w.
        targetdictv1 = {'768x1280':0.6, '960x1280':0.75, '1024x1024':1, '560x1280':0.4375, '704x1280':0.55, '848x1280':0.6625}  # landscape buckets
        targetdictv2 = {'1280x960':1.3333, '1280x768':1.6667, '1280x832':1.5385, '1280x800':1.6, '1280x576':2.2222}  # portrait buckets

        w, h = image.size
        ratio = h / w
        buckets = targetdictv1 if h <= w else targetdictv2
        res_key, _ = min(buckets.items(), key=lambda kv: abs(ratio - kv[1]))
        target_size = int(res_key.split("x")[1]), int(res_key.split("x")[0])  # (w, h)

        image_crop, mask_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
            image, target_size, resample=Image.LANCZOS, crop_type='random', mask=[textmask, logomask], return_resize=True)
        textmask_crop, logomask_crop = mask_crop

        # Joint horizontal flip of image and masks.
        # BUGFIX: the original called undefined ``self.flip`` (AttributeError
        # whenever the flip fired); the coin toss already happened, so apply the
        # deterministic torchvision flip.
        if self.random_flip and random.random() < 0.5:
            image_crop = TF.hflip(image_crop)
            textmask_crop = TF.hflip(textmask_crop)
            logomask_crop = TF.hflip(logomask_crop)

        textmask_crop = (np.array(textmask_crop).astype(np.float32) / 255.0)[:, :, 0]
        textmask_crop = np.expand_dims(textmask_crop, axis=2)  # (h, w, 1)

        logomask_crop = (np.array(logomask_crop).astype(np.float32) / 255.0)[:, :, 0]
        logomask_crop = np.expand_dims(logomask_crop, axis=2)  # (h, w, 1)

        # Binarize: text/logo pixels = 1, background = 0; merge logo into text.
        textmask_crop[textmask_crop < 0.5] = 0
        textmask_crop[textmask_crop >= 0.5] = 1
        logomask_crop[logomask_crop < 0.5] = 0
        logomask_crop[logomask_crop >= 0.5] = 1
        textmask_crop[logomask_crop == 1] = 1
        # Invert so text/logo pixels become 0 (excluded from the training loss).
        textmask_crop = 1 - textmask_crop

        textmask_crop = textmask_crop.transpose(2, 0, 1)  # (1, h, w)
        textmask_crop = torch.from_numpy(textmask_crop)

        image_tensor = (np.array(image_crop).astype(np.float32) / 255.0)
        image_tensor = torch.from_numpy(image_tensor.transpose(2, 0, 1))
        image_tensor = image_tensor * 2.0 - 1.0  # [-1, 1]

        if self.random_shrink_size_cond:
            # Clamp the size condition so it never reports below 1024.
            origin_size = (1024 if origin_size[0] < 1024 else origin_size[0],
                           1024 if origin_size[1] < 1024 else origin_size[1])
        if self.merge_src_cond:
            # Collapse (W, H) into a single area-preserving square condition.
            val = (origin_size[0] * origin_size[1]) ** 0.5
            origin_size = (val, val)

        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        style = self.get_style(index)
        kwargs['style'] = style

        return image_tensor, textmask_crop, image_meta_size, kwargs

    def get_outpaint_mask(self, image_size):
        """Build a random out-painting (uncropping) mask.

        Returns an (h, w, 1) mask where the unknown region is 1 and the known
        region is 0.
        """
        bbox = random_cropping_bbox(img_shape=image_size, mask_mode='bilateral')
        return bbox2mask_uncropping(image_size, bbox)
    
    def get_inpaint_mask(self, image_size):
        """Build a random in-painting mask: a random box unioned with brush
        strokes.

        The unknown (to be inpainted) region is 1, the known region is 0. The
        box side is capped at 30% of the larger image dimension.
        """
        side = int(max(image_size) * 0.3)
        bbox = random_bbox(img_shape=image_size, max_bbox_shape=(side, side))
        regular = bbox2mask(image_size, bbox)
        irregular = brush_stroke_mask(image_size, num_vertices=(4, 8), brush_width=(12, 30))
        return regular | irregular


    def get_image_with_hwxy_custom_textmask_logmask_inpainting(self, index, image_key="image"):
        """Load one inpainting training sample from ``self.data[index]``.

        Returns:
            image_tensor: (3, H, W) float tensor in [-1, 1].
            masked_image_tensor: image with the unknown region zeroed out.
            mask: (1, H, W) tensor, 1 = unknown (to be inpainted), 0 = known.
            textmask_crop: (1, H, W) tensor, 0 over text/logo pixels so they
                are excluded from the loss, 1 elsewhere.
            image_meta_size: tensor of
                (resize_h, resize_w, crop_top, crop_left, target_h, target_w).
            kwargs: dict carrying ``image_meta_size``.
        """
        record = self.data[index]
        image = Image.open(record['imgpath']).convert("RGB")

        def _load_mask(path_key):
            # Best-effort mask loading: missing key, empty path, or an
            # unreadable file all fall back to an all-black (empty) mask.
            # BUGFIX: the original used bare ``except:`` blocks that silently
            # swallowed everything; narrow them and log the failure.
            try:
                path = record.get(path_key, "")
                if path:
                    return Image.open(path).convert("RGB")
            except Exception as e:
                self.log_fn(f'{path_key} | Error: {e}')
            return Image.new('RGB', (image.size[0], image.size[1]), (0, 0, 0))

        textmask = _load_mask('textmaskpath')
        logomask = _load_mask('logomaskpath')
        commoditymask = _load_mask('commoditymaskpath')

        origin_size = image.size
        if self.multireso:
            # Resolution buckets keyed "HxW", value = aspect ratio h/w.
            targetdictv1 = {'768x1280':0.6, '960x1280':0.75, '1024x1024':1, '560x1280':0.4375, '704x1280':0.55, '848x1280':0.6625}  # landscape buckets
            targetdictv2 = {'1280x960':1.3333, '1280x768':1.6667, '1280x832':1.5385, '1280x800':1.6, '1280x576':2.2222}  # portrait buckets

            w, h = image.size
            ratio = h / w
            buckets = targetdictv1 if h <= w else targetdictv2
            res_key, _ = min(buckets.items(), key=lambda kv: abs(ratio - kv[1]))
            target_size = int(res_key.split('x')[1]), int(res_key.split('x')[0])  # (w, h)

            image_crop, mask_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', mask=[textmask, logomask, commoditymask], return_resize=True)
            textmask_crop, logomask_crop, commoditymask_crop = mask_crop
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)
            x_start, y_start = crops_coords_top_left
            # BUGFIX: the crop coordinates refer to the *resized* image, so the
            # masks must be resized to the same dimensions before cropping (the
            # original cropped the full-resolution masks, producing misaligned
            # crops; cf. get_image_with_hwxy_custom_inpainting, which resizes).
            crop_box = (x_start, y_start, x_start + target_size[0], y_start + target_size[1])
            textmask_crop = textmask.resize(resize_width_height, Image.LANCZOS).crop(crop_box)
            logomask_crop = logomask.resize(resize_width_height, Image.LANCZOS).crop(crop_box)
            commoditymask_crop = commoditymask.resize(resize_width_height, Image.LANCZOS).crop(crop_box)

        # Joint horizontal flip of image and all masks.
        # BUGFIX: the original called undefined ``self.flip`` (AttributeError
        # whenever the flip fired); the coin toss already happened, so apply the
        # deterministic torchvision flip.
        if self.random_flip and random.random() < 0.5:
            image_crop = TF.hflip(image_crop)
            textmask_crop = TF.hflip(textmask_crop)
            logomask_crop = TF.hflip(logomask_crop)
            commoditymask_crop = TF.hflip(commoditymask_crop)

        commoditymask_crop = (np.array(commoditymask_crop).astype(np.float32) / 255.0)[:, :, 0]
        commoditymask_crop = np.expand_dims(commoditymask_crop, axis=2)  # (h, w, 1)
        # Source mask marks the product region as 1; binarize then invert so the
        # product (known) region is 0 and the background (unknown) region is 1.
        commoditymask_crop[commoditymask_crop < 0.5] = 0
        commoditymask_crop[commoditymask_crop >= 0.5] = 1
        commoditymask_crop = 1 - commoditymask_crop

        textmask_crop = (np.array(textmask_crop).astype(np.float32) / 255.0)[:, :, 0]
        textmask_crop = np.expand_dims(textmask_crop, axis=2)  # (h, w, 1)
        # Binarize: text pixels = 1, background = 0.
        textmask_crop[textmask_crop < 0.5] = 0
        textmask_crop[textmask_crop >= 0.5] = 1

        logomask_crop = (np.array(logomask_crop).astype(np.float32) / 255.0)[:, :, 0]
        logomask_crop = np.expand_dims(logomask_crop, axis=2)  # (h, w, 1)
        # Binarize: logo pixels = 1, background = 0; merge into the text mask.
        logomask_crop[logomask_crop < 0.5] = 0
        logomask_crop[logomask_crop >= 0.5] = 1
        textmask_crop[logomask_crop == 1] = 1

        # Build the inpainting mask (1 = unknown region, 0 = known region).
        if record.get('commoditymaskpath', "") != "":
            mask = commoditymask_crop.copy()
        else:
            imgsize = [image_crop.size[1], image_crop.size[0]]  # [h, w]
            if np.random.uniform() > 0.4:
                mask = self.get_outpaint_mask(imgsize)
            else:
                mask = self.get_inpaint_mask(imgsize)
            mask[mask < 0.5] = 0
            mask[mask >= 0.5] = 1

        mask[textmask_crop == 1] = 0  # never inpaint over text/logo pixels
        mask = mask.transpose(2, 0, 1)  # (1, h, w)
        mask = torch.from_numpy(mask)

        image_tensor = np.array(image_crop).astype(np.float32) / 255.0
        image_tensor = image_tensor.transpose(2, 0, 1)
        image_tensor = torch.from_numpy(image_tensor)
        image_tensor = image_tensor * 2.0 - 1.0  # [-1, 1]
        masked_image_tensor = (1 - mask) * image_tensor

        textmask_crop = textmask_crop.transpose(2, 0, 1)  # (1, h, w)
        textmask_crop = torch.from_numpy(textmask_crop)
        textmask_crop = 1 - textmask_crop  # text region -> 0, excluded from the loss

        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, masked_image_tensor, mask, textmask_crop, image_meta_size, kwargs

    def prepare_mask_and_masked_image(self, image, mask):
        image = np.array(image.convert("RGB"))
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

        mask = np.array(mask.convert("L"))
        mask = mask.astype(np.float32) / 255.0
        mask = mask[None]
        mask = 1 - mask   # 取反
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)

        masked_image = image * (mask < 0.5)

        return mask, masked_image

    def get_image_with_hwxy_custom_inpainting(self, index, image_key="image"):
        """Load a single-resolution inpainting sample using the commodity mask.

        Unlike the textmask/logomask variant, this path requires the record to
        contain a ``commoditymaskpath``.

        Returns:
            (image_tensor, masked_image_tensor, mask, textmask_crop,
            image_meta_size, kwargs) — see ``prepare_mask_and_masked_image``
            for the mask conventions; ``textmask_crop`` is all-ones here, i.e.
            every pixel participates in the loss.
        """
        record = self.data[index]
        image = Image.open(record['imgpath']).convert("RGB")
        commoditymask = Image.open(record['commoditymaskpath'])

        origin_size = image.size

        target_size = (self.resolution, self.resolution)
        image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(image, origin_size, target_size)
        x_start, y_start = crops_coords_top_left
        # The crop coordinates refer to the resized image, so resize the mask
        # to the same dimensions before cropping.
        commoditymask = commoditymask.resize(resize_width_height, Image.LANCZOS)
        commoditymask_crop = commoditymask.crop((x_start, y_start, x_start + target_size[0], y_start + target_size[1]))

        image_tensor = self.image_transforms(image_crop)
        mask, masked_image_tensor = self.prepare_mask_and_masked_image(image_crop, commoditymask_crop)

        # All-ones text mask: no text region to exclude on this path.
        textmask_crop = torch.ones_like(mask)

        image_meta_size = torch.tensor([resize_width_height[1], resize_width_height[0], crops_coords_top_left[1], crops_coords_top_left[0], target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
        }

        return image_tensor, masked_image_tensor, mask, textmask_crop, image_meta_size, kwargs


    def get_text_info_with_encoder(self, description):
        pad_num = 0
        text_inputs = self.tokenizer(
            description,
            padding="max_length",
            max_length=self.text_ctx_len,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids[0]
        attention_mask = text_inputs.attention_mask[0]
        position_ids = text_inputs.position_ids[0]

        # if pad_num > 0:
        #     attention_mask[1:pad_num + 1] = False
        return text_input_ids, attention_mask, position_ids

    def get_original_text(self, ind):
        """Read the raw caption for sample *ind* from the arrow index.

        Uses the 'text_zh' column when Chinese prompts are enabled, otherwise
        'text_en'; the result is stringified and stripped.
        """
        column = 'text_zh' if self.enable_CN else 'text_en'
        raw = self.index_manager.get_attribute(ind, column)
        return str(raw).strip()

    def get_text(self, ind):
        """Caption for sample *ind*, falling back to a generic Chinese prompt
        ("randomly generate an image") when the stored caption is empty."""
        text = self.get_original_text(ind)
        return text if text else '随机生成一张图片'

    def get_word_pos(self, ind):
        """Map a 3x3-grid position index (0-8, row-major) to its Chinese
        location phrase (top-left, top, top-right, ... bottom-right).

        Raises KeyError for indices outside 0-8, like the original mapping.
        """
        labels = ('左上角', '上方', '右上角',
                  '左侧', '中间', '右侧',
                  '左下方', '下方', '右下角')
        return dict(enumerate(labels))[ind]

    def __getitem__(self, ind):
        """Assemble one training sample.

        The shape of the returned tuple depends on the task flags, checked in
        this order: ``is_inpaint`` -> inpainting tuple (with mask tensors),
        ``is_textmask`` -> text-mask tuple, otherwise -> plain text-to-image
        tuple.
        """
        # Caption dropout: with probability uncond_p use an empty caption.
        # (uncond_p is currently forced to 0 in __init__, so this never fires.)
        if random.random() < self.uncond_p:
            description = ""
        else:
            description = self.data[ind]['text_zh']
            if self.is_word:
                # Prepend a rendered-text prompt describing the word to draw.
                word = self.data[ind]['word']
                # NOTE(review): word_size / word_mode ('vertical'/'horizontal'
                # layout) are read but unused by the active prompt template.
                word_size = self.data[ind]['font_size']
                word_mode = '竖版' if self.data[ind]['vertical'] else '横版'
                word_font = self.data[ind]['font']

                # Active template: 'the image reads "<word>", the font is <font>'.
                word_prompt = f'图像上写着“{word}”，字体是{word_font}。'
                description = word_prompt + description
            # Truncate very long captions.
            description = description[0:256]

        # Tokenize the caption online.
        text_embedding, text_embedding_mask, text_position_ids = self.get_text_info_with_encoder(description)

        if self.is_inpaint:
            original_pil_image, masked_image_tensor, mask, textmask, image_meta_size, kwargs = self.get_image_with_hwxy_custom_textmask_logmask_inpainting(ind)
            return (
                original_pil_image,
                masked_image_tensor,
                mask,
                textmask,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

        elif self.is_textmask:
            original_pil_image, textmask, image_meta_size, kwargs = self.get_image_with_hwxy_custom_textmask_logmask(ind)

            return (
                original_pil_image,
                textmask,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

        else:
            original_pil_image, image_meta_size, kwargs = self.get_image_with_hwxy_custom(ind)

            return (
                original_pil_image,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

    def __len__(self):
        """Dataset length: bucket-index size under multireso, otherwise the
        number of raw JSON records."""
        return len(self.index_manager) if self.multireso else len(self.data)

