import pickle
import random
from pathlib import Path
import ast
import numpy as np
import re
import json
import time
from functools import partial
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

import torch
import torchvision.transforms as T
import torch.nn.functional as F
from torchvision.transforms import functional as TF
from torch.utils.data import Dataset

from IndexKits.index_kits import ArrowIndexV2, MultiResolutionBucketIndexV2, MultiIndexV2


class TextImageArrowStreamCustom(Dataset):
    """Text-to-image training dataset streaming samples from a JSONL index.

    Each line of the index file is a JSON object with at least ``imgpath``
    and ``text_zh`` fields; optional ``textmaskpath`` / ``logomaskpath``
    fields point to binary text/logo masks on disk. Images are resized and
    randomly cropped either to a fixed square ``resolution`` or, when
    ``multireso`` is enabled, to the closest multi-resolution bucket.
    """

    # Multi-resolution buckets, keyed "HxW" -> aspect ratio (h / w).
    # Landscape/square buckets (h <= w):
    _BUCKETS_LANDSCAPE = {
        '768x1280': 0.6, '960x1280': 0.75, '1024x1024': 1,
        '560x1280': 0.4375, '704x1280': 0.55, '848x1280': 0.6625,
    }
    # Portrait buckets (h > w):
    _BUCKETS_PORTRAIT = {
        '1280x960': 1.3333, '1280x768': 1.6667, '1280x832': 1.5385,
        '1280x800': 1.6, '1280x576': 2.2222,
    }

    def __init__(self,
                 args,
                 resolution=512,
                 random_flip=None,
                 enable_CN=True,
                 log_fn=print,
                 index_file=None,
                 multireso=False,
                 batch_size=-1,
                 world_size=1,
                 random_shrink_size_cond=False,
                 merge_src_cond=False,
                 uncond_p=0.0,
                 text_ctx_len=77,
                 tokenizer=None,
                 ):
        self.args = args
        self.resolution = resolution
        self.log_fn = lambda x: log_fn(f"    {Path(__file__).stem} | " + x)

        self.random_flip = random_flip
        # If True, the Chinese prompt from the `text_zh` column is used;
        # otherwise the English prompt from `text_en` (arrow-index mode only).
        self.enable_CN = enable_CN
        # List of JSONL index files: each line {'imgpath': ..., 'text_zh': ...}.
        self.index_file = index_file
        self.multireso = multireso
        self.batch_size = batch_size
        self.world_size = world_size

        if self.multireso:
            self.index_manager = self.load_index()
            print("index_manager len!!!!!!!", len(self.index_manager))

        # Classifier-free-guidance dropout is deliberately disabled here;
        # `uncond_p` is accepted only for interface compatibility.
        self.uncond_p = 0
        self.text_ctx_len = text_ctx_len
        self.tokenizer = tokenizer

        # Whether __getitem__ should also return a text/logo mask tensor.
        self.is_textmask = args.is_textmask

        # Size-condition options (normally both False).
        self.random_shrink_size_cond = random_shrink_size_cond
        self.merge_src_cond = merge_src_cond

        # NOTE(review): only the FIRST index file is loaded — confirm intended.
        self.data = self.load_data(self.index_file[0])
        print('train data num!!!!!!!', len(self.data))

        assert isinstance(resolution, int), f"resolution must be an integer, got {resolution}"
        self.flip_norm = T.Compose(
            [
                T.RandomHorizontalFlip() if self.random_flip else T.Lambda(lambda x: x),
                T.ToTensor(),
                T.Normalize([0.5], [0.5]),
            ]
        )

        # Deterministic flip, used when image and masks must flip together.
        self.flip = T.RandomHorizontalFlip(p=1.0)

        if self.merge_src_cond:
            self.log_fn("Enable merging src condition: (oriW, oriH) --> ((WH)**0.5, (WH)**0.5)")
        self.log_fn("Enable image_meta_size condition (original_size, target_size, crop_coords)")
        self.log_fn(f"Image_transforms: {self.flip_norm}")

    def load_data(self, json_file):
        """Read a JSON-lines file and return its records.

        Args:
            json_file: path to a JSONL file, one JSON object per line.

        Returns:
            list[dict]: parsed records in file order.
        """
        print('////////////')
        print(json_file)
        # `with` guarantees the handle is closed (the original leaked it).
        with open(json_file, 'r', encoding='utf-8') as f:
            return [json.loads(line) for line in f]

    def load_index(self):
        """Build the multi-resolution bucket index over the arrow shards.

        Returns:
            MultiResolutionBucketIndexV2 instance.

        Raises:
            ValueError: if the dataset was not created with multireso=True
                (the original code hit an UnboundLocalError here instead).
        """
        if not self.multireso:
            raise ValueError("load_index() requires multireso=True")
        index_manager = MultiResolutionBucketIndexV2(
            self.batch_size, self.world_size
        )
        self.log_fn(f"Using MultiResolutionBucketIndexV2: {len(index_manager):,}")
        return index_manager

    def shuffle(self, seed, fast=False):
        """Shuffle the underlying bucket index (multireso mode only)."""
        self.index_manager.shuffle(seed, fast=fast)

    def get_raw_image(self, index, image_key="image"):
        """Fetch a raw PIL image from the arrow index.

        Falls back to a white 256x256 placeholder on any read error so a
        single corrupt record cannot kill a training epoch.
        """
        try:
            return self.index_manager.get_image(index, image_key)
        except Exception as e:
            self.log_fn(f'get_raw_image | Error: {e}')
            return Image.new("RGB", (256, 256), (255, 255, 255))

    @staticmethod
    def random_crop_image(image, origin_size, target_size, mask=None):
        """Resize so the target is covered, then take a random crop.

        Args:
            image: source PIL image.
            origin_size: (w, h) of the source image.
            target_size: (w, h) of the desired crop.
            mask: optional PIL mask that receives the identical resize + crop.

        Returns:
            (image_crop, crop_top_left, resized_wh), or with a mask
            (image_crop, mask_crop, crop_top_left, resized_wh).
        """
        aspect_ratio = float(origin_size[0]) / float(origin_size[1])
        if origin_size[0] < origin_size[1]:
            new_width = target_size[0]
            new_height = int(new_width / aspect_ratio)
        else:
            new_height = target_size[1]
            new_width = int(new_height * aspect_ratio)

        image = image.resize((new_width, new_height), Image.LANCZOS)

        # Randomize the crop position along whichever axis has slack.
        if new_width > target_size[0]:
            x_start = random.randint(0, new_width - target_size[0])
            y_start = 0
        else:
            x_start = 0
            y_start = random.randint(0, new_height - target_size[1])
        box = (x_start, y_start, x_start + target_size[0], y_start + target_size[1])
        image_crop = image.crop(box)
        crops_coords_top_left = (x_start, y_start)

        if mask is not None:
            # BUG FIX: the original resized/cropped `image` here instead of
            # `mask`, so the returned "mask" was just another image crop.
            mask = mask.resize((new_width, new_height), Image.LANCZOS)
            mask_crop = mask.crop(box)
            return image_crop, mask_crop, crops_coords_top_left, (new_width, new_height)

        return image_crop, crops_coords_top_left, (new_width, new_height)

    def get_style(self, index):
        """Return a default style id (0); placeholder for a learned embedder."""
        style = 0
        return style

    def _bucket_target_size(self, image):
        """Pick the bucket whose aspect ratio is closest to the image's.

        Returns:
            (target_w, target_h) of the best-matching bucket.
        """
        w, h = image.size
        ratio = h / w
        buckets = self._BUCKETS_LANDSCAPE if h <= w else self._BUCKETS_PORTRAIT
        res_key = min(buckets, key=lambda k: abs(ratio - buckets[k]))
        target_h, target_w = (int(v) for v in res_key.split("x"))
        return target_w, target_h

    def _apply_size_cond(self, origin_size):
        """Optionally adjust the original-size condition (flags default off).

        NOTE(review): the adjusted value is not fed into image_meta_size
        anywhere — kept only for parity with the original code.
        """
        if self.random_shrink_size_cond:
            origin_size = (max(origin_size[0], 1024), max(origin_size[1], 1024))
        if self.merge_src_cond:
            val = (origin_size[0] * origin_size[1]) ** 0.5
            origin_size = (val, val)
        return origin_size

    def _meta_and_kwargs(self, index, resize_wh, crop_tl, target_size):
        """Build the conditioning meta tensor and the kwargs dict.

        Layout: (resized_h, resized_w, crop_top, crop_left, target_h, target_w).
        """
        image_meta_size = torch.tensor([resize_wh[1], resize_wh[0],
                                        crop_tl[1], crop_tl[0],
                                        target_size[1], target_size[0]])
        kwargs = {
            'image_meta_size': image_meta_size,
            'style': self.get_style(index),
        }
        return image_meta_size, kwargs

    def _load_mask(self, index, key, size):
        """Open the mask referenced by `self.data[index][key]`.

        Returns an all-black (no text/logo region) mask when the key is
        absent, empty, or the file cannot be opened.
        """
        try:
            path = self.data[index].get(key, "")
            if path:
                return Image.open(path).convert("RGB")
        except Exception:
            pass
        return Image.new('RGB', size, (0, 0, 0))

    @staticmethod
    def _binarize_mask(mask_img):
        """PIL mask -> float32 (h, w, 1) array thresholded to {0, 1} at 0.5."""
        arr = (np.array(mask_img).astype(np.float32) / 255.0)[:, :, 0]
        arr = np.expand_dims(arr, axis=2)  # h, w, 1
        return (arr >= 0.5).astype(np.float32)

    @staticmethod
    def _to_norm_tensor(image_crop):
        """PIL image -> float tensor (c, h, w) scaled to [-1, 1]."""
        arr = np.array(image_crop).astype(np.float32) / 255.0
        return torch.from_numpy(arr.transpose(2, 0, 1)) * 2.0 - 1.0

    def get_image_with_hwxy(self, index, image_key="image"):
        """Load a sample from the ARROW index and crop it to its bucket.

        Returns:
            (image_tensor, image_meta_size, kwargs)
        """
        image = self.get_raw_image(index, image_key=image_key)
        origin_size = image.size

        if self.multireso:
            target_size = self._bucket_target_size(image)
            image, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
            image_tensor = self.flip_norm(image)
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(
                image, origin_size, target_size)
            image_tensor = self.flip_norm(image_crop)

        origin_size = self._apply_size_cond(origin_size)
        image_meta_size, kwargs = self._meta_and_kwargs(
            index, resize_width_height, crops_coords_top_left, target_size)
        return image_tensor, image_meta_size, kwargs

    def get_image_with_hwxy_custom(self, index, image_key="image"):
        """Load a sample from the JSONL index (disk paths) and crop it.

        Returns:
            (image_tensor, image_meta_size, kwargs)
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        origin_size = image.size

        if self.multireso:
            target_size = self._bucket_target_size(image)
            image, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random', return_resize=True)
            image_tensor = self.flip_norm(image)
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(
                image, origin_size, target_size)
            image_tensor = self.flip_norm(image_crop)

        origin_size = self._apply_size_cond(origin_size)
        image_meta_size, kwargs = self._meta_and_kwargs(
            index, resize_width_height, crops_coords_top_left, target_size)
        return image_tensor, image_meta_size, kwargs

    def get_image_with_hwxy_custom_textmask_logmask(self, index, image_key="image"):
        """Load image plus a combined text+logo mask (multireso only).

        Returns:
            (image_tensor, mask_tensor, image_meta_size, kwargs); the mask is
            (1, h, w) with 0 on text/logo pixels and 1 on background.
        """
        image = Image.open(self.data[index]['imgpath']).convert("RGB")
        textmask = self._load_mask(index, 'textmaskpath', image.size)
        logomask = self._load_mask(index, 'logomaskpath', image.size)
        origin_size = image.size

        if not self.multireso:
            # Single-resolution path was never implemented for logo masks.
            print('logo not support single solution!')
            return -1

        target_size = self._bucket_target_size(image)
        image_crop, mask_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
            image, target_size, resample=Image.LANCZOS, crop_type='random',
            mask=[textmask, logomask], return_resize=True)

        textmask_crop, logomask_crop = mask_crop
        if self.random_flip and random.random() < 0.5:
            # One shared decision keeps image and masks spatially aligned.
            image_crop = self.flip(image_crop)
            textmask_crop = self.flip(textmask_crop)
            logomask_crop = self.flip(logomask_crop)

        text_arr = self._binarize_mask(textmask_crop)
        logo_arr = self._binarize_mask(logomask_crop)
        # Union of text and logo regions, then invert: on disk the masks are
        # white on text/logo; training expects 0 there and 1 on background.
        text_arr[logo_arr == 1] = 1
        text_arr = 1 - text_arr
        textmask_tensor = torch.from_numpy(text_arr.transpose(2, 0, 1))  # (1, h, w)

        image_tensor = self._to_norm_tensor(image_crop)

        origin_size = self._apply_size_cond(origin_size)
        image_meta_size, kwargs = self._meta_and_kwargs(
            index, resize_width_height, crops_coords_top_left, target_size)
        return image_tensor, textmask_tensor, image_meta_size, kwargs

    def get_image_with_hwxy_custom_textmask(self, index, image_key="image"):
        """Load image plus its text mask from the JSONL record's disk paths.

        Returns:
            (image_tensor, mask_tensor, image_meta_size, kwargs); the mask is
            (1, h, w) with 0 on text pixels and 1 on background.
        """
        try:
            image = Image.open(self.data[index]['imgpath']).convert("RGB")
        except Exception:
            print('/////////')
            print(self.data[index]['imgpath'])
            # BUG FIX: the original swallowed the error and then crashed with
            # UnboundLocalError on `image`; re-raise the real cause instead.
            raise

        if self.data[index]['textmaskpath'] == "":
            textmask = Image.new('RGB', (image.size[0], image.size[1]), (0, 0, 0))
        else:
            textmask = Image.open(self.data[index]['textmaskpath']).convert("RGB")
            if textmask.size != image.size:
                # BUG FIX: Image.ANTIALIAS was removed in Pillow 10;
                # Image.LANCZOS is the same filter under its current name.
                textmask = textmask.resize(image.size, Image.LANCZOS)

        origin_size = image.size

        if self.multireso:
            target_size = self._bucket_target_size(image)
            image_crop, mask_crop, crops_coords_top_left, resize_width_height = self.index_manager.resize_and_crop(
                image, target_size, resample=Image.LANCZOS, crop_type='random',
                mask=[textmask], return_resize=True)
            textmask_crop = mask_crop[0]
        else:
            target_size = (self.resolution, self.resolution)
            image_crop, textmask_crop, crops_coords_top_left, resize_width_height = self.random_crop_image(
                image, origin_size, target_size, mask=textmask)

        if self.random_flip and random.random() < 0.5:
            image_crop = self.flip(image_crop)
            textmask_crop = self.flip(textmask_crop)

        # On disk the mask is white on text; invert so text pixels become 0.
        text_arr = 1 - self._binarize_mask(textmask_crop)
        textmask_tensor = torch.from_numpy(text_arr.transpose(2, 0, 1))  # (1, h, w)

        image_tensor = self._to_norm_tensor(image_crop)

        origin_size = self._apply_size_cond(origin_size)
        image_meta_size, kwargs = self._meta_and_kwargs(
            index, resize_width_height, crops_coords_top_left, target_size)
        return image_tensor, textmask_tensor, image_meta_size, kwargs

    def get_text_info_with_encoder(self, description):
        """Tokenize `description` to a fixed length of `text_ctx_len`.

        Returns:
            (input_ids, attention_mask, position_ids), each a 1-D tensor.

        NOTE(review): assumes the tokenizer returns `position_ids` (true for
        the HunyuanDiT BERT tokenizer) — confirm for other tokenizers.
        """
        text_inputs = self.tokenizer(
            description,
            padding="max_length",
            max_length=self.text_ctx_len,
            truncation=True,
            return_tensors="pt",
        )
        return (text_inputs.input_ids[0],
                text_inputs.attention_mask[0],
                text_inputs.position_ids[0])

    def get_original_text(self, ind):
        """Read the raw prompt from the arrow index (zh or en column)."""
        column = 'text_zh' if self.enable_CN else 'text_en'
        return str(self.index_manager.get_attribute(ind, column)).strip()

    def get_text(self, ind):
        """Prompt for sample `ind`, with a generic fallback for empty prompts."""
        text = self.get_original_text(ind)
        return text if text != '' else '随机生成一张图片'

    def __getitem__(self, ind):
        """Return one training sample.

        With `is_textmask`: (image, mask, ids, attn_mask, pos_ids, meta, kwargs);
        otherwise the same tuple without the mask.
        """
        # CFG prompt dropout (uncond_p is forced to 0 in __init__, so this
        # branch never fires in the current configuration).
        if random.random() < self.uncond_p:
            description = ""
        else:
            # Prompts come from the JSONL index, truncated to 256 characters.
            description = self.data[ind]['text_zh'][0:256]

        text_embedding, text_embedding_mask, text_position_ids = \
            self.get_text_info_with_encoder(description)

        if self.is_textmask:
            original_pil_image, textmask, image_meta_size, kwargs = \
                self.get_image_with_hwxy_custom_textmask_logmask(ind)
            return (
                original_pil_image,
                textmask,
                text_embedding.clone().detach(),
                text_embedding_mask.clone().detach(),
                text_position_ids.clone().detach(),
                image_meta_size.clone().detach(),
                {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
            )

        original_pil_image, image_meta_size, kwargs = self.get_image_with_hwxy_custom(ind)
        return (
            original_pil_image,
            text_embedding.clone().detach(),
            text_embedding_mask.clone().detach(),
            text_position_ids.clone().detach(),
            image_meta_size.clone().detach(),
            {k: torch.tensor(np.array(v)).clone().detach() for k, v in kwargs.items()},
        )

    def __len__(self):
        """Bucket-index length in multireso mode, JSONL record count otherwise."""
        return len(self.index_manager) if self.multireso else len(self.data)

