from torch.utils.data import Dataset
from PIL import Image
#import cv2
import numpy as np
from torchvision import transforms
from torchvision.transforms.functional import crop
from .mask import (bbox2mask, brush_stroke_mask, get_irregular_mask, random_bbox, bbox2mask_uncropping, random_cropping_bbox)
import torch
import os
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import random
import json
def pil_loader(path):
    """Load the image at *path* and return it as an RGB PIL image.

    Opens the file through an explicit context-managed handle and forces the
    decode via .convert() inside the `with` block: PIL's Image.open is lazy
    and would otherwise keep the file descriptor open until garbage
    collection, which leaks descriptors on large datasets.
    """
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')

def _maybe_convert_prompt(prompt, tokenizer):
    tokens = tokenizer.tokenize(prompt)
    unique_tokens = set(tokens)
    for token in unique_tokens:
        if token in tokenizer.added_tokens_encoder:
            replacement = token
            i = 1
            while f"{token}_{i}" in tokenizer.added_tokens_encoder:
                replacement += f" {token}_{i}"
                i += 1
            prompt = prompt.replace(token, replacement)
    return prompt

def maybe_convert_prompt(prompt, tokenizer):
    """Apply _maybe_convert_prompt to a single prompt or a list of prompts.

    Returns a string when given a string, a list when given a list, mirroring
    the input shape.
    """
    if isinstance(prompt, list):
        return [_maybe_convert_prompt(p, tokenizer) for p in prompt]
    return _maybe_convert_prompt(prompt, tokenizer)

class PosterOutpaint_SdInpainting_Train(Dataset):
    """Training dataset for SD(XL) inpainting/outpainting of product posters.

    Each sample yields the target image normalized to [-1, 1], a binary
    outpaint mask, a masked conditioning image, an inverted text-region mask,
    SDXL size/crop conditioning metadata, and tokenized prompt ids for both
    text encoders.
    """

    def __init__(self, prompt_root, json_path, tokenizer_one, tokenizer_two, image_size, random_flip, center_crop):
        # prompt_root: directory of per-image .txt prompt files (currently
        #   unused by __getitem__, which hard-codes a generic prompt).
        # json_path: directory of jsonl files, one record per line.
        self.trainpaths = self.get_img_files(prompt_root, json_path)
        print('train images:', len(self.trainpaths))
        self.mask_config = {'mask_mode': "hybrid"}
        self.mask_mode = self.mask_config['mask_mode']
        self.image_size = [image_size, image_size]
        self.tokenizer_one = tokenizer_one
        self.tokenizer_two = tokenizer_two

        self.resolution = image_size
        self.random_flip = random_flip
        self.center_crop = center_crop
        self.train_resize = transforms.Resize(self.resolution, interpolation=transforms.InterpolationMode.BILINEAR)
        self.train_crop = transforms.CenterCrop(self.resolution) if self.center_crop else transforms.RandomCrop(self.resolution)
        # p=1.0: the flip itself is deterministic; whether to flip at all is
        # decided by the `random.random() < 0.5` gate in __getitem__ so the
        # image and both masks flip together.
        self.train_flip = transforms.RandomHorizontalFlip(p=1.0)
        # No Normalize here: masks and the conditioning image stay in [0, 1];
        # the target image is rescaled to [-1, 1] manually in __getitem__.
        self.train_transforms = transforms.Compose(
            [
                transforms.ToTensor(),
            ]
        )

    def get_img_files(self, prompt_root, all_json_path):
        """Scan every jsonl file under *all_json_path* and collect sample paths.

        Returns a list of (image, text_mask, commodity_mask, prompt_file)
        tuples. Malformed lines and records missing a key are skipped with a
        logged error (best-effort loading).
        """
        imgpathlist = []
        for json_name in os.listdir(all_json_path):
            json_file = os.path.join(all_json_path, json_name)
            with open(json_file, 'r') as f:
                for line in f:
                    try:
                        # inside the try so a malformed jsonl line is skipped
                        # instead of aborting the whole scan
                        data = json.loads(line)  # dict
                        imgpath = data['img']
                        mask = data['commoditymask']
                        # the prompt file shares the image basename, with .txt
                        prompt = os.path.join(prompt_root, imgpath.split('/')[-1].split('.')[0] + '.txt')
                        text_mask = data['textmask']
                        imgpathlist.append((imgpath, text_mask, mask, prompt))
                    except Exception as eg:
                        print(f'!!!!!!!!ERROR, type is {eg}')
                        continue
        return imgpathlist

    def gen_out_mask(self, img_shape):
        """Sample a random box covering at least the central third of the image.

        img_shape: (h, w). Returns (top, left, height, width), the bbox format
        expected by bbox2mask_uncropping.
        """
        h, w = img_shape
        # top-left somewhere in the first third, bottom-right in the last
        # third, so the known region always contains the image center
        x1 = random.randint(0, w // 3)
        y1 = random.randint(0, h // 3)
        x2 = random.randint((w * 2) // 3, w)
        y2 = random.randint((h * 2) // 3, h)
        return (y1, x1, y2 - y1, x2 - x1)

    def get_outpaint_mask(self):
        """Build an outpainting mask: unknown region = 1, known region = 0.

        random_cropping_bbox draws from predefined cropping patterns rather
        than fully random boxes; one of two pattern families is chosen 50/50.
        """
        if np.random.randint(0, 2) < 1:
            mask = bbox2mask_uncropping(self.image_size, random_cropping_bbox(img_shape=self.image_size, mask_mode='onedirection_multiscale'))
        else:
            mask = bbox2mask_uncropping(self.image_size, random_cropping_bbox(img_shape=self.image_size, mask_mode='fourdirection'))
        return mask  # h,w,c

    def get_inpaint_mask(self):
        """Union of a random rectangular mask and a random brush-stroke mask."""
        regular_mask = bbox2mask(self.image_size, random_bbox(img_shape=self.image_size, max_bbox_shape=(100, 100)))
        irregular_mask = brush_stroke_mask(self.image_size, num_vertices=(4, 8), brush_width=(12, 30))
        return regular_mask | irregular_mask

    def augment(self, img, mask, hflip=True, rot=True):
        """Randomly flip/transpose an HWC ndarray pair with identical ops.

        Each of horizontal flip, vertical flip and 90-degree transpose is
        applied with probability 0.5 (when enabled) to both arrays.
        """
        hflip = hflip and random.random() < 0.5
        vflip = rot and random.random() < 0.5
        rot90 = rot and random.random() < 0.5
        if hflip:
            img, mask = img[:, ::-1, :], mask[:, ::-1, :]
        if vflip:
            img, mask = img[::-1, :, :], mask[::-1, :, :]
        if rot90:
            img, mask = img.transpose(1, 0, 2), mask.transpose(1, 0, 2)
        return img, mask

    def random_crop(self, img, mask, patchsize=512):
        """Crop the same random patchsize x patchsize window from both arrays.

        img, mask: HWC ndarrays with spatial dims >= patchsize.
        """
        h, w = img.shape[:2]
        # randint is inclusive, so an image exactly patchsize wide/tall is
        # valid (randrange(0, 0) would raise ValueError here)
        ix = random.randint(0, w - patchsize)
        iy = random.randint(0, h - patchsize)

        patch_img = img[iy:iy + patchsize, ix:ix + patchsize, :]
        patch_mask = mask[iy:iy + patchsize, ix:ix + patchsize, :]

        return patch_img, patch_mask

    def __getitem__(self, index):
        """Load, resize, flip and crop one sample; return the training dict."""
        imgpath, textmaskpath, outpaintmaskpath, prompt_path = self.trainpaths[index]

        image = Image.open(imgpath).convert("RGB")
        text_mask = Image.open(textmaskpath).convert("RGB")
        mask = Image.open(outpaintmaskpath).convert("RGB")

        # pre-resize size, used as SDXL "original_sizes" conditioning
        original_sizes = (image.height, image.width)

        image = self.train_resize(image)
        text_mask = self.train_resize(text_mask)
        mask = self.train_resize(mask)

        # flip image and both masks together so they stay aligned
        if self.random_flip and random.random() < 0.5:
            image = self.train_flip(image)
            text_mask = self.train_flip(text_mask)
            mask = self.train_flip(mask)

        if self.center_crop:
            # reconstruct the top-left corner CenterCrop will use, for the
            # SDXL "crop_top_lefts" conditioning
            y1 = max(0, int(round((image.height - self.resolution) / 2.0)))
            x1 = max(0, int(round((image.width - self.resolution) / 2.0)))

            image = self.train_crop(image)
            text_mask = self.train_crop(text_mask)
            mask = self.train_crop(mask)
        else:
            # sample one window and apply it to image and masks identically
            y1, x1, h, w = self.train_crop.get_params(image, (self.resolution, self.resolution))
            image = crop(image, y1, x1, h, w)
            text_mask = crop(text_mask, y1, x1, h, w)
            mask = crop(mask, y1, x1, h, w)

        crop_top_lefts = (y1, x1)
        image = self.train_transforms(image)
        # keep an unnormalized [0, 1] copy for the conditioning input
        control_image = image.clone()
        image = image * 2.0 - 1.0  # [-1, 1]

        text_mask = self.train_transforms(text_mask)
        # inverted text mask: text regions -> 0, everything else -> 1
        text_mask2 = text_mask.clone()
        text_mask2[text_mask >= 0.5] = 0
        text_mask2[text_mask < 0.5] = 1

        mask = self.train_transforms(mask)
        # binarize the outpaint mask: 1 = region to generate, 0 = known
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # white out the to-be-generated region in the conditioning image
        control_image[mask > 0.5] = 1

        examples = {"pixel_values": image, "mask": mask, "conditioning_pixel_values": control_image, "text_mask": text_mask2}
        examples["original_sizes"] = original_sizes
        examples["crop_top_lefts"] = crop_top_lefts

        # NOTE: per-image prompt files (prompt_path) are intentionally ignored;
        # every sample uses the same generic poster prompt.
        prompt = "product marketing poster,highest quality settings"

        text_inputs1 = self.tokenizer_one(prompt, max_length=self.tokenizer_one.model_max_length, padding="max_length", truncation=True, return_tensors="pt")
        examples["input_ids_one"] = text_inputs1.input_ids

        text_inputs2 = self.tokenizer_two(prompt, max_length=self.tokenizer_two.model_max_length, padding="max_length", truncation=True, return_tensors="pt")
        examples["input_ids_two"] = text_inputs2.input_ids

        return examples

    def __len__(self):
        return len(self.trainpaths)



