from torch.utils.data import Dataset
from PIL import Image
#import cv2
import numpy as np
from torchvision import transforms
from .mask import (bbox2mask, brush_stroke_mask, get_irregular_mask, random_bbox, bbox2mask_uncropping, random_cropping_bbox)
import torch
import os
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import random

def pil_loader(path):
    """Open the image at *path* with PIL and return it converted to RGB."""
    img = Image.open(path)
    return img.convert('RGB')

def _maybe_convert_prompt(prompt, tokenizer):
    tokens = tokenizer.tokenize(prompt)
    unique_tokens = set(tokens)
    for token in unique_tokens:
        if token in tokenizer.added_tokens_encoder:
            replacement = token
            i = 1
            while f"{token}_{i}" in tokenizer.added_tokens_encoder:
                replacement += f" {token}_{i}"
                i += 1
            prompt = prompt.replace(token, replacement)
    return prompt

def maybe_convert_prompt(prompt, tokenizer):
    """Apply :func:`_maybe_convert_prompt` to one prompt or a list of prompts.

    Accepts a single string or a list of strings and returns the converted
    result in the same shape as the input.
    """
    is_single = not isinstance(prompt, list)
    batch = [prompt] if is_single else prompt
    converted = [_maybe_convert_prompt(p, tokenizer) for p in batch]
    return converted[0] if is_single else converted

class PosterOutpaint_SdInpainting_Train(Dataset):
    """Training dataset for poster outpainting with an SD-inpainting pipeline.

    Each item contains:
      - ``image``: (C, H, W) float tensor in [-1, 1];
      - ``mask``: (1, H, W) binary tensor, 1 = region to synthesize;
      - ``control_image``: (C, H, W) tensor, known pixels in [0, 1] and
        masked pixels set to -1;
      - ``input_ids`` / ``negative_input_ids``: tokenized fixed prompts.
    """

    def __init__(self, data_root, tokenizer, image_size=512):
        """
        Args:
            data_root: path to a text file whose lines are
                ``<image_path>####<text_mask_path>``.
            tokenizer: huggingface-style tokenizer used to encode prompts.
            image_size: side length of the square training crop.
        """
        self.imgpaths, self.maskpaths = self.get_img_mask_files(data_root)
        self.mask_config = {'mask_mode': "hybrid"}
        self.mask_mode = self.mask_config['mask_mode']
        self.image_size = [image_size, image_size]
        self.tokenizer = tokenizer

        # Resize so the shorter side is 800 px; the square crop itself is
        # taken later in __getitem__ via random_crop().
        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(800, interpolation=transforms.InterpolationMode.BILINEAR),
            ]
        )

    def get_imgfiles(self, pathdir):
        """Read a list file with one image path per line (blank lines skipped)."""
        images = []
        with open(pathdir, 'r') as f:
            for line in f:
                imgpath = line.strip()
                if imgpath:  # tolerate blank / trailing lines
                    images.append(imgpath)
        return images

    def get_img_mask_files(self, pathdir):
        """Read a list file with lines ``<img>####<mask>``.

        Returns:
            (imgpathlist, maskpathlist): two parallel lists of paths.
        """
        imgpathlist, maskpathlist = [], []
        with open(pathdir, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank / trailing lines
                imgpath, maskpath = line.split('####')
                imgpathlist.append(imgpath)
                maskpathlist.append(maskpath)
        return imgpathlist, maskpathlist

    def gen_out_mask(self, img_shape):
        """Sample a random bbox that always covers the central third.

        Args:
            img_shape: (h, w) of the image.
        Returns:
            (top, left, height, width) of the sampled box.
        """
        h, w = img_shape
        x1 = random.randint(0, w // 3)
        y1 = random.randint(0, h // 3)
        x2 = random.randint((w * 2) // 3, w)
        y2 = random.randint((h * 2) // 3, h)
        height = y2 - y1
        width = x2 - x1
        return (y1, x1, height, width)

    def get_outpaint_mask(self):
        """Random outpainting mask; unknown region = 1, known region = 0.

        50/50 chance between a one-direction multiscale crop and a
        four-direction crop. Returns an (h, w, c) ndarray.
        """
        if np.random.randint(0, 2) < 1:
            mask = bbox2mask_uncropping(
                self.image_size,
                random_cropping_bbox(img_shape=self.image_size, mask_mode='onedirection_multiscale'))
        else:
            mask = bbox2mask_uncropping(
                self.image_size,
                random_cropping_bbox(img_shape=self.image_size, mask_mode='fourdirection'))
        return mask  # h,w,c

    def get_inpaint_mask(self):
        """Random inpainting mask: union of a random bbox and brush strokes."""
        regular_mask = bbox2mask(self.image_size, random_bbox(img_shape=self.image_size, max_bbox_shape=(100, 100)))
        irregular_mask = brush_stroke_mask(self.image_size, num_vertices=(4, 8), brush_width=(12, 30))
        return regular_mask | irregular_mask

    def augment(self, img, mask, hflip=True, rot=True):
        """Random horizontal/vertical flip and 90-degree rotation.

        The same transform is applied to *img* and *mask* so they stay
        aligned. Inputs are HWC ndarrays; flipped outputs are views.
        """
        hflip = hflip and random.random() < 0.5
        vflip = rot and random.random() < 0.5
        rot90 = rot and random.random() < 0.5
        if hflip:
            img, mask = img[:, ::-1, :], mask[:, ::-1, :]
        if vflip:
            img, mask = img[::-1, :, :], mask[::-1, :, :]
        if rot90:
            img, mask = img.transpose(1, 0, 2), mask.transpose(1, 0, 2)
        return img, mask

    def random_crop(self, img, mask, patchsize=512):
        """Take the same random square crop from *img* and *mask*.

        Args:
            img, mask: HWC ndarrays with identical spatial size.
            patchsize: crop side length; must be <= min(h, w).
        """
        h, w = img.shape[:2]
        # randint is inclusive on both ends, so this also handles the
        # h == patchsize / w == patchsize edge case where the original
        # randrange(0, 0) raised ValueError.
        ix = random.randint(0, w - patchsize)
        iy = random.randint(0, h - patchsize)

        patch_img = img[iy:iy + patchsize, ix:ix + patchsize, :]
        patch_mask = mask[iy:iy + patchsize, ix:ix + patchsize, :]

        return patch_img, patch_mask

    def __getitem__(self, index):
        """Build one training example (see class docstring for the keys)."""
        image = Image.open(self.imgpaths[index]).convert("RGB")
        image = np.array(image)

        text_mask = Image.open(self.maskpaths[index]).convert("RGB")
        text_mask = np.array(text_mask)

        # Crop away the poster borders: 10% top, 25% bottom, 10% left/right.
        h, w = image.shape[:2]
        crop_h_top, crop_h_bottom, crop_w_left, crop_w_right = int(0.1 * h), int(0.25 * h), int(w * 0.1), int(w * 0.1)
        image = image[crop_h_top:h - crop_h_bottom, crop_w_left:w - crop_w_right, :]
        text_mask = text_mask[crop_h_top:h - crop_h_bottom, crop_w_left:w - crop_w_right, :]

        image = Image.fromarray(image)  # PIL
        text_mask = Image.fromarray(text_mask)  # PIL

        image = np.array(self.image_transforms(image))
        text_mask = np.array(self.image_transforms(text_mask))

        # Random square crop, then joint flip/rotate augmentation.
        image, text_mask = self.random_crop(image, text_mask, patchsize=self.image_size[0])
        image, text_mask = self.augment(image, text_mask)

        image = image.astype(np.float32) / 255.0
        control_image = image.copy()  # keep a [0, 1] copy for the control input
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image)
        image = image * 2.0 - 1.0  # [-1, 1]

        # Binarize the text mask: 1 marks text pixels that must stay visible.
        text_mask = (text_mask.astype(np.float32) / 255.0)[:, :, 0]
        text_mask = np.expand_dims(text_mask, axis=2)  # h,w,1
        text_mask[text_mask < 0.5] = 0
        text_mask[text_mask >= 0.5] = 1

        # 60% outpainting masks, 40% inpainting masks.
        if np.random.uniform() > 0.4:
            mask = self.get_outpaint_mask()
        else:
            mask = self.get_inpaint_mask()  # h,w,1
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # Never mask out text regions.
        mask[np.where(text_mask == 1)] = 0

        # Control image: known pixels in [0, 1], masked pixels forced to -1.
        mask_con = np.concatenate((mask, mask, mask), -1)
        control_image[mask_con > 0.5] = -1
        control_image = control_image.transpose(2, 0, 1)
        control_image = torch.from_numpy(control_image)

        mask = mask.transpose(2, 0, 1)
        mask = torch.from_numpy(mask)  # 1,h,w in {0, 1}

        examples = {"image": image, "mask": mask, "control_image": control_image}

        prompt = "photograph of a beautiful empty scene, highest quality settings"
        prompt = maybe_convert_prompt(prompt, self.tokenizer)

        negative_prompt = "text, ugly, deformed, disfigured, poor details"
        negative_prompt = maybe_convert_prompt(negative_prompt, self.tokenizer)

        text_inputs = self.tokenizer(prompt, max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt")
        examples["input_ids"] = text_inputs.input_ids

        negative_text_inputs = self.tokenizer(negative_prompt, max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt")
        examples["negative_input_ids"] = negative_text_inputs.input_ids

        return examples

    def __len__(self):
        return len(self.imgpaths)


class PosterOutpaint_SdInpainting_Validation(Dataset):
    """Validation dataset: pairs ground-truth poster images with on-disk masks.

    Each item contains:
      - ``image``: (H, W, C) float tensor in [-1, 1];
      - ``mask``: (H, W, 1) binary tensor, 1 = region to synthesize;
      - ``masked_image``: image with masked pixels zeroed out;
      - ``input_ids``: tokenized fixed prompt.
    """

    def __init__(self, data_root, mask_root, tokenizer, image_size=512):
        """
        Args:
            data_root: text file with one ground-truth image path per line.
            mask_root: directory with mask images named like the gt images.
            tokenizer: huggingface-style tokenizer used to encode prompts.
            image_size: side length the images/masks are resized to.
        """
        self.imgs, self.masks = self.get_gtfiles(data_root, mask_root)
        self.image_size = [image_size, image_size]
        self.tokenizer = tokenizer

    def get_gtfiles(self, gt_dir, mask_dir):
        """Read the gt list and pair each image with its same-named mask.

        Returns:
            (gt_images, mask_images): two parallel lists of paths.
        """
        gt_images, mask_images = [], []
        with open(gt_dir, 'r') as f:
            for line in f:
                imgpath = line.strip()
                if not imgpath:
                    continue  # tolerate blank / trailing lines
                gt_images.append(imgpath)
                imgname = os.path.basename(imgpath)
                mask_images.append(os.path.join(mask_dir, imgname))
        return gt_images, mask_images

    def __getitem__(self, index):
        """Build one validation example (see class docstring for the keys)."""
        gt_img_path, mask_path = self.imgs[index], self.masks[index]
        image = Image.open(gt_img_path).convert("RGB")
        image = image.resize((self.image_size[0], self.image_size[1]))
        image = np.array(image) / 255.
        h, w = image.shape[:2]

        mask = Image.open(mask_path).convert("RGB")
        mask = mask.resize((self.image_size[0], self.image_size[1]))
        mask = np.array(mask) / 255.
        mask = mask[:, :, 0]  # h,w
        mask = np.expand_dims(mask, 2)  # h,w,1
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # Crop both dims down to multiples of 32 (presumably to satisfy the
        # VAE downsampling factor — TODO confirm against the model config).
        if h % 32 != 0 or w % 32 != 0:
            h, w = (h // 32) * 32, (w // 32) * 32
            image = image[0:h, 0:w, :]
            mask = mask[0:h, 0:w, :]

        image = torch.from_numpy(image)
        image = image * 2.0 - 1.0  # [-1, 1]

        mask = torch.from_numpy(mask)

        masked_image = (1 - mask) * image

        examples = {"image": image, "mask": mask, "masked_image": masked_image}

        prompt = "photograph of a beautiful empty scene, highest quality settings"
        prompt = maybe_convert_prompt(prompt, self.tokenizer)

        # BUG FIX: the original referenced the bare name `tokenizer`
        # (NameError at runtime); it must be self.tokenizer.
        text_inputs = self.tokenizer(prompt, max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt")
        examples["input_ids"] = text_inputs.input_ids

        return examples

    def __len__(self):
        return len(self.imgs)


