from typing import List
from torch.utils.data import Dataset
import torch
from torchvision import transforms
import random
import os
import jsonlines
from aoss_client.client import Client
import numpy as np
from PIL import Image
import cv2
import pickle
from PIL import Image,ImageFilter,ImageOps
# Path to the AOSS object-storage credentials; the shared client below is
# used by read_image_url for every s3:// fetch in this module.
conf_path = '/mnt/afs/luotianhang/aoss_yun.conf'
client = Client(conf_path)
from concurrent.futures import ThreadPoolExecutor, as_completed
# Object-store prefix holding the irregular mask images used by the
# image-restore auxiliary task.
MASK_ROOT = 'cluster_lth_yun:s3://cabin-aigc/AIGC/diffusion_data/luotianhang/open_source_data/irregular_mask'
# Pre-pickled sequence of mask file paths, joined onto MASK_ROOT at sample
# time. NOTE(review): pickle.load is only safe because this file is a
# trusted local artifact — never point it at untrusted data.
MASK_DATA = pickle.load(open('/mnt/afs/luotianhang/mask_data.pkl','rb'))
# Base multiplier for the MaxFilter kernel that dilates irregular masks
# (see image_restore_task: size = MASK_SIZE_FILTER * 13).
MASK_SIZE_FILTER = 3
# Global resolution multiplier: the training canvas is
# (640 * SCALE_INDEX) x (368 * SCALE_INDEX).
SCALE_INDEX  = 1 
def check_len(prompt):
    """Return True when *prompt* contains at most 70 whitespace-separated words."""
    return len(prompt.split()) <= 70
def print_color(message, color):
    """Print *message* wrapped in an ANSI terminal color escape.

    Parameters
    ----------
    message : str
        Text to print.
    color : str
        One of 'red', 'green' or 'blue'.

    Raises
    ------
    ValueError
        If *color* is not a supported name (the original unreachable
        `else` branch intended this; the assert made it dead code).
    """
    # ANSI SGR color codes: 31=red, 32=green, 34=blue; '\033[0m' resets.
    # Fixed: the blue branch used '\034[32m' — \034 is not the ESC
    # character and 32 is green's code.
    codes = {'red': '31', 'green': '32', 'blue': '34'}
    if color not in codes:
        # Raise instead of assert so validation survives `python -O`.
        raise ValueError(f'{color} is not supported')
    print('\033[' + codes[color] + 'm' + message + '\033[0m')
def read_image_url(url, convertRGB=True):
    """Load an image from an s3:// URL (via the module-level AOSS ``client``)
    or from a local filesystem path.

    Parameters
    ----------
    url : str
        s3:// object URL or local path.
    convertRGB : bool
        When True (default), return the image converted to RGB mode.

    Returns
    -------
    PIL.Image.Image

    Raises
    ------
    ValueError
        If the image cannot be fetched or decoded; the underlying error is
        chained as the cause (the original bare ``except`` discarded it and
        also swallowed KeyboardInterrupt).
    """
    try:
        if 's3://' in url:
            # Fetch raw bytes from the object store and decode with OpenCV.
            image_bytes = client.get(url)
            image_array = np.frombuffer(memoryview(image_bytes), np.uint8)
            image_np = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
            if image_np is None:
                # cv2.imdecode signals failure by returning None, not raising.
                raise ValueError(f'cannot decode image bytes from {url}')
            # OpenCV decodes to BGR; convert to RGB before handing to PIL.
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(image_np)
        else:
            image = Image.open(url)

        return image.convert('RGB') if convertRGB else image
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ValueError(f'Maybe {url} is wrong') from e
# Caption-model artifacts to scrub from generated captions, applied in order.
_CAPTION_REPLACEMENTS = (
    ('arafly', ''),
    ('araffex', ''),
    ('arafed', ''),
    ('araflane', 'airplane'),
    ('The image features', ''),
)


def _clean_caption(text):
    """Strip known caption artifacts and boilerplate lead-ins from *text*."""
    for old, new in _CAPTION_REPLACEMENTS:
        text = text.replace(old, new)
    return text


def process_map(index, map):
    """Parse one dataset's metadata jsonl file into sample records.

    Parameters
    ----------
    index : int
        Caller-side position of this map; returned unchanged so parallel
        workers' results can be reassembled in order.
    map : sequence
        ``map[0]`` is the dataset config dict with keys 'meta_file',
        'raw_prefix', 'edited_prefix', 'mask_prefix', 'type' and
        'can_use_mask'.

    Returns
    -------
    tuple
        ``(index, metadata)`` where metadata is a non-empty list of sample
        dicts consumed by InpaintingDatasetSD3.

    Raises
    ------
    AssertionError
        If a line lacks required keys, a caption field has the wrong type,
        or the file yields no usable samples at all.
    """
    metadata = []
    cfg = map[0]
    meta_file = cfg['meta_file']
    raw_prefix = cfg['raw_prefix']
    edited_prefix = cfg['edited_prefix']
    mask_prefix = cfg['mask_prefix']
    dataset_type = cfg['type']
    can_use_mask = cfg['can_use_mask']

    # `with` closes the jsonl handle; the original list(jsonlines.open(...))
    # leaked it.
    with jsonlines.open(meta_file, 'r') as reader:
        for line in reader:
            assert 'text' in line.keys() and 'edited_image' in line.keys(), f'text and edited_image must be in {meta_file}'
            # Bare 'person' captions are too generic to train on.
            if line['text'] == 'person':
                continue

            edit_prompt = [line['text']]
            if 'loc_x1y1x2y2_blip' in line.keys():
                assert isinstance(line['loc_x1y1x2y2_blip'], str)  # blip caption is a string
                edit_prompt.append(_clean_caption(line['loc_x1y1x2y2_blip']))
            if 'texts_bird' in line.keys():
                assert isinstance(line['texts_bird'], List)
                edit_prompt += line['texts_bird']  # texts_bird is a list
            if 'vqa_text' in line.keys():
                assert isinstance(line['vqa_text'], str)
                edit_prompt.append(_clean_caption(line['vqa_text']))

            # Drop prompts longer than 70 words; skip the sample entirely
            # when nothing survives.
            edit_prompt = [ep for ep in edit_prompt if check_len(ep)]
            if len(edit_prompt) == 0:
                continue

            edited_image_url = os.path.join(edited_prefix, line['edited_image'])
            raw_image_url = os.path.join(raw_prefix, line['raw_image']) if 'raw_image' in line.keys() else ''

            # Copy so the += below does not mutate the parsed line in place.
            texts = list(line.get('texts', []))
            if 'text_openbmb_MiniCPM_v2' in line.keys():
                texts += line['text_openbmb_MiniCPM_v2']

            if 'mask_image' in line.keys():
                mask_image_url = os.path.join(mask_prefix, line['mask_image'])
            else:
                mask_image_url = ''

            if 'loc_x1y1x2y2' in line.keys():
                loc_x1y1x2y2 = line['loc_x1y1x2y2']
                x1, y1, x2, y2 = loc_x1y1x2y2
                # Boxes smaller than 64x64 px are too small to inpaint.
                if (x2 - x1) < 32 * 2 and (y2 - y1) < 32 * 2:
                    continue
            else:
                loc_x1y1x2y2 = [0, 0, 0, 0]

            assert isinstance(edit_prompt, List)
            metadata.append({
                'type': dataset_type,
                'can_use_mask': can_use_mask,
                'edited_image_url': edited_image_url,
                'edit_prompt': edit_prompt,
                'raw_image_url': raw_image_url,
                'texts': texts,
                'loc_x1y1x2y2': loc_x1y1x2y2,
                'mask_image_url': mask_image_url,
            })

    assert len(metadata) > 0, f'{meta_file} is illegal'
    return index, metadata
def output_edit_prompt(item):
    """Select the caption used to condition training on *item*.

    When auxiliary captions exist in item['texts'], one of them is sampled
    with probability 0.8; otherwise the primary edit prompt is used (its
    first element when it is a list).
    """
    texts = item['texts']
    prompt = item['edit_prompt']
    if texts and random.random() > 0.2:
        prompt = random.choice(texts)
    return prompt[0] if isinstance(prompt, list) else prompt
def random_irregular_mask_image():
    """Sample one irregular mask image from MASK_DATA and return it inverted.

    NOTE(review): the inversion presumably flips the stored convention so
    white marks the region to repaint — confirm against the mask files.
    """
    relative_path = random.choice(MASK_DATA)
    mask_image = read_image_url(os.path.join(MASK_ROOT, relative_path))
    return ImageOps.invert(mask_image)
def generate_mask_for_outpainting(width, height, revert=False):
    """Build an outpainting mask: white border (255 = generate) around a
    centered black rectangle (0 = keep) covering a random 60-90% of each side.

    Parameters
    ----------
    width, height : int
        Mask dimensions in pixels.
    revert : bool
        When True, invert the mask colors.

    Returns
    -------
    PIL.Image.Image in mode 'L'.
    """
    center_keep_ratio = random.randint(6, 9) / 10
    keep_width = int(center_keep_ratio * width)
    keep_height = int(center_keep_ratio * height)

    # uint8 is required: Image.fromarray does not accept the float64 array
    # that np.ones(...) * 255 produced, and convert('L') expects 0-255.
    mask = np.full((height, width), 255, dtype=np.uint8)
    # Explicit start/stop indices: the original `m : -m` slice collapses to
    # an empty `[0:-0]` slice whenever int(margin / 2) == 0, which erased
    # the keep region for 1-pixel margins.
    top = (height - keep_height) // 2
    left = (width - keep_width) // 2
    mask[top:top + keep_height, left:left + keep_width] = 0
    if revert:
        mask = 255 - mask
    return Image.fromarray(mask).convert('L')
def generate_mask_resized(x1y1x2y2, width, height):
    """Rescale a bounding box from (width, height) source coordinates to the
    fixed training resolution and rasterize it as a binary mask.

    Parameters
    ----------
    x1y1x2y2 : sequence of 4 numbers
        Box corners in the source image's coordinate system.
    width, height : int
        Source image dimensions the box is relative to.

    Returns
    -------
    PIL.Image.Image in mode 'L', sized (640*SCALE_INDEX, 368*SCALE_INDEX),
    with 255 inside the box and 0 outside.
    """
    target_w = 640 * SCALE_INDEX
    target_h = 368 * SCALE_INDEX
    x1, y1, x2, y2 = x1y1x2y2
    # Scale each corner into the target canvas, truncating toward zero.
    sx1 = int(x1 / width * target_w)
    sy1 = int(y1 / height * target_h)
    sx2 = int(x2 / width * target_w)
    sy2 = int(y2 / height * target_h)

    canvas = np.zeros((target_h, target_w), dtype=np.uint8)
    canvas[sy1:sy2, sx1:sx2] = 255
    return Image.fromarray(canvas).convert('L')
def prepare_mask_and_masked_image(image, mask):
    """Convert a PIL image/mask pair into the tensors used for inpainting.

    Parameters
    ----------
    image : PIL.Image.Image
        The target image.
    mask : PIL.Image.Image
        Mask where bright pixels mark the region to repaint.

    Returns
    -------
    mask : torch.FloatTensor of shape (1, 1, H, W), binarized to {0, 1}.
    masked_image : torch.FloatTensor of shape (1, 3, H, W) in [-1, 1] with
        the repaint region zeroed out.
    """
    rgb = np.array(image.convert("RGB"))
    # HWC -> NCHW, then scale [0, 255] -> [-1, 1].
    rgb = rgb[None].transpose(0, 3, 1, 2)
    pixel_values = torch.from_numpy(rgb).to(dtype=torch.float32) / 127.5 - 1.0

    mask_np = np.array(mask.convert("L")).astype(np.float32) / 255.0
    mask_np = mask_np[None, None]
    # Hard-binarize at 0.5 so anti-aliased mask edges become crisp 0/1.
    mask_np = np.where(mask_np < 0.5, 0.0, 1.0).astype(np.float32)
    mask_tensor = torch.from_numpy(mask_np)

    # Zero the repaint region: (mask < 0.5) is True only where mask == 0.
    masked_image = pixel_values * (mask_tensor < 0.5)

    return mask_tensor, masked_image
def visualize_masked_image(masked_image, save_path):
    """Randomly (p=0.5) dump *masked_image* to *save_path* for debugging.

    Expects a (1, 3, H, W) tensor normalized to [-1, 1], as produced by
    prepare_mask_and_masked_image.
    """
    if random.random() <= 0.5:
        return
    # Drop the batch dimension and move to a CPU numpy array.
    array = masked_image.squeeze(0).detach().cpu().numpy()
    # De-normalize [-1, 1] -> [0, 255] and rearrange CHW -> HWC.
    array = ((array + 1.0) * 127.5).transpose(1, 2, 0).astype(np.uint8)
    Image.fromarray(array).save(save_path)


class InpaintingDatasetSD3(Dataset):
    """Infinite-sampling multi-task dataset for SD3 inpainting training.

    Each __getitem__ ignores its index: it draws a source dataset
    (weighted), draws a random sample from it, draws a task (inpainting /
    restore / text2image / outpainting) and returns a dict with keys
    'prompt', 'edited_pixel_values', 'maked_images' and 'mask'.
    """

    def __init__(self, image_maps) -> None:
        """Build the dataset.

        Parameters
        ----------
        image_maps : list
            Each element is a pair ``(config_dict, weight)`` where the
            config dict holds 'meta_file', prefixes and 'type'
            ('Image2ImageDataset' or 'Text2ImageDataset').
        """
        super(InpaintingDatasetSD3, self).__init__()
        self.data_identy = [image_map[0]['meta_file'] for image_map in image_maps]
        # get_maps returns {index: records}; flatten to a list ordered by
        # index so positions line up with self.weight below.  (The original
        # kept the dict and relied on random.choices indexing its int keys.)
        maps_by_index = self.get_maps(image_maps)
        self.metadata = [maps_by_index[i] for i in range(len(maps_by_index))]
        self.T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        self.weight = []
        i2i_weight = 0
        t2i_weight = 0
        # Step 1: total weight mass per dataset family.
        for im in image_maps:
            if im[0]['type'] == 'Image2ImageDataset':
                i2i_weight += im[1]
            elif im[0]['type'] == 'Text2ImageDataset':
                t2i_weight += im[1]
            else:
                raise ValueError()
        # Step 2: cross-multiply so the two families are balanced — each
        # i2i dataset's weight is scaled by the total t2i mass and vice
        # versa.
        for im in image_maps:
            if im[0]['type'] == 'Image2ImageDataset':
                self.weight.append(im[1] * t2i_weight)
            elif im[0]['type'] == 'Text2ImageDataset':
                self.weight.append(im[1] * i2i_weight)
            else:
                raise ValueError()

    def __len__(self):
        # Effectively infinite: sampling is random, so the epoch length is
        # controlled by the training loop, not the dataset size.
        return 999_999_999

    @staticmethod
    def get_maps(maps):
        """Load every metadata jsonl file in parallel.

        Returns
        -------
        dict
            Maps each entry's position in *maps* to its parsed sample
            records (see process_map).
        """
        metadata = {}
        # Thread pool is appropriate here: the work is I/O-bound file reads.
        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = [executor.submit(process_map, index, map) for index, map in enumerate(maps)]
            for future in as_completed(futures):
                index, data = future.result()
                metadata[index] = data
        return metadata

    def __getitem__(self, index):
        # `index` is intentionally ignored — see __len__.
        metadata = random.choices(self.metadata, weights=self.weight)[0]
        item = random.choice(metadata)
        if item['type'] == 'Image2ImageDataset':
            return self._getitem_from_image2imagedataset(item)
        elif item['type'] == 'Text2ImageDataset':
            return self._getitem_from_text2imagedataset(item)
        else:
            # Fixed: the original formatted metadata["type"] here, which
            # raises TypeError (metadata is a list of samples, not a dict)
            # instead of the intended ValueError.
            raise ValueError(f'type {item["type"]} is not supported yet')

    def _getitem_from_text2imagedataset(self, item):
        # Equal-weight draw between the main text task and the two
        # auxiliary tasks.
        task = random.choices(['restore', 'text', 'outpainting'],
                              weights=[30, 30, 30])[0]
        if task == 'restore':        # aux task
            return self.image_restore_task(item)
        elif task == 'outpainting':  # aux task
            return self.image_outpainting_task(item)
        else:                        # main task
            return self.text2image_task(item)

    def _getitem_from_image2imagedataset(self, item):
        # 'text' currently has weight 0 (never drawn) but is kept so the
        # mix can be re-enabled by editing one number.
        task = random.choices(['inpainting', 'restore', 'text', 'outpainting'],
                              weights=[40, 20, 0, 20])[0]
        if task == 'inpainting':     # main task
            return self.image_inpainting_task(item)
        elif task == 'restore':      # aux task
            return self.image_restore_task(item)
        elif task == 'text':         # aux task
            return self.text2image_task(item)
        elif task == 'outpainting':  # aux task
            return self.image_outpainting_task(item)
        else:
            raise ValueError(f"{task} is not supported")

    def text2image_task(self, item):
        """Full-mask task: mask the whole canvas so inpainting degenerates
        into plain text-to-image generation."""
        edited_image = read_image_url(item['edited_image_url'])
        # A box covering the entire image == mask everything.
        mask = generate_mask_resized(
            [0, 0, edited_image.width, edited_image.height],
            edited_image.width, edited_image.height).convert('L')
        mask = mask.resize((640 * SCALE_INDEX, 368 * SCALE_INDEX))
        edited_image = edited_image.resize((640 * SCALE_INDEX, 368 * SCALE_INDEX))
        mask, masked_image = prepare_mask_and_masked_image(edited_image, mask)
        visualize_masked_image(masked_image, './text2image.png')
        edit_prompt = output_edit_prompt(item)
        print_color('text2image:' + edit_prompt, 'red')
        return {
            'prompt': edit_prompt,
            'edited_pixel_values': self.T(edited_image),
            # NOTE: 'maked_images' (sic) kept — downstream code uses this key.
            'maked_images': masked_image,
            'mask': mask,
        }

    def image_restore_task(self, item):
        """Unconditional restore task: repaint random irregular holes with
        an empty prompt."""
        edited_image = read_image_url(item['edited_image_url'])
        mask = random_irregular_mask_image().convert('L')
        mask = mask.resize((640 * SCALE_INDEX, 368 * SCALE_INDEX))
        # Dilate the thin irregular strokes into thick holes.
        mask = mask.filter(ImageFilter.MaxFilter(size=MASK_SIZE_FILTER * 13))
        edited_image = edited_image.resize((640 * SCALE_INDEX, 368 * SCALE_INDEX))
        mask, masked_image = prepare_mask_and_masked_image(edited_image, mask)
        visualize_masked_image(masked_image, './image_restore.png')
        edit_prompt = ''  # empty prompt marks the unconditional task
        print_color('imagerestore:' + edit_prompt, 'red')
        return {
            'prompt': edit_prompt,
            'edited_pixel_values': self.T(edited_image),
            'maked_images': masked_image,
            'mask': mask,
        }

    def image_inpainting_task(self, item):
        """Main task: repaint the annotated bounding-box region, conditioned
        on one of the sample's edit prompts."""
        edit_prompt = random.choice(item['edit_prompt'])
        edited_image = read_image_url(item['edited_image_url'])
        mask = generate_mask_resized(item['loc_x1y1x2y2'],
                                     edited_image.width, edited_image.height)
        edited_image = edited_image.resize((640 * SCALE_INDEX, 368 * SCALE_INDEX))
        mask, masked_image = prepare_mask_and_masked_image(edited_image, mask)
        visualize_masked_image(masked_image, './inpainting.png')
        print_color('imageinpainting:' + edit_prompt, 'red')
        return {
            'prompt': edit_prompt,
            'edited_pixel_values': self.T(edited_image),
            'maked_images': masked_image,
            'mask': mask,
        }

    def image_outpainting_task(self, item):
        """Outpainting task: keep a random centered crop and repaint the
        border, currently with an empty (unconditional) prompt."""
        edited_image = read_image_url(item['edited_image_url'])
        edited_image = edited_image.resize((640 * SCALE_INDEX, 368 * SCALE_INDEX))
        mask = generate_mask_for_outpainting(width=640 * SCALE_INDEX,
                                             height=368 * SCALE_INDEX)
        mask, masked_image = prepare_mask_and_masked_image(edited_image, mask)
        visualize_masked_image(masked_image, './outpainting.png')
        edit_prompt = ''  # unconditional outpainting for now
        print_color('outpainting:' + edit_prompt, 'red')
        return {
            'prompt': edit_prompt,
            'edited_pixel_values': self.T(edited_image),
            'maked_images': masked_image,
            'mask': mask,
        }

