from PIL import Image
import jsonlines
from aoss_client.client import Client
from torch.utils.data import Dataset
import torch
from torchvision import transforms
import cv2
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed # multi-threaded acceleration for reading jsonl files
import os
import random
# Path to the AOSS (object-storage) credentials file. The client is created
# once at import time and shared by read_image_url for 's3://' URLs.
conf_path = '/mnt/afs2d/luotianhang/aoss.conf'
client = Client(conf_path)

def resize_image_pil(image, height, width):
    """Resize a PIL image to (width, height) using PIL's default resampling."""
    target_size = (width, height)
    return image.resize(target_size)
def prepare_mask_and_masked_image(image, mask):
    """Convert a PIL image/mask pair into training tensors for inpainting.

    Returns:
        mask_t:       float32 tensor of shape (1, 1, H, W), binarized to {0, 1}
                      at a 0.5 threshold.
        masked_image: the image as a (1, 3, H, W) float32 tensor scaled to
                      [-1, 1], with the masked (mask == 1) region zeroed out.
    """
    rgb = np.asarray(image.convert("RGB"), dtype=np.float32)
    # HWC -> NCHW, then scale [0, 255] -> [-1, 1].
    rgb = np.transpose(rgb[None], (0, 3, 1, 2))
    image_t = torch.from_numpy(rgb) / 127.5 - 1.0

    gray = np.asarray(mask.convert("L"), dtype=np.float32) / 255.0
    binary = np.where(gray < 0.5, 0.0, 1.0).astype(np.float32)[None, None]
    mask_t = torch.from_numpy(binary)

    # Zero out the region to be inpainted; (mask_t < 0.5) keeps only the
    # unmasked pixels.
    masked_image = image_t * (mask_t < 0.5)

    return mask_t, masked_image

def convert_source_image(source_image, loc_x1y1x2y2):
    """Black out everything outside the (x1, y1, x2, y2) box.

    Returns a new RGB image with the same size as ``source_image`` in which
    only the given box keeps the original pixels; the rest is black.
    """
    x1, y1 = loc_x1y1x2y2[0], loc_x1y1x2y2[1]
    canvas = Image.new('RGB', (source_image.width, source_image.height), 0)
    region = source_image.crop(loc_x1y1x2y2)
    canvas.paste(region, (x1, y1))
    return canvas.convert('RGB')

def make_mask_image(loc_x1y1x2y2, rwidth, rheight, twidth, theight):
    """Build a binary (L-mode) mask image of size (twidth, theight).

    The box ``loc_x1y1x2y2`` is given in the coordinate system of a
    (rwidth x rheight) reference image; it is rescaled to the target
    resolution, and the rectangle is filled with 255 on a black background.

    Args:
        loc_x1y1x2y2: (x1, y1, x2, y2) box in reference coordinates.
        rwidth, rheight: reference (source) image width/height.
        twidth, theight: target mask width/height.

    Returns:
        PIL.Image in mode 'L': 255 inside the scaled box, 0 elsewhere.
    """
    x1, y1, x2, y2 = loc_x1y1x2y2

    # Rescale to the target resolution, then clamp into bounds so negative
    # coordinates cannot wrap around via numpy's negative indexing (the
    # original code also applied int() twice, redundantly).
    x1 = min(max(int(x1 / rwidth * twidth), 0), twidth)
    x2 = min(max(int(x2 / rwidth * twidth), 0), twidth)
    y1 = min(max(int(y1 / rheight * theight), 0), theight)
    y2 = min(max(int(y2 / rheight * theight), 0), theight)

    mask = np.zeros((theight, twidth), dtype=np.uint8)
    mask[y1:y2, x1:x2] = 255
    return Image.fromarray(mask).convert('L')




# Target training resolution in pixels. Earlier experiments used half of
# 640x360 (i.e. 320x180); current runs use a square 256x256.
HEIGHT = 256
WIDTH = 256

def read_image_url(url):
    """Load an RGB PIL image from an s3:// URL (via the shared AOSS client)
    or from a local filesystem path.

    Raises:
        ValueError: if the image cannot be fetched or decoded; the original
            exception is chained as the cause.
    """
    try:
        if 's3://' in url:
            # Fetch raw bytes from object storage and decode with OpenCV.
            image_bytes = client.get(url)
            image_mem_view = memoryview(image_bytes)
            image_array = np.frombuffer(image_mem_view, np.uint8)
            image_np = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
            # OpenCV decodes to BGR; convert to RGB before wrapping in PIL.
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(image_np).convert('RGB')
        else:
            image = Image.open(url).convert('RGB')
        return image
    except Exception as exc:
        # The previous bare `except:` also swallowed KeyboardInterrupt /
        # SystemExit and discarded the original traceback; chaining keeps
        # the real failure visible.
        raise ValueError(f'{url}') from exc


def process_map(index, map):
    """Read one jsonl metadata file and resolve its image URLs.

    Returns (index, records) so results collected from a thread pool can be
    matched back to their submission order.
    """
    cfg = map[0]
    meta_file = cfg['meta_file']
    source_prefix = cfg['source_prefix']
    target_prefix = cfg['target_prefix']

    metadata = [
        {
            'source_image_url': os.path.join(source_prefix, line['source_image']),
            'target_image_url': os.path.join(target_prefix, line['target_image']),
            'text': line['text'],
            'child': line['child'],
            'adult': line['adult'],
        }
        for line in jsonlines.open(meta_file, 'r')
    ]
    return index, metadata

class ControlnetInpaintingDataset(Dataset):
    """Randomly-sampled dataset for ControlNet inpainting training.

    ``image_maps`` is a sequence of (config, weight) pairs, where config is a
    one-element list holding a dict with 'meta_file', 'source_prefix' and
    'target_prefix' keys, and weight controls how often records from that
    meta file are sampled in __getitem__.
    """
    def __init__(self,image_maps,tokenize_captions) -> None:
        super(ControlnetInpaintingDataset,self).__init__()
        # Keep the meta-file paths as an identity for this dataset mix.
        self.data_identy = [image_map[0]['meta_file'] for image_map in image_maps]
        # Dict keyed by integer index 0..n-1 (see get_maps); __getitem__'s
        # random.choices relies on those keys matching positional indices.
        self.metadata = self.get_maps(image_maps)

        # Per-meta-file sampling weights (second element of each pair).
        self.weight = []
        for im in image_maps:
            self.weight.append(im[1])
        self.tokenize_captions = tokenize_captions

        # Target image is normalized [0,1] -> [-1,1]; the source
        # (conditioning) image stays in [0,1].
        self.T_targetimage = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5),(0.5))
        ])
        self.T_sourceimage = transforms.Compose([
            transforms.ToTensor(),
        ])
      

    def __len__(self):
        # Effectively infinite: __getitem__ ignores the index and draws a
        # random sample, so the reported length only bounds the sampler.
        return 999_999_999
    def get_maps(self,image_maps):
        """Load all jsonl meta files in parallel; returns {index: records}."""
        metadata = {}
        # Create a thread pool with a bounded number of workers.
        with ThreadPoolExecutor(max_workers=8) as executor:
            # Submit one task per meta file and collect results as they finish.
            futures = [executor.submit(process_map, index, map) for index, map in enumerate(image_maps)]
            
            for future in as_completed(futures):
                index, data = future.result()
                metadata[index] = data

        return metadata

    def __getitem__(self,index):
        """Draw one random training sample; the incoming index is ignored."""
        # Pick a meta file proportionally to its weight. self.metadata is a
        # dict keyed 0..n-1, which random.choices indexes positionally.
        metadata = random.choices(self.metadata,weights=self.weight)[0]
        item = random.choice(metadata)

        source_image_url = item['source_image_url']
        target_image_url = item['target_image_url']
        prompt = item['text']

        source_image = read_image_url(source_image_url).convert('RGB')
        target_image = read_image_url(target_image_url).convert('RGB')
        # width = source_image.width + target_image.width  # total width
        # height = max(source_image.height, target_image.height)  # tallest height
        # new_image = Image.new('RGB', (width, height))  # create the combined image
        # new_image.paste(source_image, (0, 0))  # first image on the left
        # new_image.paste(target_image, (source_image.width, 0))  # second image on the right
        # new_image.save('temp.png')

        # Flatten child/adult boxes into parallel lists of boxes and labels.
        # 'child' / 'adult' are presumably lists of (x1, y1, x2, y2) boxes —
        # TODO confirm against the meta-file schema.
        child = item['child']
        adult = item['adult']
        mem_loc=[]
        mem_pro=[]
        for c in child:
            mem_loc.append(c)
            mem_pro.append('child')
            # mem_sk[c]='child'
        for a in adult:
            mem_loc.append(a)
            mem_pro.append('adult')
            # mem_sk[a]='adult'


        # locs = list(mem_sk.keys())
        # prompts = list(mem_sk.values())

        # Pick one person box at random; its label becomes the caption.
        index = random.randint(0,len(mem_loc)-1)
        # source_image.save('./temp2.png')
        source_image = convert_source_image(source_image,mem_loc[index])
        mask_image = make_mask_image(mem_loc[index],source_image.width,source_image.height,WIDTH,HEIGHT)
        source_image = resize_image_pil(source_image,height=HEIGHT,width=WIDTH).convert('RGB')
        target_image = resize_image_pil(target_image,height=HEIGHT,width=WIDTH).convert('RGB')
        mask, masked_image = prepare_mask_and_masked_image(target_image,mask_image)
        # source_image.save('./temp.png')
        # import sys
        # sys.exit()
        mem = {
            'source_image':self.T_sourceimage(source_image),
            'target_image':self.T_targetimage(target_image),
            'prompt':self.tokenize_captions(mem_pro[index])[0],
            'mask':mask,
            'masked_image':masked_image,
        }
        return mem


class ControlnetInpaintingSDXLDataset(Dataset):
    """SDXL variant of the inpainting dataset.

    Same sampling scheme as ControlnetInpaintingDataset, but the caption is
    tokenized with SDXL's dual tokenizers and the sample carries the
    original size / crop-top-left conditioning inputs SDXL expects.
    """
    def __init__(self,image_maps,tokenize_captions) -> None:
        super(ControlnetInpaintingSDXLDataset,self).__init__()
        # Keep the meta-file paths as an identity for this dataset mix.
        self.data_identy = [image_map[0]['meta_file'] for image_map in image_maps]
        # Dict keyed by integer index 0..n-1 (see get_maps); __getitem__'s
        # random.choices relies on those keys matching positional indices.
        self.metadata = self.get_maps(image_maps)
        # Per-meta-file sampling weights (second element of each pair).
        self.weight = []
        for im in image_maps:
            self.weight.append(im[1])
        self.tokenize_captions = tokenize_captions
        # Target image is normalized [0,1] -> [-1,1]; the source
        # (conditioning) image stays in [0,1].
        self.T_targetimage = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5),(0.5))
        ])
        self.T_sourceimage = transforms.Compose([
            transforms.ToTensor(),
        ])
    def __len__(self):
        # Effectively infinite: __getitem__ ignores the index and draws a
        # random sample, so the reported length only bounds the sampler.
        return 999_999_999

    def get_maps(self,image_maps):
        """Load all jsonl meta files in parallel; returns {index: records}."""
        metadata = {}
        # Create a thread pool with a bounded number of workers.
        with ThreadPoolExecutor(max_workers=8) as executor:
            # Submit one task per meta file and collect results as they finish.
            futures = [executor.submit(process_map, index, map) for index, map in enumerate(image_maps)]
            
            for future in as_completed(futures):
                index, data = future.result()
                metadata[index] = data

        return metadata
    def __getitem__(self, index):
        """Draw one random training sample; the incoming index is ignored."""
        # Pick a meta file proportionally to its weight. self.metadata is a
        # dict keyed 0..n-1, which random.choices indexes positionally.
        metadata = random.choices(self.metadata,weights=self.weight)[0]
        item = random.choice(metadata)

        source_image_url = item['source_image_url']
        target_image_url = item['target_image_url']
        prompt = item['text']

        source_image = read_image_url(source_image_url).convert('RGB')
        target_image = read_image_url(target_image_url).convert('RGB')

        # SDXL micro-conditioning: record the pre-resize target size.
        original_height = target_image.height
        original_width = target_image.width
       
        # Flatten child/adult boxes into parallel lists of boxes and labels.
        child = item['child']
        adult = item['adult']
        mem_loc=[]
        mem_pro=[]
        for c in child:
            mem_loc.append(c)
            mem_pro.append('child')
        for a in adult:
            mem_loc.append(a)
            mem_pro.append('adult')

        # Pick one person box at random; its label becomes the caption.
        index = random.randint(0,len(mem_loc)-1)
        source_image = convert_source_image(source_image,mem_loc[index])
        mask_image = make_mask_image(mem_loc[index],source_image.width,source_image.height,WIDTH,HEIGHT)
        source_image = resize_image_pil(source_image,height=HEIGHT,width=WIDTH).convert('RGB')
        target_image = resize_image_pil(target_image,height=HEIGHT,width=WIDTH).convert('RGB')
        mask, masked_image = prepare_mask_and_masked_image(target_image,mask_image)

        # Dual-tokenizer output; 'tokenizer_0' / 'tokenizer_1' keys are
        # produced by the injected tokenize_captions callable.
        mem_tokenizer = self.tokenize_captions(mem_pro[index])

        mem = {
            'source_image':self.T_sourceimage(source_image),
            'target_image':self.T_targetimage(target_image),
            'prompt':mem_tokenizer['tokenizer_0'],
            'pooled_prompt':mem_tokenizer['tokenizer_1'],
            'mask':mask,
            'masked_image':masked_image,
            'original_size':(original_height,original_width),
            'crop_top_left':(0,0),
        }
        return mem