import json
import os
import random
from concurrent.futures import ThreadPoolExecutor, as_completed  # thread pool to speed up reading the jsonl meta files

import cv2
import jsonlines
import numpy as np
import torch
from aoss_client.client import Client
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
# Object-storage (aoss) client used by read_image_url for s3:// URLs.
# Created once at module import time from a hard-coded config path.
conf_path = '/mnt/afs2d/luotianhang/aoss.conf'
client = Client(conf_path)

def resize_image_pil(image, height, width):
    """Resize a PIL image to the given height/width.

    Note: PIL's ``Image.resize`` takes ``(width, height)`` order.
    """
    target_size = (width, height)
    return image.resize(target_size)

# Fixed output resolution (in pixels) that every source/target image is
# resized to before being turned into tensors.
HEIGHT = 360
WIDTH = 640

def read_image_url(url):
    """Load an image as an RGB PIL.Image.

    URLs starting with ``s3://`` are fetched through the module-level aoss
    ``client`` and decoded with OpenCV; anything else is treated as a local
    file path and opened with PIL.

    Raises:
        ValueError: on any failure, with the offending URL in the message and
            the original exception chained (the previous bare ``except:``
            swallowed the real cause).
    """
    try:
        if url.startswith('s3://'):
            image_bytes = client.get(url)
            image_array = np.frombuffer(memoryview(image_bytes), np.uint8)
            image_np = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
            if image_np is None:
                # imdecode signals undecodable data by returning None
                # instead of raising.
                raise ValueError(f'{url}')
            # OpenCV decodes to BGR; convert to the RGB that PIL expects.
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(image_np)
        else:
            image = Image.open(url).convert('RGB')
        return image
    except ValueError:
        raise
    except Exception as exc:
        # Preserve the underlying error via exception chaining.
        raise ValueError(f'{url}') from exc


def process_map(index, image_map):
    """Read one shard's jsonl meta file into a list of metadata dicts.

    Args:
        index: The shard's position; echoed back so concurrent callers can
            reassociate out-of-order results.
        image_map: A one-element list whose dict supplies ``meta_file`` (path
            to a jsonl file) plus ``source_prefix``/``target_prefix`` joined
            onto each record's relative image paths.

    Returns:
        ``(index, metadata)`` where metadata is a list of dicts with keys
        ``source_image_url``, ``target_image_url`` and ``text``.
    """
    spec = image_map[0]
    meta_file = spec['meta_file']
    source_prefix = spec['source_prefix']
    target_prefix = spec['target_prefix']

    metadata = []
    # Parse one JSON object per line with the stdlib; the with-statement
    # closes the file deterministically (the previous jsonlines reader was
    # never closed), and we no longer materialize the whole file up front.
    with open(meta_file, 'r', encoding='utf-8') as fh:
        for raw in fh:
            raw = raw.strip()
            if not raw:
                continue  # tolerate blank lines / trailing newline
            line = json.loads(raw)
            metadata.append({
                'source_image_url': os.path.join(source_prefix, line['source_image']),
                'target_image_url': os.path.join(target_prefix, line['target_image']),
                'text': line['text'],
            })
    return index, metadata

class ControlnetDataset(Dataset):
    """Infinite-length dataset yielding (source, target, prompt) triplets.

    ``image_maps`` is a list of ``(spec, weight)`` pairs where ``spec`` is a
    one-element list holding a dict with ``meta_file`` / ``source_prefix`` /
    ``target_prefix`` keys (the shape ``process_map`` expects).  Every
    ``__getitem__`` call ignores its index, picks a metadata shard according
    to the weights, then samples a random item within it, so epochs are
    effectively endless.
    """

    def __init__(self, image_maps, tokenize_captions) -> None:
        super(ControlnetDataset, self).__init__()
        # Meta-file paths, kept only as human-readable shard identifiers.
        self.data_identy = [image_map[0]['meta_file'] for image_map in image_maps]
        # Dict keyed by the shard's position in image_maps (0..n-1); the
        # integer keys line up with self.weight for random.choices below.
        self.metadata = self.get_maps(image_maps)

        # Per-shard sampling weight is the second element of each pair.
        self.weight = [image_map[1] for image_map in image_maps]
        self.tokenize_captions = tokenize_captions

        # Target images are normalized to [-1, 1]; source (conditioning)
        # images are left in [0, 1].
        self.T_targetimage = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,)),
        ])
        self.T_sourceimage = transforms.Compose([
            transforms.ToTensor(),
        ])

    def __len__(self):
        # Sampling is random rather than index-driven, so report a huge
        # sentinel length and let the training loop decide when to stop.
        return 999_999_999

    def get_maps(self, image_maps):
        """Load every shard's meta file concurrently.

        Returns a dict mapping shard index -> list of metadata items.
        """
        metadata = {}
        # The jsonl reads are I/O-bound, so a thread pool parallelizes them.
        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = [
                executor.submit(process_map, index, image_map)
                for index, image_map in enumerate(image_maps)
            ]
            # as_completed yields out of order; the returned index restores it.
            for future in as_completed(futures):
                index, data = future.result()
                metadata[index] = data
        return metadata

    def __getitem__(self, index):
        # `index` is intentionally ignored: choose a shard by weight, then a
        # random item within it.  random.choices indexes self.metadata with
        # integers 0..n-1, which matches the dict keys built in get_maps.
        shard = random.choices(self.metadata, weights=self.weight)[0]
        item = random.choice(shard)

        # read_image_url already returns RGB images.
        # (Removed leftover debug code that composited the pair and wrote
        # 'temp.png' on every item — per-item disk I/O and a write race when
        # the DataLoader uses multiple workers.)
        source_image = read_image_url(item['source_image_url'])
        target_image = read_image_url(item['target_image_url'])

        source_image = resize_image_pil(source_image, height=HEIGHT, width=WIDTH)
        target_image = resize_image_pil(target_image, height=HEIGHT, width=WIDTH)

        return {
            'source_image': self.T_sourceimage(source_image),
            'target_image': self.T_targetimage(target_image),
            'prompt': self.tokenize_captions(item['text'])[0],
        }