from abc import abstractmethod
from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset
import bisect
import numpy as np
import os
import albumentations
from PIL import Image
from torch.utils.data import Dataset, ConcatDataset


class Txt2ImgIterableBaseDataset(IterableDataset):
    """Common interface for chainable text-to-image iterable datasets.

    Concrete subclasses supply the actual record stream by implementing
    ``__iter__``; this base only stores bookkeeping attributes and exposes
    the record count through ``__len__``.
    """

    def __init__(self, num_records=0, valid_ids=None, size=256):
        super().__init__()
        # Bookkeeping shared by all subclasses.
        self.size = size
        self.num_records = num_records
        self.valid_ids = valid_ids
        # Initially every valid id is a candidate sample id.
        self.sample_ids = valid_ids

        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')

    def __len__(self):
        # Length is the declared record count, not len(valid_ids).
        return self.num_records

    @abstractmethod
    def __iter__(self):
        """Yield dataset examples; must be provided by subclasses."""
        pass
    

class ConcatDatasetWithIndex(ConcatDataset):
    """ConcatDataset variant whose ``__getitem__`` also reports which
    sub-dataset the sample came from, as ``(sample, dataset_idx)``."""

    def __getitem__(self, idx):
        # Normalize a negative index to its positive equivalent first.
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx += len(self)
        # cumulative_sizes is sorted, so bisect finds the owning dataset.
        which = bisect.bisect_right(self.cumulative_sizes, idx)
        offset = self.cumulative_sizes[which - 1] if which > 0 else 0
        return self.datasets[which][idx - offset], which


class ImagePaths(Dataset):
    """Dataset of images loaded from disk, with optional resize/crop
    preprocessing and optional per-image CLIP feature files.

    Preprocessing pipeline:
      1. resize — skipped when ``size == 0``; otherwise:
         - with ``crop_size`` given, the short edge is resized to ``size``
           (aspect ratio preserved);
         - without ``crop_size``, the image is resized to ``(size, size)``.
      2. crop — random crop when ``random_crop`` else center crop, to
         ``(crop_size, crop_size)``; requires ``crop_size <= size``.

    Args:
        data_root: directory that the relative paths are joined against.
        images_list_file: a ``.txt`` file of relative image paths (one per
            line), or an already-materialized list of relative paths.
        size: resize target (see pipeline above); 0 skips resizing.
        crop_size: square crop size; 0 skips cropping.
        random_crop: random vs. center crop when cropping is enabled.
        labels: optional dict of per-sample annotation sequences, each
            indexed the same way as the image list.
        clip_list_file: optional ``.txt`` file of relative ``.npy`` paths
            with precomputed CLIP features, parallel to the image list.
    """

    def __init__(self, data_root, images_list_file, size=0, crop_size=0,
                 random_crop=False, labels=None, clip_list_file=None):
        if isinstance(images_list_file, str) and images_list_file.endswith('.txt'):
            with open(images_list_file, "r") as f:
                paths = f.read().splitlines()
        else:  # list containing relative paths
            paths = images_list_file

        self.img_paths = [os.path.join(data_root, p) for p in paths]
        self._length = len(self.img_paths)

        self.with_clip_feat = False
        if clip_list_file:
            self.with_clip_feat = True
            with open(clip_list_file, "r") as f:
                clip_paths = f.read().splitlines()
            self.clip_paths = [os.path.join(data_root, p) for p in clip_paths]

        self.labels = dict() if labels is None else labels

        assert size >= 0 and crop_size >= 0
        self.size = size
        self.crop_size = crop_size
        img_augs = []
        if size > 0:
            if crop_size == 0:
                img_augs.append(albumentations.Resize(height=size, width=size))
            else:
                # Keep aspect ratio; the crop below produces the final shape.
                img_augs.append(albumentations.SmallestMaxSize(max_size=size))

        if crop_size == 0:
            # Without a crop there must at least be a resize target.
            assert size > 0
        else:
            if random_crop:
                img_augs.append(albumentations.RandomCrop(height=crop_size, width=crop_size))
            else:
                img_augs.append(albumentations.CenterCrop(height=crop_size, width=crop_size))
            # Safety net for images whose short edge is below crop_size.
            self.pre_scaler = albumentations.SmallestMaxSize(max_size=crop_size)

        if len(img_augs) == 0:
            self.preprocessor = lambda **kwargs: kwargs
        else:
            self.preprocessor = albumentations.Compose(img_augs)

    def __len__(self):
        return self._length

    def preprocess_image(self, image_path):
        """Load ``image_path`` as RGB, run the preprocessing pipeline, and
        return an HWC float32 array scaled to [-1, 1]."""
        # Context manager ensures the underlying file handle is closed.
        with Image.open(image_path) as img:
            if not img.mode == "RGB":
                img = img.convert("RGB")
            image = np.array(img).astype(np.uint8)
        if min(image.shape[:2]) < self.crop_size:
            # Short edge smaller than the crop: upscale first so the crop fits.
            image = self.pre_scaler(image=image)["image"]
        image = self.preprocessor(image=image)["image"]
        image = (image / 127.5 - 1.0).astype(np.float32)

        return image

    def __getitem__(self, i):
        example = dict()
        example["image"] = self.preprocess_image(self.img_paths[i])
        for k in self.labels:
            example[k] = self.labels[k][i]

        if self.with_clip_feat:
            # NOTE(review): assumes the feature files are plain ndarrays
            # (np.load defaults to allow_pickle=False).
            feat_path = self.clip_paths[i]
            example["clip_feat"] = np.load(feat_path)

        return example


class NumpyPaths(ImagePaths):
    """ImagePaths variant whose samples are stored as ``.npy`` arrays
    (CHW, e.g. 3 x 1024 x 1024, with a leading singleton axis — presumably
    a batch dim; TODO confirm against the files on disk)."""

    def preprocess_image(self, image_path):
        arr = np.load(image_path).squeeze(0)   # -> C x H x W
        arr = np.transpose(arr, (1, 2, 0))     # -> H x W x C
        # Round-trip through PIL to mirror the parent class's loading path.
        pil_img = Image.fromarray(arr, mode="RGB")
        arr = np.array(pil_img).astype(np.uint8)
        arr = self.preprocessor(image=arr)["image"]
        return (arr / 127.5 - 1.0).astype(np.float32)
