from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np

from utils.augmentation import Augmentation, cropCenter
from utils.basic_cache import BasicCache
from utils.preprocess import median_blur
from tools.sen12_helper import sen12_tools

# Default augmentation configuration, used when config.augment is True or an
# empty mapping (see Sen12PatchPairDataset.__init__). Horizontal/vertical
# flips fire with probability 0.5; scale, translate and rotate are disabled
# (probability 0) — their parameter ranges are kept for completeness.
AUG_PROBS = {
    "fliplr": 0.5,
    "flipud": 0.5,
    "scale": 0,
    "scale_px": (1.0, 1.0),
    "translate": 0,
    "translate_perc": (0.0, 0.0),
    "rotate": 0,
    "rotate_angle": (-5, 5)
}

def null_norm(x):
    """Identity normalization: hand the input back unchanged.

    Used as a drop-in stand-in for ``transforms.Normalize`` when no
    mean/std is configured for a modality.
    """
    return x

class Sen12PatchPairDataset(Dataset):
    """Dataset of co-registered SAR / optical patch pairs from SEN1-2 data.

    Each item is ``((sar_tensor, opt_tensor), class_id)`` where both tensors
    are single-channel, contrast-stretched to [0, 1] and optionally
    normalized, and ``class_id`` is simply the sample index (every pair acts
    as its own class, e.g. for metric-learning losses).
    """

    def __init__(self, config):
        """Build the dataset from a config object.

        Args:
            config: object with attributes ``single_domain``, ``cache_dir``,
                ``cache_size``, ``base_dir``, ``list_file``, ``augment`` and
                ``normalize`` (see the ``__main__`` demo for an example).
        """
        super().__init__()

        self.single_domain = config.single_domain

        #-- cache (optional; disabled when cache_dir is not a string)
        self.cache_dir = config.cache_dir if isinstance(config.cache_dir, str) else None
        self.cache_size = config.cache_size if isinstance(config.cache_size, (int, float)) else 0

        if self.cache_dir is not None:
            self.cache = BasicCache(self.cache_dir, size=self.cache_size, scheme="fill", clear=False, overwrite=False)
        else:
            self.cache = None

        #-- list file: lut maps index -> (opt filename, sar filename)
        self.ua = sen12_tools(config.base_dir, config.list_file)
        self.lut = self.ua.file_list

        #-- augmentation: True or an empty mapping falls back to AUG_PROBS
        if config.augment:
            if config.augment is True or len(config.augment) == 0:
                config.augment = AUG_PROBS.copy()
            self.augmentor = Augmentation(probs=config.augment)
        else:
            self.augmentor = None

        self.transforms = transforms.Compose([transforms.ToTensor()])

        #-- per-modality normalization (identity unless mean/std configured)
        if "sar" in config.normalize:
            self.sar_norm = transforms.Normalize(mean=[config.normalize.sar[0]], std=[config.normalize.sar[1]])
        else:
            self.sar_norm = null_norm

        if "opt" in config.normalize:
            self.opt_norm = transforms.Normalize(mean=[config.normalize.opt[0]], std=[config.normalize.opt[1]])
        else:
            self.opt_norm = null_norm

        # Single-domain mode uses one modality's normalizer for both streams.
        # (Previously re-applied inside __getitem__ on every access — a hidden
        # side effect; doing it once here is behaviorally equivalent because
        # the aliasing always happened before the normalizers were first used.)
        if self.single_domain == "sar":
            self.opt_norm = self.sar_norm
        elif self.single_domain == "opt":
            self.sar_norm = self.opt_norm

        print(f"Patch Pair Dataset created with {len(self)} pairs")

    def __getitem__(self, index):
        """Return ``((sar_tensor, opt_tensor), class_id)`` for sample ``index``."""
        # Re-seed the augmentor so both modalities would share one random draw.
        if self.augmentor:
            self.augmentor.refresh_random_state()

        img_sar, img_opt, _, _ = self._load_and_label(index)
        img_sar = median_blur(img_sar)

        # Collapse the channel axis to a single band and make contiguous.
        img_sar = np.ascontiguousarray(np.mean(img_sar, axis=2))
        img_opt = np.ascontiguousarray(np.mean(img_opt, axis=2))

        # Contrast-stretch to [0, 1] (guards against constant patches).
        img_sar = self._stretch(img_sar)
        img_opt = self._stretch(img_opt)

        # ToTensor + optional per-modality normalization.
        img_sar_tensor = self.sar_norm(self.transforms(img_sar).float())
        img_opt_tensor = self.opt_norm(self.transforms(img_opt).float())

        # Each pair is its own class: the index doubles as the label.
        return (img_sar_tensor, img_opt_tensor), index

    def __len__(self):
        return len(self.lut)

    @staticmethod
    def _stretch(img):
        """Min-max stretch ``img`` to [0, 1].

        A constant image (peak-to-peak range of 0) maps to all zeros instead
        of dividing by zero. ``np.ptp`` is used because the ``ndarray.ptp``
        method was removed in NumPy 2.0.
        """
        span = np.ptp(img)
        if span == 0:
            return np.zeros_like(img)
        return (img - img.min()) / span

    def _load_and_label(self, index, drop=False):
        """Return the normalized (sar, opt, label, info) tuple for ``index``.

        Checks the cache first; on a miss loads both patches from disk and
        caches the result. If the pair at ``index`` yields no patches, the
        next index is tried (unless ``drop`` is True, which gives up instead).

        Returns:
            sar: HxWxC ndarray (a channel axis is added if missing)
            opt: HxWxC ndarray
            label: ``np.ones(1)`` — pairs always correspond at patch level
            info: dict with keys "SAR", "OPT", "city", "supervised", "WKT"

        Raises:
            RuntimeError: if no valid pair could be loaded at or after
                ``index`` (previously crashed with an opaque AttributeError
                on ``None.shape``).
        """
        data = self._try_cache(index)

        cache_key = self._index_to_point(index)[1]

        if not data:
            data = {"SAR": None, "OPT": None, "Y": None, "INFO": None}

        while (data["OPT"] is None) and (data["SAR"] is None):
            df_city, wkt = self._index_to_point(index)

            group = (
                {'OPT': self.lut[index][0],
                'SAR': self.lut[index][1]},
                wkt, index
                )

            imgs, idxs = self._get_group_patches(group)

            if len(idxs["OPT"]) > 0 and len(idxs["SAR"]) > 0:
                data["OPT"] = imgs["OPT"][0]
                data["SAR"] = imgs["SAR"][0]
                data["INFO"] = {
                    "SAR": idxs["SAR"][0],
                    "OPT": idxs["OPT"][0],
                    "city": df_city,
                    "supervised": None,
                    "WKT": cache_key}
                # SEN1-2 pairs are always corresponding on a patch level; use
                # sub patches for negatives.
                data["Y"] = np.ones(1)

                # Try and cache the data point
                if self.cache is not None:
                    self.cache[cache_key] = data

            else:
                if drop:
                    break
                index += 1
                if index >= len(self):
                    break

        # The loop may exit without data (drop=True or index exhausted):
        # fail loudly rather than crash on `.shape` of None below.
        if data["SAR"] is None or data["OPT"] is None:
            raise RuntimeError(f"No valid SAR/OPT patch pair found at or after index {index}")

        if data["SAR"].ndim < 3:
            data["SAR"] = np.expand_dims(data["SAR"], axis=2)

        if data["OPT"].ndim < 3:
            data["OPT"] = np.expand_dims(data["OPT"], axis=2)

        return data["SAR"], data["OPT"], data["Y"], data["INFO"]

    def _try_cache(self, index):
        """Return the cached data dict for ``index``'s point, or None on a miss / when caching is disabled."""
        if self.cache is None:
            return None
        _, wkt = self._index_to_point(index)
        return self.cache[wkt]

    def _index_to_point(self, index) -> tuple:
        """Map a sample index to ``(city, wkt)`` via the OPT filename's WKT string.

        NOTE(review): ``index - 1`` means index 0 maps to the LAST lut entry —
        looks like an off-by-one, but it is kept byte-for-byte for backward
        compatibility with existing caches; confirm against the list file.
        """
        wkt = self.ua.get_wkt(self.lut[index - 1][0])
        # The city name is everything before the last '_' in the final 7 chars.
        city_end_ids = wkt.find('_', len(wkt) - 7)
        city = wkt[:city_end_ids]

        return city, wkt

    def _get_group_patches(
        self, grp, opt_transform=None, sar_transform=None, channels=slice(0, None)
    ):
        """Load the SAR and OPT patches for one group.

        Args:
            grp: ``({"OPT": filename, "SAR": filename}, wkt, index)``
            opt_transform / sar_transform: optional per-modality transforms
                forwarded to ``ua.get_patch``.
            channels: channel slice applied to each loaded patch.

        Returns:
            ``(imgs, idxs)``: dicts keyed by "OPT"/"SAR" holding lists of
            patches / the group's index; lists stay empty for patches that
            failed to load.
        """
        imgs = {"OPT": [], "SAR": []}
        idxs = {"OPT": [], "SAR": []}

        for modal, filename in grp[0].items():
            transform = sar_transform if modal == "SAR" else opt_transform
            im = self.ua.get_patch(filename, transform=transform)

            if im is not None:
                imgs[modal].append(im[channels, :, :])
                idxs[modal].append(grp[2])

        return imgs, idxs
        

if __name__ == '__main__':

    import torch
    import torchvision
    import matplotlib.pyplot as plt
    import numpy as np


    class attributes:
        """Minimal stand-in for the runtime config object expected by the dataset."""
        def __init__(self):
            self.cache_dir = "cache/train"
            self.cache_size = 50000
            self.base_dir = "E:/workspace/SOMatch/cache/sen12/train"
            self.list_file = "E:/workspace/SOMatch/tmp/json/sen12_list/overlap_subset.json"
            self.augment = True
            self.normalize = {}
            self.single_domain = ""

    def imshow(img):
        """Un-normalize a CHW tensor image and display it with matplotlib."""
        img = img / 2 + 0.5     # unnormalize
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.show()

    config = attributes()
    dataset = Sen12PatchPairDataset(config)
    batch_size = 4
    trainloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
    data_iter = iter(trainloader)
    for _ in range(15):
        # next() builtin: the Python 2 `.next()` method does not exist on
        # Python 3 iterators and raised AttributeError here.
        (img_sar_tensor, img_opt_tensor), class_id = next(data_iter)
        images = torch.cat((img_sar_tensor, img_opt_tensor), dim=0)

        imshow(torchvision.utils.make_grid(images))