import operator
from functools import partial
from pickle import FALSE
from tokenize import group

import numpy as np

import torch
from torchvision import transforms
from torch.utils.data import Dataset
from utils.augmentation import Augmentation, cropCenter
from utils.basic_cache import BasicCache

from tools.sen12_helper import sen12_tools

# Default augmentation settings, used by Sen12Dataset.__init__ when
# `config.augment` is truthy but empty: random horizontal/vertical flips
# only; scale / translate / rotate are disabled (probability 0).
AUG_PROBS = {
    "fliplr": 0.5,
    "flipud": 0.5,
    "scale": 0,
    "scale_px": (1.0, 1.0),
    "translate": 0,
    "translate_perc": (0.0, 0.0),
    "rotate": 0,
    "rotate_angle": (-5, 5)  # degrees; unused while "rotate" probability is 0
}

def null_norm(x):
    """Identity transform: stands in for a Normalize op when none is configured."""
    return x

class point_for_sen12:
    """Lightweight container holding a WKT-style identifier for one patch.

    Downstream code (caching, info dicts) reads the ``wkt`` attribute directly.
    """

    def __init__(self, wkt) -> None:
        # Stored verbatim; no parsing or validation happens here.
        self.wkt = wkt

class Sen12Dataset(Dataset):
    """Base dataset for paired Sen1-2 SAR / optical patches.

    Handles configuration parsing, optional on-disk caching of patches,
    augmentation setup and normalized patch loading.  Child classes must
    implement ``__getitem__``.
    """

    def __init__(self, config):
        # Bug fix: the original called `super()` without `.__init__()`, which
        # constructs the proxy and discards it (a no-op).
        super().__init__()

        # Crop sizes (e.g. 256 / 128); None when not configured.
        self.crop_size_a = config.crop_a if isinstance(config.crop_a, (int, float)) else None
        self.crop_size_b = config.crop_b if isinstance(config.crop_b, (int, float)) else None
        self.stretch_contrast = config.stretch_contrast if isinstance(config.stretch_contrast, bool) else False

        self.return_all = config.return_all if isinstance(config.return_all, bool) else False
        # Single-domain mode: only the optical image is effectively used
        # (subclasses swap the normalizers accordingly).
        self.single_domain = config.single_domain if isinstance(config.single_domain, bool) else False

        # If a cache dir is specified, patches are saved to local disk to
        # avoid re-extracting them on every epoch.
        self.cache_dir = config.cache_dir if isinstance(config.cache_dir, str) else None
        self.cache_size = config.cache_size if isinstance(config.cache_size, (int, float)) else 0

        if self.cache_dir is not None:
            self.cache = BasicCache(self.cache_dir, size=self.cache_size, scheme="fill", clear=False, overwrite=False)
        else:
            self.cache = None

        self.ua = sen12_tools(config.base_dir, config.list_file)
        self.lut = self.ua.file_list

        func = []

        if config.augment:
            # If it is truthy but empty, fall back to the default augmentation
            # parameters - this keeps old configs backwards compatible.
            if config.augment is True or len(config.augment) == 0:
                config.augment = AUG_PROBS.copy()

            self.augmentor = Augmentation(probs=config.augment)
        else:
            self.augmentor = None

        func.append(transforms.ToTensor())
        self.transforms = transforms.Compose(func)

        if "sar" in config.normalize:
            self.sar_norm = transforms.Normalize(mean=[config.normalize.sar[0]], std=[config.normalize.sar[1]])
        else:
            self.sar_norm = null_norm

        if "opt" in config.normalize:
            self.opt_norm = transforms.Normalize(mean=[config.normalize.opt[0]], std=[config.normalize.opt[1]])
        else:
            self.opt_norm = null_norm

        # Bug fix: message previously said "UrbanAtlas" - a copy-paste
        # leftover; this class loads Sen1-2 pairs.
        print(f"Sen12 Dataset created with {len(self)} pairs")

    def __getitem__(self, index):
        raise NotImplementedError("You cannot call the parent class, please use a specific child class implementation")

    def cropCenterT(self, img, bounding, shift=(0,0,0,0)):
        """Center-crop ``img`` to ``bounding``, with a per-axis ``shift`` offset.

        The shift is applied by inflating the virtual image shape by
        ``2*shift`` per axis before computing the centered slice.
        """
        imshape = [x + y*2 for x, y in zip(img.shape, shift)]
        bounding = list(bounding)
        start = tuple(map(lambda a, da: a//2 - da//2, imshape, bounding))
        end = tuple(map(operator.add, start, bounding))
        slices = tuple(map(slice, start, end))
        return img[slices]

    def _index_to_point(self, index) -> tuple:
        """Map a dataset index to ``(city, point)`` where point carries a wkt attribute."""
        wkt = self.ua.get_wkt(self.lut[index][0])
        pt = point_for_sen12(wkt)
        # The city name is everything before the '_' found within the last 7
        # characters (which hold the patch suffix, e.g. "_p28").
        # NOTE(review): assumes the sen12 naming convention -- confirm.
        city_end_ids = wkt.find('_', len(wkt) - 7)
        city = wkt[:city_end_ids]

        return city, pt

    def _normalize_scale(self, x, in_range=(0, 255)):
        """Clip ``x`` to ``in_range`` and scale linearly to [0, 1]."""
        return (x.clip(*in_range) - in_range[0])/(in_range[1] - in_range[0])

    def _toDb(self, x, scale=True):
        """Convert SAR intensity to dB (10*log10), optionally scaled to [0, 1].

        The (10, 20) range is an approximate 3-sigma scaling of the dB values.
        Uses masked log10 so non-positive intensities are masked, not NaN.
        """
        x = 10*np.ma.log10(x.astype(np.float32))
        if scale:
            x = self._normalize_scale(x, (10, 20))

        return x

    def _get_group_patches(
        self, grp, opt_transform=None, sar_transform=None, channels=slice(0, None)
    ):
        """Load the SAR/optical patch pair described by ``grp``.

        Args:
            grp: tuple of ({"OPT": filename, "SAR": filename}, wkt, index).
            opt_transform / sar_transform: per-modality transforms passed to
                the patch loader.
            channels: channel slice applied to each loaded patch.

        Returns:
            (imgs, idxs): dicts keyed by "OPT"/"SAR"; a modality's lists stay
            empty when its patch could not be loaded.
        """
        imgs = {"OPT": [], "SAR": []}
        idxs = {"OPT": [], "SAR": []}

        pair = grp[0]

        for modal, filename in pair.items():
            transform = sar_transform if modal == "SAR" else opt_transform
            im = self.ua.get_patch(filename, transform=transform)

            if im is not None:
                im = im[channels, :, :].data
                imgs[modal].append(im)
                idxs[modal].append(grp[2])

        return imgs, idxs

    def _try_cache(self, index):
        """Return cached data for ``index`` or None on a miss (or when caching is off)."""
        if self.cache is not None:
            _, point = self._index_to_point(index)
            # Try to get data for the point
            data = self.cache[point.wkt]

            if data is not None:
                # Overwrite the INFO values as we are not actually sure about
                # them any more; the cache stores the dict in a 0-d numpy array.
                data["INFO"] = data["INFO"].item()
                data["INFO"]["supervised"] = None
                data["INFO"]["SAR"] = None
                data["INFO"]["OPT"] = None
                data["INFO"]["WKT"] = point.wkt

                return data

    def _load_and_label(self, index, drop=False):
        """Return the normalized SAR/optical pair for ``index``.

        Returns:
            (SAR patch, OPT patch, Y, INFO) where Y is always 1 (Sen1-2 pairs
            are co-registered on patch level; negatives come from sub-patches)
            and INFO holds {"SAR": idx, "OPT": idx, "city", "supervised", "WKT"}.

        When a pair fails to load the next index is tried, unless ``drop`` is
        True, in which case a RuntimeError is raised.
        """
        data = self._try_cache(index)

        cache_key = self._index_to_point(index)[1].wkt

        if not data:
            data = {"SAR": None, "OPT": None, "Y": None, "INFO": None}

        while (data["OPT"] is None) and (data["SAR"] is None):
            df_city, point = self._index_to_point(index)

            group = (
                {'OPT': self.lut[index][0],
                'SAR': self.lut[index][1]},
                point.wkt, index
                )

            imgs, idxs = self._get_group_patches(
                group,
                opt_transform=None,
                sar_transform=None,
                channels=slice(0, None))

            if len(idxs["OPT"]) > 0 and len(idxs["SAR"]) > 0:
                data["OPT"] = imgs["OPT"][0]
                data["SAR"] = imgs["SAR"][0]
                data["INFO"] = {
                    "SAR": idxs["SAR"][0],
                    "OPT": idxs["OPT"][0],
                    "city": df_city,
                    "supervised": None,
                    "WKT": cache_key}
                # Sen1-2 pairs always correspond on a patch level. Use sub
                # patches for negatives.
                data["Y"] = np.ones(1)

                # Try to cache the data point (the cached copy deliberately
                # keeps "supervised" as None; it is only set afterwards).
                if self.cache is not None:
                    self.cache[cache_key] = data

                data["INFO"]["supervised"] = 111

            else:
                if drop:
                    break
                index += 1

        # Bug fix: with drop=True a failed load used to fall through and crash
        # with AttributeError on None.shape below; raise a clear error instead.
        if data["SAR"] is None or data["OPT"] is None:
            raise RuntimeError(f"Could not load a SAR/optical pair for index {index}")

        if len(data["SAR"].shape) < 3:
            data["SAR"] = np.expand_dims(data["SAR"], axis=2)

        if len(data["OPT"].shape) < 3:
            data["OPT"] = np.expand_dims(data["OPT"], axis=2)

        return data["SAR"], data["OPT"], data["Y"], data["INFO"]

    def __len__(self):
        return len(self.ua)

class Sen12DatasetSiameseTriplet(Sen12Dataset):
    """Sen1-2 dataset producing (search, template, hard-template[, hard-search])
    crops plus a one-hot ground-truth heatmap for template-matching training.

    Requires ``crop_a > crop_b``: the larger SAR crop is the search region,
    the smaller optical crop is the (shifted) template.
    """

    def multivariate_gaussian(self, pos, mu, Sigma):
        """Evaluate an n-D Gaussian PDF at every grid position in ``pos``.

        Args:
            pos: array of shape (..., n) of evaluation points.
            mu: mean, shape (n,).
            Sigma: covariance matrix, shape (n, n).
        """
        n = mu.shape[0]
        Sigma_det = np.linalg.det(Sigma)
        Sigma_inv = np.linalg.inv(Sigma)
        N = np.sqrt((2*np.pi)**n * Sigma_det)
        # Vectorized (x-mu)^T . Sigma^-1 . (x-mu) across all input positions.
        fac = np.einsum('...k,kl,...l->...', pos-mu, Sigma_inv, pos-mu)
        return np.exp(-fac / 2) / N

    def get_gt_heatmap(self, shift_x=0, shift_y=0, w=64, h=64, sigma=1):
        """Return a (1, h, w) heatmap centered at (w//2+shift_x, h//2+shift_y).

        With ``sigma=None`` the map is one-hot at the shifted center; otherwise
        a Gaussian with diagonal covariance ``sigma`` is evaluated on the grid.
        """
        x = int(w//2 + shift_x)
        y = int(h//2 + shift_y)

        if sigma is None:
            hm = np.zeros((h, w))
            hm[y, x] = 1
        else:
            X = np.linspace(0, w-1, w)
            Y = np.linspace(0, h-1, h)
            X, Y = np.meshgrid(X, Y)

            mu = np.array([x, y])
            Sigma = np.array([[sigma, 0], [0, sigma]])
            pos = np.empty(X.shape + (2,))
            pos[:, :, 0] = X
            pos[:, :, 1] = Y
            hm = self.multivariate_gaussian(pos, mu, Sigma)

        return hm[np.newaxis, :, :]

    def __getitem__(self, index):
        """Load one pair, derive randomly shifted crops and the heatmap label.

        Returns:
            (imgs, y_hn): imgs is (search, template, template_hard), plus
            search_hard appended when ``return_all`` is set; y_hn is the
            (1, hm, hm) one-hot heatmap encoding the hard crop's offset.
        """
        if self.augmentor:
            self.augmentor.refresh_random_state()

        img_sar, img_opt, y, img_info = self._load_and_label(index)

        # Project-local import kept at call time so the module can be imported
        # without utils.preprocess being available.
        from utils.preprocess import median_blur
        img_sar = median_blur(img_sar)

        # Single-domain mode: normalize the optical image with the SAR stats
        # so that it can stand in for the SAR input.
        if self.single_domain:
            self.opt_norm = self.sar_norm

        assert self.crop_size_a <= img_sar.shape[1], "The input image is too small to crop"
        assert self.crop_size_b <= img_opt.shape[1], "The input image is too small to crop"

        a_sz, b_sz = self.crop_size_a, self.crop_size_b

        # Bug fix: the original only populated the crops (and shift_x_s /
        # shift_y_s) inside an `if crop_a > crop_b` branch, then used them
        # unconditionally afterwards - any other configuration crashed with a
        # cryptic NameError or np.random.randint ValueError.  Fail fast with a
        # clear message instead; behavior for crop_a > crop_b is unchanged.
        if a_sz <= b_sz:
            raise ValueError("Sen12DatasetSiameseTriplet requires crop_a > crop_b")

        max_shift = min(min(a_sz//4, b_sz//4), (np.abs(a_sz - b_sz))//4)
        shift_x = (2*np.random.randint(2) - 1)*(np.random.randint(max_shift) + 1)
        shift_y = (2*np.random.randint(2) - 1)*(np.random.randint(max_shift) + 1)

        if img_sar.shape[1] - a_sz > 0:
            # Also ensure we don't shift the keypoint out of the search region.
            max_shift_x = min((a_sz - b_sz)//4 - np.abs(shift_x//2), max_shift)
            max_shift_y = min((a_sz - b_sz)//4 - np.abs(shift_y//2), max_shift)
            shift_x_s = (2*np.random.randint(2) - 1)*(np.random.randint(max_shift_x))
            shift_y_s = (2*np.random.randint(2) - 1)*(np.random.randint(max_shift_y))
        else:
            shift_x_s = 0
            shift_y_s = 0

        # Collapse each modality to a single band by averaging channels.
        img_sar = np.mean(img_sar, axis=2)
        img_opt = np.mean(img_opt, axis=2)

        # Contiguous copies of the (possibly shifted) center crops.
        search_img = np.ascontiguousarray(cropCenter(img_sar, (a_sz, a_sz), (shift_x_s, shift_y_s)))
        template_img = np.ascontiguousarray(cropCenter(img_opt, (a_sz, a_sz), (shift_x_s, shift_y_s)))
        search_hard = np.ascontiguousarray(cropCenter(img_sar, (b_sz, b_sz), (shift_x, shift_y)))
        template_hard = np.ascontiguousarray(cropCenter(img_opt, (b_sz, b_sz), (shift_x, shift_y)))

        # Optional min/max stretch of the SAR search image to [0, 1].
        if self.stretch_contrast:
            search_img = (search_img - search_img.min())/(search_img.ptp())

        # To tensor + per-modality normalization.
        search_img = self.sar_norm( self.transforms(search_img).float() )
        template_img = self.opt_norm( self.transforms(template_img).float() )
        search_hard = self.sar_norm( self.transforms(search_hard).float() )
        template_hard = self.opt_norm( self.transforms(template_hard).float() )

        # The label encodes the hard crop's offset relative to the search crop.
        shift_x = shift_x - shift_x_s
        shift_y = shift_y - shift_y_s
        scale = 1
        hm_size = np.abs(self.crop_size_a - self.crop_size_b) + 1

        y_hn = self.get_gt_heatmap(
            shift_x=np.abs(shift_x)//scale*np.where(shift_x < 0, -1, 1),
            shift_y=np.abs(shift_y)//scale*np.where(shift_y < 0, -1, 1),
            w=hm_size, h=hm_size, sigma=None
        )

        y_hn = y_hn/y_hn.max()

        if self.return_all:
            imgs = (search_img, template_img, template_hard, search_hard)
        else:
            imgs = (search_img, template_img, template_hard)

        return imgs, y_hn

if __name__ == '__main__':
    # Smoke test for the filename -> (city, point) parsing logic.
    wkt = "E:\\datasets\\sen1-2\\ROIs1158_spring\\s1_0\\ROIs1158_spring_s1_0_p28.png"
    pt = point_for_sen12(wkt)
    # Bug fix: the original wrote `wkt.__len__ - 7` (missing call parentheses),
    # which raised TypeError; use the idiomatic len() call.
    city_end_ids = wkt.find('_', len(wkt) - 7)
    city = wkt[:city_end_ids]
    print(pt.wkt)
    print(city)