import random

import numpy as np

import torch
from torch.utils.data import Dataset
from torchvision import transforms

from utils.basic_cache import BasicCache
from utils.augmentation import Augmentation
from tools.dataset_tools import dataset_tools

from datasets.preprocessor import Preprocessor

# Default augmentation configuration: per-operation apply probability
# plus its parameter range. Used when config.augment is True or empty.
DEF_AUG_PROBS = {
    "fliplr": 0.5,
    "flipud": 0.5,
    "scale": 0,
    "scale_px": (1.0, 1.0),
    "translate": 0,
    "translate_perc": (0.0, 0.0),
    "rotate": 0,
    "rotate_angle": (-5, 5),
}


class PatchPairDataset(Dataset):
    """Dataset yielding matched SAR/optical patch pairs as tensors.

    Each item is ``((sar_tensor, opt_tensor), label)`` where both tensors
    are single-channel float images min-max normalized to [0, 1].
    """

    # default augmentation probabilities/parameters (see Augmentation)
    AUG_PROBS = DEF_AUG_PROBS
    # tool used to read patches / resolve the dataset layout
    dt = None
    # list of matched-patch info; pl[i][0] is the OPT entry, pl[i][1] the SAR one
    pl = None
    # if "sar", the optical patch is replaced by the SAR one (and vice
    # versa for "opt"); "" keeps both modalities
    single_domain = ""
    # cache settings / instance
    cache_dir = None
    cache_size = 0
    cache = None
    # augmentor (None disables augmentation)
    augmentor = None
    # torchvision transform pipeline applied to both patches
    transforms = None
    # crop size / shift of template center
    # radius of gt heatmap / which is search patch
    crop_size = 208
    shift_x = 15
    shift_y = 15
    search_rad = 32
    fixed = False


    def __init__(self, config=None, **kwargs):
        """Build the dataset from a config object.

        Parameters
        ----------
        config : object, optional
            Must provide `single_domain` and `augment`; may provide
            `cache_dir`, `cache_size` and `preprocessor`. Unless called
            from a subclass with kwargs["super"], `base_dir` and
            `list_file` are also required. None leaves the instance
            with class-level defaults.
        **kwargs :
            Pass super=True from a subclass that sets up `dt`/`pl` itself.
        """
        # BUGFIX: was `super(PatchPairDataset, PatchPairDataset).__init__(self)`,
        # a non-idiomatic unbound-super call; plain super() is equivalent here.
        super().__init__()

        if config is None:  # BUGFIX: identity check, not `== None`
            return

        self.single_domain = config.single_domain

        #-- cache
        self.cache_dir = getattr(config, "cache_dir", None)
        self.cache_size = getattr(config, "cache_size", 0)
        if self.cache_dir is not None:
            self.cache = BasicCache(self.cache_dir, size=self.cache_size, scheme="fill", clear=False, overwrite=False)
        else:
            self.cache = None

        #-- augmentation / transform
        if config.augment:
            # `True` or an empty container means "use the defaults"
            if config.augment is True or len(config.augment) == 0:
                config.augment = self.AUG_PROBS.copy()

            self.augmentor = Augmentation(probs=config.augment)
        else:
            self.augmentor = None

        self.transforms = transforms.Compose([transforms.ToTensor()])

        if hasattr(config, "preprocessor"):
            self.preprocessor = Preprocessor(**config.preprocessor.toDict())
        else:
            self.preprocessor = Preprocessor()

        # called by a subclass which sets up dt/pl itself
        if "super" in kwargs:
            return

        #-- dataset
        self.dt = dataset_tools(config.base_dir, config.list_file)
        self.pl = self.dt.get_pair_list()

        print(f"Patch Pair Dataset created with {self.__len__()} pairs")


    def __getitem__(self, index):
        """Return ((sar_tensor, opt_tensor), label) for the pair at *index*."""
        if self.augmentor is not None:
            # refresh once per item so both modalities share the same
            # random transform parameters
            self.augmentor.refresh_random_state()

        img_sar, img_opt, _, _ = self._load_data(index)

        if self.augmentor is not None:
            img_sar = self.augmentor(img_sar)
            img_opt = self.augmentor(img_opt)

        # optionally collapse both outputs to a single modality
        if self.single_domain == "sar":
            img_opt = img_sar
        elif self.single_domain == "opt":
            img_sar = img_opt

        # keep the first channel only and min-max normalize to [0, 1].
        # BUGFIX: `np.ptp(x) or 1.0` guards against a constant patch
        # (zero dynamic range) which previously produced NaN/inf; np.ptp()
        # also replaces ndarray.ptp, removed in NumPy 2.0.
        img_sar = np.ascontiguousarray(img_sar[:, :, 0])
        img_opt = np.ascontiguousarray(img_opt[:, :, 0])
        img_sar = (img_sar - img_sar.min()) / (np.ptp(img_sar) or 1.0)
        img_opt = (img_opt - img_opt.min()) / (np.ptp(img_opt) or 1.0)
        # only the SAR patch goes through the preprocessor
        img_sar = self.preprocessor.process(img_sar)

        # back to HxWx1 so ToTensor produces a 1-channel CHW tensor
        img_opt = np.expand_dims(img_opt, axis=2)
        img_sar = np.expand_dims(img_sar, axis=2)

        img_sar_tensor = self.transforms(img_sar).float()
        img_opt_tensor = self.transforms(img_opt).float()

        # every pair in the list is a positive match
        labels = torch.tensor(1)

        return (img_sar_tensor, img_opt_tensor), labels


    def __len__(self):
        """Number of patch pairs in the list."""
        return len(self.pl)


    def _try_cache(self, index):
        """Return cached data for *index*, or None on miss / disabled cache."""
        if self.cache is None:
            return None

        # the wkt string of the OPT entry is the cache key
        return self.cache[self._index_to_wkt(index)]


    def _index_to_wkt(self, index):
        """Map a pair index to the wkt string of its OPT entry."""
        return self.dt.get_wkt(self.pl[index][0])


    def _get_patches(
        self, pair: dict, opt_transform=None, sar_transform=None, **kwargs
    ):
        """Open each image patch in *pair*, apply its per-modality transform.

        Parameters
        ----------
        pair : dict
            Maps modality name ("OPT"/"SAR") to a patch filename.

        Returns
        -------
        dict
            {"OPT": [...], "SAR": [...]} — unreadable patches are skipped,
            so either list may be empty.
        """
        imgs = {"OPT": [], "SAR": []}

        for modal, filename in pair.items():
            transform = sar_transform if modal == "SAR" else opt_transform
            im = self.dt.get_patch(
                filename, transform=transform, modal=modal
            )

            if im is not None:
                im = im[:, :]
                imgs[modal].append(im)

        return imgs


    def _normalize_scale(self, x, in_range=(0, 255)):
        """Clip x to in_range (clamping at the boundaries) and rescale to [0, 1]."""
        return (x.clip(*in_range) - in_range[0]) / (in_range[1] - in_range[0])


    def _load_data(self, index, drop=False):
        '''
        index to transformed image pairs and other info
        from cache if it exists, or from files otherwise

        Parameters:
        ----------
            index : pair index; on a read failure the loader falls through
                to the next index (wrapping around) unless *drop* is True
            drop : if True, give up after the first failed read instead of
                trying the next index

        Return:
        ------
            (sar patch, opt patch, label, info dict) — entries may be None
            when *drop* is True and the read failed
        '''

        data = self._try_cache(index)

        if not data:
            # BUGFIX: the key was "Y" while every reader uses "LABEL",
            # which raised KeyError when `drop` broke out of the loop
            data = {"SAR": None, "OPT": None, "LABEL": None, "INFO": None}

        while (data["OPT"] is None) and (data["SAR"] is None):
            pair = {
                'OPT': self.pl[index][0],
                'SAR': self.pl[index][1]
            }

            imgs = self._get_patches(pair,
                opt_transform=self._normalize_scale,
                sar_transform=self._normalize_scale
            )

            if len(imgs["OPT"]) > 0 and len(imgs["SAR"]) > 0:
                data["OPT"] = imgs["OPT"][0]
                data["SAR"] = imgs["SAR"][0]
                data["INFO"] = {}
                data["LABEL"] = 0

                if self.cache is not None:
                    # BUGFIX: key recomputed from the current index — the
                    # old code cached under the original index's wkt even
                    # after falling through to a different pair
                    self.cache[self._index_to_wkt(index)] = data

            else:
                if drop:
                    break

                # HACK: you shouldn't come here — patch missing on disk.
                # BUGFIX: wrap around instead of running past the end of pl
                index = (index + 1) % len(self.pl)

        return data["SAR"], data["OPT"], data["LABEL"], data["INFO"]



if __name__ == "__main__":
    # Smoke test: build a dataset from a local json pair list and dump
    # the first pair to disk for visual inspection.
    import os

    import cv2 as cv

    class config:
        """Minimal stand-in for the real config object."""
        def __init__(self):
            self.single_domain = ""
            self.cache_dir = None
            self.cache_size = 0
            self.base_dir = None
            self.list_file = "E:/workspace/SOMatch/tmp/json/sen12_tt_harris/pt_s100.json"
            self.augment = True


    cfg = config()

    ppd = PatchPairDataset(cfg)

    (img0, img1), _ = ppd[0]

    # CHW tensors -> HWC numpy images for cv.imwrite
    img0 = np.transpose(img0.numpy(), [1, 2, 0])
    img1 = np.transpose(img1.numpy(), [1, 2, 0])

    # BUGFIX: cv.imwrite silently returns False when the target directory
    # does not exist, so create it first
    out_dir = "image/dataset_test"
    os.makedirs(out_dir, exist_ok=True)
    cv.imwrite(os.path.join(out_dir, "img0.png"), img0 * 255)
    cv.imwrite(os.path.join(out_dir, "img1.png"), img1 * 255)
