import logging

import numpy as np
import torch

from tools.dataset_tools import pm_dst_tools

from datasets.patch_pair_dataset import PatchPairDataset
from datasets.patch_clipper import PatchClipper

logger = logging.getLogger("match")

class IntegrationMatchDataset(PatchPairDataset, PatchClipper):
    """SAR/optical pair dataset with three output modes (``config.mode``):

    * ``"patch"``:       ``((sar_tensor, opt_tensor), labels)`` — full patches.
    * ``"template"``:    ``((sar_crop, opt_crop), (gt_map, shift))`` — clipper crops.
    * ``"integration"``: ``((sar_tensor, opt_tensor), (labels, sar_crop, opt_crop, gt_map, shift))``.
    """

    def __init__(self, config=None, **kwargs):
        # Mode must be known before the clipper branch below.
        self.mode = config.mode

        # Initialise PatchPairDataset (first base in the MRO).
        # NOTE(review): `super=True` is a PatchPairDataset-specific keyword,
        # not the builtin — confirm its meaning against PatchPairDataset.
        super().__init__(config, super=True)

        if not self.mode == "patch":
            settings = config.clipper.toDict()

            if self.mode == "integration":
                settings["name"] = settings["name"] + " location"
            # Deliberate MRO skip: initialise PatchClipper directly,
            # bypassing PatchPairDataset.__init__.
            super(PatchPairDataset, self).__init__(**settings)

        #-- dataset: self.pl holds (opt_path, sar_path, label) records
        self.dt = pm_dst_tools(config.base_dir, config.list_file)
        self.pl = self.dt.get_pair_list()

        # PatchClipper may set search_domain; provide a fallback for
        # "patch" mode, where the clipper was never initialised.
        if not hasattr(self, "search_domain"):
            self.search_domain = "None"

        logger.info(
            "Integration Match Dataset created with %d pairs ---- search domain is %s",
            len(self), self.search_domain,
        )

    def __getitem__(self, index):
        """Return one sample; the (patch, info) layout depends on self.mode."""
        sar_tensor, opt_tensor, labels = self.get_pair_label(index)

        if self.mode == "patch":
            return (sar_tensor, opt_tensor), (labels)

        sar, opt, gt_map, shift = self.crop(sar_tensor, opt_tensor)

        patch = None
        info = None
        if self.mode == "integration":
            patch = (sar_tensor, opt_tensor)
            info = (labels, sar, opt, gt_map, shift)
        elif self.mode == "template":
            patch = (sar, opt)
            info = (gt_map, shift)

        return patch, info

    @staticmethod
    def _minmax_norm(img):
        """Min-max scale `img` to [0, 1].

        Constant images map to all-zeros instead of dividing by a zero
        range (the previous `ptp()` division produced NaN/Inf there).
        `np.ptp` is used because the `ndarray.ptp` method was removed
        in NumPy 2.0.
        """
        rng = np.ptp(img)
        if rng == 0:
            return np.zeros_like(img, dtype=float)
        return (img - img.min()) / rng

    def get_pair_label(self, index):
        """Load, augment, normalise and tensorise the pair at `index`.

        Returns (sar_tensor, opt_tensor, labels).
        """
        #-- augmentor: re-seed once so both domains get the same transform
        if self.augmentor is not None:
            self.augmentor.refresh_random_state()

        img_sar, img_opt, label, _ = self.load_data(index)

        if self.augmentor is not None:
            img_sar = self.augmentor(img_sar)
            img_opt = self.augmentor(img_opt)

        # Optional single-domain ablation: feed the same image to both sides.
        if self.single_domain == "sar":
            img_opt = img_sar
        elif self.single_domain == "opt":
            img_sar = img_opt

        # First channel only, contiguous for downstream ops.
        img_sar = np.ascontiguousarray(img_sar[:, :, 0])
        img_opt = np.ascontiguousarray(img_opt[:, :, 0])

        img_sar = self._minmax_norm(img_sar)
        img_opt = self._minmax_norm(img_opt)

        # NOTE(review): only the SAR image goes through the preprocessor —
        # presumably intentional (SAR-specific filtering); confirm.
        img_sar = self.preprocessor.process(img_sar)

        # HxW -> HxWx1 so transforms see a channel axis.
        img_opt = np.expand_dims(img_opt, axis=2)
        img_sar = np.expand_dims(img_sar, axis=2)

        img_sar_tensor = self.transforms(img_sar).float()
        img_opt_tensor = self.transforms(img_opt).float()

        labels = torch.tensor(label)

        return img_sar_tensor, img_opt_tensor, labels

    def load_data(self, index, drop=False):
        '''
        Resolve `index` to a transformed image pair plus metadata,
        from caches if present or from files.

        If the pair at `index` cannot be loaded, the next index is tried
        (wrapping at the end of the list) unless `drop` is True, in which
        case (None, None, None, None) fields are returned.

        Return:
        ------
            tuple (sar patch, opt patch, label, info dict) — any element
            may be None on the `drop` failure path.
        '''
        data = {"OPT" : None, "SAR" : None}

        while (data["OPT"] is None) and (data["SAR"] is None):
            pair = {
                'OPT': self.pl[index][0],
                'SAR': self.pl[index][1]
            }

            imgs = self._get_patches(pair,
                opt_transform=self._normalize_scale,
                sar_transform=self._normalize_scale
            )

            if len(imgs["OPT"]) > 0 and len(imgs["SAR"]) > 0:
                data["OPT"] = imgs["OPT"][0]
                data["SAR"] = imgs["SAR"][0]
                data["INFO"] = {}
                data["LABEL"] = self.pl[index][2]

                if self.cache is not None:
                    # BUG FIX: `cache_key` was never defined, so any
                    # configured cache raised NameError. Key on the index
                    # that was actually loaded (after any skips).
                    self.cache[index] = data

            else:
                if drop:
                    break

                # HACK: you shouldn't come here — skip to the next pair,
                # wrapping so we never index past the end of the list.
                index = (index + 1) % len(self.pl)

        # .get() so the drop path (keys never set) yields None instead of
        # raising KeyError.
        return data["SAR"], data["OPT"], data.get("LABEL"), data.get("INFO")


if __name__ == "__main__":
    from dotmap import DotMap

    # Pick one of the three dataset modes to smoke-test.
    mode_ = "integration"
    # mode_ = "template"
    # mode_ = "patch"

    class config:
        type = "IntegrationMatchDataset"
        batch_size = 20
        workers = 0
        shuffle = True
        base_dir = ""
        list_file = "./json/os-select/1-s-test-pair.json"
        # BUG FIX: a stray trailing comma made this the tuple (True,)
        # instead of the boolean True.
        augment = True
        mode = mode_
        clipper = DotMap({
            "name" : "shift template",
            "template_size" : 144,
            "shift_x" : 15,
            "shift_y" : 15,
            "search_rad" : 32,
            "search_domain" : "B"
        })
        single_domain = ""

    cfg = config()
    imd = IntegrationMatchDataset(cfg)

    data, info = imd[10]

    # Each mode returns a different (data, info) layout — see __getitem__.
    if mode_ == "patch":
        print(data[0].shape, data[1].shape)
        print(info)
    elif mode_ == "template":
        print(data[0].shape, data[1].shape)
        print(info[0].shape, info[1])
    elif mode_ == "integration":
        print(data[0].shape, data[1].shape)
        print(info[0], info[1], info[2])
        print(info[3].shape, info[4])
