import os
import tifffile

from PIL import Image
import numpy as np
import torch.utils.data as data
import random
from utils.mypath import MyPath


class Vaihingen(data.Dataset):
    """ISPRS Vaihingen multi-task dataset.

    Serves up to three dense-prediction targets per crop: edge detection,
    semantic segmentation and normalized-DSM height estimation.  Every
    full tile (image and all labels) is loaded into memory at
    construction time, regardless of the ``do_*`` flags, because
    :meth:`random_cut` indexes into all target lists.

    For a 'train' split, ``__getitem__`` ignores ``idx`` and returns a
    random ``height x width`` crop; the epoch length is the fixed
    ``n_samples``.  For any other split, tiles are traversed with a
    non-overlapping sliding window whose cursor is kept on the instance
    (``img_id`` / ``height`` / ``width``) — so validation/testing MUST
    iterate sequentially from index 0, unshuffled, with a single worker.
    """

    def __init__(self,
                 root=MyPath.db_root_dir('Vaihingen'),
                 split='val',
                 transform=None,
                 retname=True,
                 do_edge=False,
                 do_semseg=False,
                 do_height=False,
                 height=320,
                 width=320,
                 ):
        """Load every tile listed in the split file(s) under ``root``.

        Args:
            root: dataset root with 'image', 'edge',
                'semantic_label_no_boundary', 'normalized_DSM' and
                'gt_sets' sub-directories.
            split: split name or list of names ('train', 'val', ...).
            transform: optional callable applied to each sample dict.
            retname: if True, include a 'meta' entry in each sample.
            do_edge, do_semseg, do_height: which targets to return.
            height, width: crop size in pixels (rows, columns).

        Raises:
            FileNotFoundError: if any listed image or label file is missing.
        """
        self.image_num = 0            # total sliding-window crops (val/test)
        self.height = 0               # row cursor of the sliding window
        self.width = 0                # column cursor of the sliding window
        self.input_height = height
        self.input_width = width
        self.root = root
        self.transform = transform
        self.n_samples = 2000         # fixed epoch length for 'train'

        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split

        self.retname = retname

        # Original images
        self.im_ids = []
        self.images = []
        _image_dir = os.path.join(root, 'image')

        # Edge detection
        self.do_edge = do_edge
        self.edges = []
        _edge_gt_dir = os.path.join(root, 'edge')

        # Semantic segmentation
        self.do_semseg = do_semseg
        self.semsegs = []
        _semseg_gt_dir = os.path.join(root, 'semantic_label_no_boundary')

        # Height estimation
        self.do_height = do_height
        self.heights = []
        _height_gt_dir = os.path.join(root, 'normalized_DSM')

        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(root, 'gt_sets')

        print('Initializing dataloader for Vaihingen {} set'.format(''.join(self.split)))
        for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), 'r') as f:
                lines = f.read().splitlines()

            for line in lines:
                # Image
                _image = os.path.join(_image_dir, line)
                if not os.path.isfile(_image):
                    raise FileNotFoundError(_image)
                self.images.append(self._load_img(_image))
                self.im_ids.append(line.rstrip('\n'))
                img_height = self.images[-1].shape[0]
                img_width = self.images[-1].shape[1]
                # Number of non-overlapping crops that fit in this tile
                # (rows paired with crop height, columns with crop width).
                self.image_num += (img_height // self.input_height) * (img_width // self.input_width)

                # Edges (loaded even when do_edge is False — random_cut()
                # unconditionally reads from every target list).
                _edge = os.path.join(_edge_gt_dir, line.replace('.tif', '_noBoundary.npy'))
                if not os.path.isfile(_edge):
                    raise FileNotFoundError(_edge)
                self.edges.append(self._load_edge(_edge))

                # Semantic segmentation
                _semseg = os.path.join(_semseg_gt_dir, line.replace('.tif', '_noBoundary.tif'))
                if not os.path.isfile(_semseg):
                    raise FileNotFoundError(_semseg)
                self.semsegs.append(self._load_semseg(_semseg))

                # Height prediction
                _height = os.path.join(_height_gt_dir, line.replace('top_mosaic_09cm', 'dsm_09cm_matching'))
                _height = _height.replace('.tif', '_normalized.jpg')
                if not os.path.isfile(_height):
                    raise FileNotFoundError(_height)
                self.heights.append(self._load_height(_height))

        # Internal invariants: one label of each kind per image.
        if self.do_edge:
            assert (len(self.images) == len(self.edges))
        if self.do_semseg:
            assert (len(self.images) == len(self.semsegs))
        if self.do_height:
            assert (len(self.images) == len(self.heights))

    def __getitem__(self, idx):
        """Return a sample dict with 'image' and the enabled targets.

        Train mode: a random crop (``idx`` is ignored).  Other modes:
        the next sliding-window crop, advancing instance-level cursor
        state (see class docstring for the sequential-access caveat).
        """
        sample = {}
        if 'train' in self.split:
            image, height, semseg, edge = self.random_cut(self.input_width, self.input_height)
            sample['image'] = image
            if self.do_edge:
                sample['edge'] = edge
            if self.do_semseg:
                sample['semseg'] = semseg
            if self.do_height:
                sample['height'] = height
                # Clamp zeros so downstream log/division is safe.  The crop
                # is a view, so this also writes into the cached tile —
                # idempotent, hence harmless across epochs.
                sample['height'][sample['height'] < 1e-9] = 1e-9
            if self.retname:
                sample['meta'] = {'im_size': (self.input_height, self.input_width)}
        else:
            # A new pass starts at index 0; the row/col cursors were
            # already reset when the previous pass finished its last tile.
            if idx == 0:
                self.img_id = 0
            # BUGFIX: rows are sliced with the crop *height* and columns
            # with the crop *width* (these were swapped, which broke
            # non-square crops and disagreed with the image_num count).
            row1 = self.height
            row2 = self.height + self.input_height
            col1 = self.width
            col2 = self.width + self.input_width
            sample['image'] = self.images[self.img_id][row1:row2, col1:col2]
            if self.do_edge:
                sample['edge'] = self.edges[self.img_id][row1:row2, col1:col2]
            if self.do_semseg:
                sample['semseg'] = self.semsegs[self.img_id][row1:row2, col1:col2]
            if self.do_height:
                sample['height'] = self.heights[self.img_id][row1:row2, col1:col2]
                # Same idempotent in-place clamp as in train mode.
                sample['height'][sample['height'] < 1e-9] = 1e-9
            if self.retname:
                sample['meta'] = {'image': str(self.im_ids[self.img_id]),
                                  'im_size': (self.input_height, self.input_width)}
            # Advance the window: columns first, then rows, then images.
            if col2 + self.input_width <= self.images[self.img_id].shape[1]:
                self.width = col2
            elif row2 + self.input_height <= self.images[self.img_id].shape[0]:
                self.height = row2
                self.width = 0
            else:
                self.img_id += 1
                self.width = 0
                self.height = 0

        if self.transform is not None:
            sample = self.transform(sample)

        return sample

    def __len__(self):
        """Fixed epoch length in train mode; total crop count otherwise."""
        if 'train' in self.split:
            return self.n_samples
        else:
            return self.image_num

    def _load_img(self, path):
        """Read a TIFF tile as a float32 array."""
        return np.array(tifffile.imread(path)).astype(np.float32)

    def _load_edge(self, path):
        """Read a precomputed edge map (.npy) as float32."""
        return np.load(path).astype(np.float32)

    def _load_semseg(self, path):
        """Convert the RGB label TIFF to a single-channel class-index map.

        Mapping follows the ISPRS color coding; class 5 (clutter/
        background) is kept and black pixels (eroded boundary) become the
        ignore index 255, as in related work.
        """
        raw_semseg = np.array(tifffile.imread(path)).astype(np.float32)
        seg_mask = np.zeros([raw_semseg.shape[0], raw_semseg.shape[1]])

        surface_mask = (raw_semseg == np.array([255, 255, 255])).all(axis=2)
        building_mask = (raw_semseg == np.array([0, 0, 255])).all(axis=2)
        vegetation_mask = (raw_semseg == np.array([0, 255, 255])).all(axis=2)
        tree_mask = (raw_semseg == np.array([0, 255, 0])).all(axis=2)
        car_mask = (raw_semseg == np.array([255, 255, 0])).all(axis=2)
        background_mask = (raw_semseg == np.array([255, 0, 0])).all(axis=2)
        invalid_mask = (raw_semseg == np.array([0, 0, 0])).all(axis=2)

        seg_mask[surface_mask] = 0
        seg_mask[building_mask] = 1
        seg_mask[vegetation_mask] = 2
        seg_mask[tree_mask] = 3
        seg_mask[car_mask] = 4
        seg_mask[background_mask] = 5
        seg_mask[invalid_mask] = 255      # ignore index

        # (H, W, 1) so it slices like the image and edge arrays.
        return np.expand_dims(seg_mask, axis=2)

    def _load_height(self, path):
        """Read a normalized-DSM JPEG as a float32 array."""
        return np.array(Image.open(path)).astype(np.float32)

    def random_cut(self, w, h, cutmix=False):
        """Cut a random ``h x w`` window out of a random tile.

        Args:
            w: crop width (columns).
            h: crop height (rows).
            cutmix: currently unused; kept for interface compatibility.

        Returns:
            Tuple ``(image, height, semseg, edge)`` of crops, each a view
            into the cached full-tile arrays (no copy).
        """
        img_id = random.randint(0, len(self.images) - 1)

        whole_img = self.images[img_id]
        whole_height = self.heights[img_id]
        whole_semseg = self.semsegs[img_id]
        whole_edge = self.edges[img_id]

        # BUGFIX: sample the row offset with the crop *height* and the
        # column offset with the crop *width* (previously swapped; the
        # behavior is identical for the default square crop).
        row1 = random.randint(0, whole_img.shape[0] - h)
        col1 = random.randint(0, whole_img.shape[1] - w)
        row2 = row1 + h
        col2 = col1 + w

        image = whole_img[row1:row2, col1:col2]
        height = whole_height[row1:row2, col1:col2]
        semseg = whole_semseg[row1:row2, col1:col2]
        edge = whole_edge[row1:row2, col1:col2]

        return image, height, semseg, edge




