import os

import cv2
import matplotlib.pyplot as plt
import mindspore.dataset as ds
import numpy as np
from PIL import Image


class NaicDataset:
    """Raw NAIC remote-sensing segmentation dataset.

    Reads ``<root>/<img_dir>/<id>.tif`` images and ``<root>/<ann_dir>/<id>.png``
    masks, where the sample ids come from one line-per-id list file
    (e.g. ``train.txt``).  Mask pixel values encode both label levels as
    ``level1 * 100 + level2``.

    Supports the iterator protocol so it can be fed directly to
    ``mindspore.dataset.GeneratorDataset``.
    """

    # Level-1 (coarse, 8-way) and level-2 (fine, 17-way) category names.
    CLASSES_L1 = ('水体', '交通运输', '建筑', '耕地', '草地', '林地', '裸土', '其它')
    CLASSES_L2 = ('水体', '道路', '建筑物', '机场', '火车站', '光伏', '停车场', '操场', '普通耕地', '农业大棚', '自然草地',
                  '绿地绿化', '自然林', '人工林', '自然裸土', '人为裸土', '其它')

    # BGR colors for visualisation, one entry per (possible) class id.
    PALETTE = [[0, 0, 255], [0, 255, 0], [255, 0, 0], [255, 255, 0], [255, 0, 255], [0, 255, 255],
               [106, 90, 205], [128, 138, 135], [56, 94, 15], [255, 97, 0], [176, 224, 230], [65, 105, 225],
               [128, 42, 42], [188, 143, 143], [255, 128, 64], [160, 32, 240], [250, 235, 215], [0, 201, 87],
               [192, 192, 192], [255, 192, 203],
               ]

    def __init__(self, root, img_dir='images', ann_dir='labels', list_file='train.txt', level=1, use_all_level=True,
                 ignore_classes=()):
        """
        Args:
            root: dataset root directory (``~`` is expanded).
            img_dir: sub-directory holding ``<id>.tif`` images.
            ann_dir: sub-directory holding ``<id>.png`` masks.
            list_file: text file under ``root`` listing one integer sample id per line.
            level: which label level to decode from the mask (1 = coarse, 2 = fine).
            use_all_level: when True, level-2 names are used and level-1 ids are
                offset by 17 so both levels share one id space.
            ignore_classes: level-2 class indices to drop; remaining classes are
                renumbered contiguously via ``class_map``. ``None`` disables mapping.
        """
        self.root = os.path.expanduser(root)
        self.list_file = list_file
        self.img_dir = os.path.join(self.root, img_dir)
        self.ann_dir = os.path.join(self.root, ann_dir)
        self.use_all_level = use_all_level
        self.level = level

        self.class_names = list(self.CLASSES_L2 if self.level == 2 or self.use_all_level else self.CLASSES_L1)
        self.ignore_classes = ignore_classes
        if self.ignore_classes is not None and (self.level == 2 or self.use_all_level):
            class_names = []
            # 256-entry LUT applied to uint8 masks; unmapped values become 255 (ignore).
            self.class_map = np.full(256, 255, dtype=np.uint8)
            # Level-1 ids occupy [17, 17 + 8) and map to themselves.
            # NOTE(review): these are NOT renumbered when ignore_classes shrinks
            # the level-2 id space, so they can exceed num_classes — confirm the
            # consumer expects that.
            self.class_map[17:17 + len(self.CLASSES_L1)] = np.arange(len(self.CLASSES_L1)) + 17
            for i, class_name in enumerate(self.class_names):
                if i not in self.ignore_classes:
                    self.class_map[i] = len(class_names)
                    class_names.append(class_name)
            self.class_names = class_names
        else:
            self.class_map = None
        self.num_classes = len(self.class_names)
        self.with_background = True
        self.cls2idx = {v: i for i, v in enumerate(self.class_names)}

        # One integer sample id per non-blank line of the list file.
        with open(os.path.join(self.root, self.list_file), 'r') as f:
            self.samples = [int(s.strip()) for s in f if s.strip()]

        self._step = 0

    def __len__(self):
        return len(self.samples)

    def _load_seg(self, index):
        """Load and decode the mask for sample ``index`` as a uint8 array."""
        anno_file = os.path.join(self.ann_dir, f'{self.samples[index]}.png')
        # Explicit raise instead of assert: asserts vanish under ``python -O``.
        if not os.path.exists(anno_file):
            raise FileNotFoundError(anno_file)
        mask = np.array(Image.open(anno_file))
        if self.level == 1:
            # Coarse label is the hundreds digit; shift past the 17 fine ids
            # when both levels share one id space.
            mask = mask // 100 - 1 + 17 * int(self.use_all_level)
        else:
            # Fine label is the remainder.
            mask = mask % 100 - 1
        mask = mask.astype(np.uint8)
        if self.class_map is not None:
            mask = self.class_map[mask]
        return mask

    def _load_img(self, index):
        """Load the BGR image for sample ``index`` (cv2 convention)."""
        return cv2.imread(os.path.join(self.img_dir, f'{self.samples[index]}.tif'))

    def __getitem__(self, index):
        return self._load_img(index), self._load_seg(index)

    def __iter__(self):
        self._step = 0
        return self

    def __next__(self):
        if self._step >= len(self):
            raise StopIteration
        data = self[self._step]
        self._step += 1
        return data


class SegDataset:
    """Builds a MindSpore dataset pipeline on top of :class:`NaicDataset`.

    Applies per-channel normalization and HWC->CHW transposition, then
    shuffles (training only), batches, and repeats.
    """

    def __init__(self,
                 db_cfg: dict,
                 image_mean,
                 image_std,
                 data_file='',
                 batch_size=32,
                 crop_size=257,
                 max_scale=2.0,
                 min_scale=0.5,
                 ignore_label=255,
                 num_readers=2,
                 num_parallel_calls=4,
                 shard_id=None,
                 shard_num=None,
                 is_train=True):
        """
        Args:
            db_cfg: keyword arguments forwarded to ``NaicDataset``.
            image_mean: per-channel mean in [0, 1]; scaled by 255 internally.
            image_std: per-channel std in [0, 1]; scaled by 255 internally.
            data_file: unused here; kept for interface compatibility.
            batch_size: samples per batch (remainder batches are dropped).
            crop_size / max_scale / min_scale / ignore_label: parameters of the
                currently-disabled augmentation (see ``transforms``).
            num_readers: parallel workers for the generator source.
            num_parallel_calls: parallel workers for the ``map`` transform.
            shard_id / shard_num: distributed-training shard placement.
            is_train: enables shuffling.

        Raises:
            ValueError: if ``max_scale <= min_scale``.
        """
        self.db = NaicDataset(**db_cfg)
        self.data_file = data_file
        self.batch_size = batch_size
        self.crop_size = crop_size
        # Normalization constants are given in [0, 1] but images are 0-255.
        self.image_mean = np.array(image_mean, dtype=np.float32) * 255.
        self.image_std = np.array(image_std, dtype=np.float32) * 255.
        self.max_scale = max_scale
        self.min_scale = min_scale
        self.ignore_label = ignore_label
        self.num_classes = self.db.num_classes
        self.num_readers = num_readers
        self.num_parallel_calls = num_parallel_calls
        self.shard_id = shard_id
        self.shard_num = shard_num
        self.is_train = is_train
        # Explicit raise instead of assert: asserts vanish under ``python -O``.
        if max_scale <= min_scale:
            raise ValueError(f'max_scale ({max_scale}) must be > min_scale ({min_scale})')

    def transforms(self, image, label):
        """Normalize a BGR HWC image and transpose it to CHW; cast label to int32.

        NOTE: random scale / pad-crop / horizontal-flip augmentation (using
        crop_size, min_scale, max_scale, ignore_label) existed here but was
        disabled upstream; this is intentionally a pure normalize + transpose.
        """
        image_out = image
        label_out = label.astype(np.int32)

        image_out = (image_out - self.image_mean) / self.image_std

        image_out = image_out.transpose((2, 0, 1))
        # copy() makes the arrays contiguous / owned before MindSpore takes them.
        image_out = image_out.copy()
        label_out = label_out.copy()
        return image_out, label_out

    def get_dataset(self, repeat=1):
        """Return the assembled pipeline, repeated ``repeat`` times."""
        data_set = ds.GeneratorDataset(self.db, ["data", "label"],
                                       shuffle=self.is_train,
                                       num_parallel_workers=self.num_readers,
                                       num_shards=self.shard_num,
                                       shard_id=self.shard_id)
        data_set = data_set.map(operations=self.transforms,
                                input_columns=["data", "label"],
                                output_columns=["data", "label"],
                                num_parallel_workers=self.num_parallel_calls)
        # Only shuffle when training: the original shuffled unconditionally,
        # which also scrambled evaluation order despite shuffle=is_train above.
        if self.is_train:
            data_set = data_set.shuffle(buffer_size=self.batch_size * 10)
        data_set = data_set.batch(self.batch_size, drop_remainder=True)
        data_set = data_set.repeat(repeat)
        return data_set


def create_dataset(data_dir, repeat=400, batch_size=32, train=True, ignore_classes=(3, 4, 5), **kwargs):
    """Build the NAIC segmentation pipeline for training or validation.

    Args:
        data_dir: dataset root passed to ``NaicDataset``.
        repeat: number of epochs the pipeline repeats.
        batch_size: samples per batch.
        train: selects ``train.txt`` vs ``val.txt`` and enables shuffling.
        ignore_classes: level-2 class ids to drop (previously hard-coded;
            default unchanged for backward compatibility).
        **kwargs: forwarded to ``SegDataset``; mean/std default to 0.5.

    Returns:
        A batched, repeated ``mindspore.dataset`` pipeline.
    """
    kwargs.setdefault('image_mean', [0.5, 0.5, 0.5])
    kwargs.setdefault('image_std', [0.5, 0.5, 0.5])
    return SegDataset(
        db_cfg=dict(root=data_dir, ignore_classes=ignore_classes,
                    list_file='train.txt' if train else 'val.txt'),
        batch_size=batch_size,
        is_train=train,
        **kwargs
    ).get_dataset(repeat)


def test():
    """Visual smoke test: pull one batch from the validation pipeline and
    display its first (normalized) image with matplotlib."""
    pipeline = SegDataset(dict(root=os.path.expanduser('~/data/seg_naic/round2')), [0, 0, 0], [1, 1, 1],
                          is_train=False).get_dataset()
    # NOTE(review): get_next() is the legacy dict-iterator API; newer MindSpore
    # uses next(iterator) — confirm against the installed version.
    batch = pipeline.create_dict_iterator().get_next()
    first = batch['data'].asnumpy()[0]  # type: np.ndarray
    first = np.transpose(first, (1, 2, 0))  # CHW -> HWC for plt.imshow
    plt.imshow(first)
    plt.show()


if __name__ == '__main__':
    test()
