# Copyright 2021, 2022 LuoJiaNET Research and Development Group, Wuhan University
# Copyright 2021, 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Prepare Cityscapes dataset"""
import random

import cv2
import numpy as np

import luojianet_ms.dataset as ds


def normalize(image, label):
    """Decode a raw image/label byte pair and apply ImageNet normalization.

    The image is decoded as BGR color, scaled to [0, 1], mean/std
    normalized and moved to CHW layout; the label is decoded as a
    single-channel map and cast to int32.
    """
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    decoded_img = cv2.imdecode(np.frombuffer(image, dtype=np.uint8), cv2.IMREAD_COLOR)
    decoded_lbl = cv2.imdecode(np.frombuffer(label, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
    normalized = ((decoded_img / 255.0) - mean) / std
    chw_img = normalized.transpose((2, 0, 1))
    return chw_img.astype(np.float32), decoded_lbl.astype(np.int32)


def train_preprocess(image, label, crop_size=None, ignore_label=255):
    """Decode and augment one raw image/label pair for training.

    Pipeline: random horizontal flip -> random rescale in [0.5, 2.0] ->
    pad up to ``crop_size`` (label padded with ``ignore_label``) ->
    random crop -> ImageNet mean/std normalization -> CHW layout.

    Args:
        image: encoded image bytes (decoded as BGR color).
        label: encoded single-channel label bytes.
        crop_size: (height, width) of the output crop; defaults to
            (769, 769). The previous default of ``None`` always crashed
            on ``crop_size[0]``.
        ignore_label: padding value for the label map.

    Returns:
        Tuple of float32 CHW image and int32 HW label. The label was
        previously cast to uint8; int32 keeps it consistent with
        :func:`normalize` (values are unchanged since class ids fit a byte).
    """
    if crop_size is None:
        crop_size = (769, 769)  # matches the CityScapesDataset default

    min_scale, max_scale = 0.5, 2.0
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    image = cv2.imdecode(np.frombuffer(image, dtype=np.uint8), cv2.IMREAD_COLOR)
    label = cv2.imdecode(np.frombuffer(label, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)

    # Random horizontal flip (width axis), applied to image and label alike.
    if random.random() < 0.5:
        image = image[:, ::-1, :]
        label = label[:, ::-1]

    # Random rescale; nearest-neighbour for the label so class ids stay intact.
    h, w = image.shape[0], image.shape[1]
    scale = random.uniform(min_scale, max_scale)
    new_size = (int(round(w * scale)), int(round(h * scale)))
    image = cv2.resize(image, new_size, interpolation=cv2.INTER_CUBIC)
    label = cv2.resize(label, new_size, interpolation=cv2.INTER_NEAREST)

    # Pad bottom/right so both dimensions reach at least crop_size.
    new_h, new_w = image.shape[0], image.shape[1]
    pad_h, pad_w = max(0, crop_size[0] - new_h), max(0, crop_size[1] - new_w)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=0)
        label = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=ignore_label)

    # Random crop; padding above guarantees h >= crop_size[0] and w >= crop_size[1].
    h, w = image.shape[0], image.shape[1]
    i = random.randint(0, h - crop_size[0])
    j = random.randint(0, w - crop_size[1])
    image = image[i:i + crop_size[0], j:j + crop_size[1], :]
    label = label[i:i + crop_size[0], j:j + crop_size[1]]

    # ImageNet normalization, then HWC -> CHW.
    out_img = (((image / 255.0) - mean) / std).transpose((2, 0, 1))
    return out_img.astype(np.float32), label.astype(np.int32)


def CityScapesDataset(mindrecord_file, process_option='train', ignore_label=255, crop_size=(769, 769),
                      num_shards=1, shard_id=None, shuffle=True):
    """Build a Cityscapes dataset pipeline from a MindRecord file.

    ``process_option`` selects the per-sample transform: 'train' applies
    the random-augmentation pipeline, 'eval' applies plain normalization.
    Any other value raises ValueError.
    """
    if process_option == 'train':
        # inner def instead of a lambda; binds crop_size / ignore_label
        # from the enclosing call
        def preprocess(_img, _lbl):
            return train_preprocess(_img, _lbl, crop_size=crop_size, ignore_label=ignore_label)

        dataset = ds.MindDataset(mindrecord_file, columns_list=['image', 'label'], num_parallel_workers=2,
                                 num_shards=num_shards, shard_id=shard_id, shuffle=shuffle)
    elif process_option == 'eval':
        preprocess = normalize
        # NOTE: sharding arguments are not applied in eval mode (unchanged
        # from the original behavior).
        dataset = ds.MindDataset(mindrecord_file, columns_list=['image', 'label'],
                                 num_parallel_workers=2, shuffle=shuffle)
    else:
        raise ValueError("Unknown option")

    return dataset.map(operations=preprocess, input_columns=['image', 'label'],
                       output_columns=["image", "label"], num_parallel_workers=4)

"""
Dataset conversion
"""

import glob
import os
import random

import imageio
import luojianet_ms.dataset as ds
import numpy as np

# from utils import DataTransform


def populate_data_list(input_path, depth_path, gt_path, image_type):
    """Collect image/depth/(optional) ground-truth triples and shuffle them.

    Args:
        input_path: directory containing the input images.
        depth_path: directory containing the depth maps. When the plain
            image name is missing there, a "_depth_estimate.png" suffix
            is assumed for every depth file.
        gt_path: directory with ground-truth images, or a falsy value
            when there is no ground truth (``"gt"`` is then ``None``).
        image_type: file extension of the input images (e.g. "png").

    Returns:
        A shuffled list of dicts with keys "data", "depth" and "gt".
        Empty when no image matches (the original raised IndexError on
        ``image_list[0]`` in that case).
    """
    image_list = glob.glob(os.path.join(input_path, f"*.{image_type}"))
    if not image_list:
        return []

    # Probe a single file: if the unmodified name does not exist under
    # depth_path, assume all depth maps carry the estimate suffix.
    postfix = not os.path.exists(image_list[0].replace(input_path, depth_path))
    suffix = "_depth_estimate.png" if postfix else ""

    data_list = [
        {
            "data": image,
            "depth": image.replace(input_path, depth_path) + suffix,
            "gt": image.replace(input_path, gt_path) if gt_path else None,
        }
        for image in image_list
    ]

    random.shuffle(data_list)
    return data_list


class DataLoader:
    """Index-based loader for (image path, annotation path) samples.

    Samples come from a whitespace-separated list file: each line holds
    an image name (resolved relative to the list file's directory)
    followed by its annotation path.
    """

    def __init__(self, input_path,
                 type_="train",
                 image_type="png",
                 patch_size=128,
                 transform=None):
        # samples: list of (absolute image path, annotation path) tuples
        self.samples = []
        self._load_samples(input_path)
        print(">>> load sample success. <<< size is:", len(self.samples))
        self.type_ = type_
        self.patch_size = patch_size
        # default to the built-in flip/transpose augmentation
        if transform is None:
            transform = self.transform
        self._transform = transform

    def _load_samples(self, data_file_lst):
        """Materialize the (image, annotation) pairs into ``self.samples``."""
        self.samples.extend(self.load_datafile_paths(data_file_lst))

    def load_datafile_paths(self, data_file):
        """Yield (image_path, annotation) pairs parsed from ``data_file``.

        Raises:
            RuntimeError: if ``data_file`` does not exist or is not a
                regular file (the original silently yielded nothing for
                a directory, producing an empty dataset).
            ValueError: if a non-empty line lacks the annotation column
                (was a bare ``assert``, which is stripped under ``-O``).
        """
        if not os.path.exists(data_file):
            raise RuntimeError("data_file: {} not exit".format(data_file))
        if not os.path.isfile(data_file):
            raise RuntimeError("data_file: {} is not a file".format(data_file))

        base_dir = os.path.dirname(data_file)
        with open(data_file) as f:
            for line in f:
                names = line.split()
                if not names:
                    # skip blank lines (previously crashed the assert)
                    continue
                if len(names) < 2:
                    raise ValueError(
                        "malformed line in {}: {!r}".format(data_file, line))
                image_path = os.path.join(base_dir, names[0])
                yield image_path, names[1]

    def read_image(self, uri, is_grayscale=False):
        """Read an image as a float32 CHW array scaled into [0, 1].

        Grayscale images get a leading channel axis instead of a transpose.
        """
        image = imageio.imread(uri)
        if is_grayscale:
            image = image[np.newaxis].astype(np.float32)
        else:
            image = image.transpose(2, 0, 1).astype(np.float32)
        return image / 255.0

    def read_label(self, uri):
        """Read an annotation image as an int32 HW array of raw class ids."""
        return imageio.imread(uri).astype(np.int32)

    def crop(self, *images):
        """Randomly crop the same ``patch_size`` window from each image.

        ``np.random.randint``'s high bound is exclusive, so ``+ 1`` is
        needed to make the last valid offset reachable — and to avoid
        the ValueError the original raised when a dimension was exactly
        ``patch_size``.
        """
        h, w = images[0].shape[-2:]
        start_h = np.random.randint(0, h - self.patch_size + 1)
        start_w = np.random.randint(0, w - self.patch_size + 1)
        return [
            image[..., start_h:start_h + self.patch_size,
                  start_w:start_w + self.patch_size].copy()
            for image in images
        ]

    def transform(self, *images):
        """Random data augmentation on CHW arrays.

        Each of three ops — flip along axis 1, flip along axis 2,
        transpose of the spatial axes — fires independently with
        probability 0.5 and is applied to every input alike.
        """
        result_list = list(images)
        if random.random() > 0.5:
            result_list = [np.flip(img, axis=1) for img in result_list]
        if random.random() > 0.5:
            result_list = [np.flip(img, axis=2) for img in result_list]
        if random.random() > 0.5:
            result_list = [img.transpose((0, 2, 1)) for img in result_list]
        return result_list

    def __getitem__(self, index):
        """Return the (image, label) pair at ``index``.

        NOTE(review): cropping and augmentation are intentionally not
        applied here, matching the original implementation.
        """
        image_path, label_path = self.samples[index]
        image = self.read_image(image_path)
        label = self.read_label(label_path)
        return image, label

    def __len__(self):
        return len(self.samples)


def make_dataset(input_path,
                 type_="train",
                 image_type="png",
                 patch_size=512,
                 transform=None,
                 batch_size=4,
                 shuffle_size=10,
                 num_parallel_workers=8):
    """Return a luojianet_ms dataset built from a list file in ``input_path``.

    Args:
        input_path: directory holding "train.txt" / "val.txt" list files.
        type_: "train" reads train.txt; "eval" and "test" read val.txt.
        image_type: image file extension, forwarded to DataLoader.
        patch_size: crop size, forwarded to DataLoader.
        transform: optional augmentation callable, forwarded to DataLoader.
        batch_size: batch size for the train/eval pipeline.
        shuffle_size: currently unused (shuffling is disabled upstream).
        num_parallel_workers: worker count for the generator dataset.

    Returns:
        A batched GeneratorDataset for "train"/"eval", an unbatched
        single-column one for "test", or None for any other ``type_``.
    """
    if type_ == 'train':
        dataset_path = os.path.join(input_path, "train.txt")
    else:
        dataset_path = os.path.join(input_path, "val.txt")

    dataset = DataLoader(dataset_path,
                         type_=type_,
                         image_type=image_type,
                         patch_size=patch_size,
                         transform=transform)

    # BUG FIX: the original condition was `type_ == "train" or "eval"`,
    # which is always truthy ("eval" is a non-empty string), so the
    # "test" branch below was unreachable.
    if type_ in ("train", "eval"):
        train_dataset = ds.GeneratorDataset(
            dataset, ['input', 'gt'], num_parallel_workers=num_parallel_workers)
        train_dataset = train_dataset.batch(batch_size)
        return train_dataset
    if type_ == "test":
        # NOTE(review): DataLoader.__getitem__ yields two values but only
        # one column is declared here — confirm against the test caller.
        test_dataset = ds.GeneratorDataset(
            dataset, ['input'], num_parallel_workers=num_parallel_workers)
        return test_dataset
    return None
