# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Video transforms functions."""

import inspect
import random
import numbers
import cv2
import numpy as np
from mindspore import Tensor
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.vision.py_transforms as PY
import mindspore.dataset.transforms.py_transforms as PY_trans
from mindvideo.common.utils.class_factory import ClassFactory, ModuleType


def register_builtin_transforms():
    """Register MindSpore's builtin vision transform classes into the pipeline registry.

    Scans both the C-backed (`c_transforms`) and Python (`py_transforms`) vision
    modules; for every public class found, prefers the C implementation and
    falls back to the Python one, then registers it under ModuleType.PIPELINE.
    """
    for attr_name in set(dir(C) + dir(PY)):
        # Skip dunder / private module attributes.
        if attr_name.startswith('__'):
            continue
        candidate = getattr(C, attr_name, None)
        if not candidate:
            candidate = getattr(PY, attr_name)
        if inspect.isclass(candidate):
            ClassFactory.register_cls(candidate, ModuleType.PIPELINE)


@ClassFactory.register(ModuleType.PIPELINE)
class Reshape(PY_trans.PyTensorOperation):
    """
    Reshape input data to a target shape.

    Args:
        shape (tuple): Desired shape of the output.

    Examples:
        >>> # Reshape the video.
        >>> transforms_list1 = [transform.Reshape((16, 224, 224, 3))]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, shape):
        self.shape = shape

    def __call__(self, x):
        """Return the input reshaped to the configured shape."""
        reshaped = x.reshape(self.shape)
        return reshaped


@ClassFactory.register(ModuleType.PIPELINE)
class VideoRescale(PY_trans.PyTensorOperation):
    """
    Rescale the input video frames with a given factor and shift:
    output = input * rescale + shift.

    Args:
        rescale (float): Multiplicative rescale factor. Default: 1 / 255.0.
        shift (float): Additive shift applied after rescaling. Default: -1.

    Examples:
        >>> #  Rescale the input video frames with the given rescale and shift.
        >>> transforms_list1 = [transform.VideoRescale(0.5,0.5)]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, rescale=1 / 255.0, shift=-1):
        self.rescale = rescale
        self.shift = shift

    def __call__(self, x):
        """
        Args:
            x: Video to be rescaled.
        Returns:
            Rescaled video.
        """
        scaled = x * self.rescale
        return scaled + self.shift


@ClassFactory.register(ModuleType.PIPELINE)
class ReOrder(PY_trans.PyTensorOperation):
    """
    Rearrange the order of the dimensions of the input data.

    Args:
        order (tuple): Axis permutation to apply, e.g. (3, 0, 1, 2).

    Examples:
        >>> #  Reorder the axes of the input video frames.
        >>> transforms_list1 = [transform.ReOrder((3,0,1,2))]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, order):
        self.order = tuple(order)

    def __call__(self, x):
        """
        Args:
            x (Union[numpy.ndarray, mindspore.Tensor]): Video to be reordered.
        Returns:
            Video with its axes permuted according to ``order``.
        Raises:
            AssertionError: If the input is neither a numpy array nor a Tensor.
        """
        if isinstance(x, np.ndarray):
            return np.transpose(x, self.order)
        if isinstance(x, Tensor):
            # Bug fix: the original called x.transpose(x, self.order), passing
            # the tensor itself as an axis argument, which fails at runtime.
            # Tensor.transpose takes only the axis permutation.
            return x.transpose(self.order)
        raise AssertionError("""The type of input should be np.array or
                                    mindspore.Tensor but got {}."""
                             .format(type(x).__name__))


@ClassFactory.register(ModuleType.PIPELINE)
class VideoToTensor(PY_trans.PyTensorOperation):
    """
    Convert input video frames of type numpy.ndarray, shape (T, H, W, C) and
    range [0, 255], to a numpy.ndarray of shape (C, T, H, W) in the range
    [-1.0, 1.0] with dtype float32.

    Args:
        order (tuple): Axis permutation applied to the output. Default: (3, 0, 1, 2).

    Examples:
        >>> #  Convert the input video frames in type numpy
        >>> transforms_list1 = [transform.VideoToTensor((3,0,1,2))]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, order=(3, 0, 1, 2)):
        self.order = tuple(order)

    def __call__(self, x):
        """
        Args:
            x (numpy.ndarray): Video to be converted.
        Returns:
            numpy.ndarray: Normalized, transposed float32 video.
        Raises:
            AssertionError: If the input is not a numpy array.
        """
        if not isinstance(x, np.ndarray):
            raise AssertionError("""The type of input should be numpy.ndarray
                                                but got {}.""".format(type(x).__name__))
        # Map [0, 255] -> [0, 1] -> [-1, 1], then permute axes.
        normalized = (x / 255.0) * 2 - 1.0
        return np.transpose(normalized, self.order).astype(np.float32)


@ClassFactory.register(ModuleType.PIPELINE)
class VideoResize(PY_trans.PyTensorOperation):
    """
    Resize the given video sequence (t x h x w x c) to the given size.

    Args:
        size (tuple or int): Desired output size after resize.
        interpolation (str): Interpolation method name. Default: "bilinear".

    Examples:
        >>> # Resize the given video sequences to a square.
        >>> transforms_list1 = [transform.VideoResize((80, 80))]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, size, interpolation="bilinear"):
        self.size = size
        self.inter = interpolation
        # NOTE(review): "bilinear" maps to cv2.INTER_AREA rather than
        # cv2.INTER_LINEAR — confirm this mapping is intentional.
        self._opencv_mindspore_interpolation_map = {
            "nearest": cv2.INTER_NEAREST,
            "linear": cv2.INTER_LINEAR,
            "bilinear": cv2.INTER_AREA,
            "bicubic": cv2.INTER_CUBIC,
        }

    def __call__(self, x):
        """
        Args:
            x: Video to be resized, iterated frame by frame.
        Returns:
            Resized video as a single stacked array.
        """
        interp = self._opencv_mindspore_interpolation_map[self.inter]
        # cv2.resize expects dsize in (width, height) order.
        resized_frames = [
            cv2.resize(frame, self.size, interpolation=interp)
            for frame in x
        ]
        return np.stack(resized_frames, axis=0)


@ClassFactory.register(ModuleType.PIPELINE)
class VideoShortEdgeResize(PY_trans.PyTensorOperation):
    """
    Resize the given video sequence (t, h, w, c) so that the shorter of
    (h, w) becomes `size` pixels, preserving the aspect ratio.

    Args:
        size (int): Desired length of the shorter edge after resize.
        interpolation (str): Interpolation method name. Default: "bilinear".

    Examples:
        >>> # Resize the given video sequences
        >>> transforms_list1 = [transform.VideoShortEdgeResize((80))]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, size, interpolation="bilinear"):
        self.size = size
        self.inter = interpolation
        # NOTE(review): "bilinear" maps to cv2.INTER_AREA rather than
        # cv2.INTER_LINEAR — confirm this mapping is intentional.
        self._opencv_mindspore_interpolation_map = {
            "nearest": cv2.INTER_NEAREST,
            "linear": cv2.INTER_LINEAR,
            "bilinear": cv2.INTER_AREA,
            "bicubic": cv2.INTER_CUBIC,
        }

    def __call__(self, x):
        """
        Args:
            x (numpy.ndarray): Video of shape (t, h, w, c) to be resized.
        Returns:
            numpy.ndarray: Resized video with the shorter edge equal to `size`.
        """
        _, h, w, _ = x.shape
        # Scale so the shorter spatial edge becomes self.size.
        if h < w:
            scale = self.size * 1.0 / h
        else:
            scale = self.size * 1.0 / w
        # Bug fix: cv2.resize takes dsize as (width, height); the original
        # passed (height, width), swapping the output dimensions for
        # non-square frames.
        new_size = (int(scale * w), int(scale * h))
        resized_img_array_list = [
            cv2.resize(
                img_array,
                new_size,
                interpolation=self._opencv_mindspore_interpolation_map[self.inter],
            )
            for img_array in x
        ]
        img_array = np.concatenate(
            [np.expand_dims(arr, axis=0) for arr in resized_img_array_list],
            axis=0,
        )
        return img_array


@ClassFactory.register(ModuleType.PIPELINE)
class VideoRandomCrop(PY_trans.PyTensorOperation):
    """
    Crop the given video sequence (t x h x w x c) at a random location.

    Args:
        size (sequence or int): Desired output size of the crop. An int
            produces a square (size, size) crop; a sequence is taken as (h, w).

    Examples:
        >>> # Randomly crop the given video at a random location.
        >>> transforms_list1 = [transform.VideoRandomCrop((120,120))]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = tuple(size)

    @staticmethod
    def get_params(img, output_size):
        """
        Pick a random top-left corner for a crop of ``output_size``.

        Args:
            img: Array of shape (t, h, w, c) to be cropped.
            output_size (tuple): Expected (height, width) of the crop.
        Returns:
            tuple: (i, j, h, w) — top, left, crop height, crop width.
        """
        _, h, w, _ = img.shape
        th, tw = output_size
        # Nothing to randomize when the crop covers the whole frame.
        if h == th and w == tw:
            return 0, 0, h, w

        top = random.randint(0, h - th) if h != th else 0
        left = random.randint(0, w - tw) if w != tw else 0
        return top, left, th, tw

    def __call__(self, video):
        """
        Args:
            video: Video to be cropped.
        Returns:
            Randomly cropped video.
        """
        top, left, height, width = self.get_params(video, self.size)
        return video[:, top:top + height, left:left + width, :]

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)


@ClassFactory.register(ModuleType.PIPELINE)
class VideoCentorCrop(PY_trans.PyTensorOperation):
    """
    Crop each frame of the input video at the center to the given size.

    Frames are assumed to be at least as large as the crop size in both
    spatial dimensions; no padding is performed. (NOTE(review): the original
    docstring claimed zero-padding for undersized frames, but the code never
    pads — confirm intended behavior for undersized input.)

    Args:
        size (Union[int, sequence]): The output size of the cropped image.
            If size is an integer, a square crop of size (size, size) is returned.
            If size is a sequence of length 2, it should be (height, width).
            Default: (224, 224).

    Examples:
        >>> # crop video frame to a square
        >>> transforms_list1 = [transform.VideoCentorCrop(50)]
        >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
        >>> # crop video to portrait style
        >>> transforms_list2 = [transform.VideoCentorCrop((60, 40))]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list2,
        ...                                                     input_columns=["video"])
    """

    def __init__(self, size=(224, 224)):
        # Bug fix: the docstring promises int support, but an int crashed on
        # `th, tw = self.size`. Normalize like VideoRandomCrop does.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = tuple(size)

    def __call__(self, video):
        """
        Args:
            video: Video of shape (t, h, w, c) to be cropped.
        Returns:
            Center-cropped video.
        """
        _, h, w, _ = video.shape
        th, tw = self.size
        # Center the crop window within each frame.
        i = int(np.round((h - th) / 2.))
        j = int(np.round((w - tw) / 2.))

        return video[:, i:i + th, j:j + tw, :]


@ClassFactory.register(ModuleType.PIPELINE)
class VideoRandomHorizontalFlip(PY_trans.PyTensorOperation):
    """Horizontally flip every frame of the video with a given probability.

    Args:
        prob (float): Probability of the video being flipped. Default: 0.5.

    Examples:
        >>> # Flip every frame of the video
        >>> transforms_list1 = [transform.VideoRandomHorizontalFlip(0.3)]
        >>> video_folder_dataset = video_folder_dataset_1.map(operations=transforms_list1,
        ...                                                 input_columns=["video"])
    """

    def __init__(self, prob=0.5):
        self.p = prob

    def __call__(self, video):
        """
        Args:
            video: Video of shape (t, h, w, c) to be flipped.
        Returns:
            The video, flipped along the width axis with probability p.
        """
        if random.random() >= self.p:
            return video
        # Axis 2 is the width dimension of (t, h, w, c).
        return np.flip(video, axis=2).copy()
