import random

from PIL import Image
import cv2

class Compose(object):
    """Chain several paired (img, mask) transforms into one callable.

    Each transform must accept (img, mask, manual_random) and return the
    new (img, mask) pair; the same manual_random value is forwarded to
    every transform so randomized steps can be made reproducible.
    """

    def __init__(self, transforms):
        # Sequence of callables applied in order.
        self.transforms = transforms

    def __call__(self, img, mask, manual_random=None):
        # Image and mask must agree in spatial size before transforming.
        assert img.size == mask.size
        for transform in self.transforms:
            img, mask = transform(img, mask, manual_random)
        return img, mask


class RandomHorizontallyFlip(object):
    """Flip img and mask left-right together with probability 0.5.

    When manual_random is supplied it replaces the fresh random draw,
    letting callers share one flip decision across multiple pairs.
    """

    def __call__(self, img, mask, manual_random=None):
        # Use the caller-provided value if any, otherwise draw our own.
        draw = random.random() if manual_random is None else manual_random
        if draw < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        return img, mask


class Resize(object):
    """Resize an (img, mask) pair to a fixed target size.

    The image is resampled bilinearly while the mask uses nearest
    neighbour so label values are never interpolated.
    """

    def __init__(self, size):
        # Callers pass (h, w); PIL's resize wants (w, h), so reverse it.
        self.size = tuple(size)[::-1]

    def __call__(self, img, mask, manual_random=None):
        assert img.size == mask.size
        resized_img = img.resize(self.size, Image.BILINEAR)
        resized_mask = mask.resize(self.size, Image.NEAREST)
        return resized_img, resized_mask


# class RandomCrop(torch.nn.Module):
#     """Crop the given image at a random location.
#     If the image is torch Tensor, it is expected
#     to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions,
#     but if non-constant padding is used, the input is expected to have at most 2 leading dimensions

#     Args:
#         size (sequence or int): Desired output size of the crop. If size is an
#             int instead of sequence like (h, w), a square crop (size, size) is
#             made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
#         padding (int or sequence, optional): Optional padding on each border
#             of the image. Default is None. If a single int is provided this
#             is used to pad all borders. If sequence of length 2 is provided this is the padding
#             on left/right and top/bottom respectively. If a sequence of length 4 is provided
#             this is the padding for the left, top, right and bottom borders respectively.

#             .. note::
#                 In torchscript mode padding as single int is not supported, use a sequence of
#                 length 1: ``[padding, ]``.
#         pad_if_needed (boolean): It will pad the image if smaller than the
#             desired size to avoid raising an exception. Since cropping is done
#             after padding, the padding seems to be done at a random offset.
#         fill (number or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
#             length 3, it is used to fill R, G, B channels respectively.
#             This value is only used when the padding_mode is constant.
#             Only number is supported for torch Tensor.
#             Only int or tuple value is supported for PIL Image.
#         padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
#             Default is constant.

#             - constant: pads with a constant value, this value is specified with fill

#             - edge: pads with the last value at the edge of the image.
#               If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

#             - reflect: pads with reflection of image without repeating the last value on the edge.
#               For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
#               will result in [3, 2, 1, 2, 3, 4, 3, 2]

#             - symmetric: pads with reflection of image repeating the last value on the edge.
#               For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
#               will result in [2, 1, 1, 2, 3, 4, 4, 3]
#     """

#     @staticmethod
#     def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
#         """Get parameters for ``crop`` for a random crop.

#         Args:
#             img (PIL Image or Tensor): Image to be cropped.
#             output_size (tuple): Expected output size of the crop.

#         Returns:
#             tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
#         """
#         _, h, w = F.get_dimensions(img)
#         th, tw = output_size

#         if h + 1 < th or w + 1 < tw:
#             raise ValueError(f"Required crop size {(th, tw)} is larger then input image size {(h, w)}")

#         if w == tw and h == th:
#             return 0, 0, h, w

#         i = torch.randint(0, h - th + 1, size=(1,)).item()
#         j = torch.randint(0, w - tw + 1, size=(1,)).item()
#         return i, j, th, tw

#     def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
#         super().__init__()
#         _log_api_usage_once(self)

#         self.size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))

#         self.padding = padding
#         self.pad_if_needed = pad_if_needed
#         self.fill = fill
#         self.padding_mode = padding_mode

#     def forward(self, img):
#         """
#         Args:
#             img (PIL Image or Tensor): Image to be cropped.

#         Returns:
#             PIL Image or Tensor: Cropped image.
#         """
#         if self.padding is not None:
#             img = F.pad(img, self.padding, self.fill, self.padding_mode)

#         _, height, width = F.get_dimensions(img)
#         # pad the width if needed
#         if self.pad_if_needed and width < self.size[1]:
#             padding = [self.size[1] - width, 0]
#             img = F.pad(img, padding, self.fill, self.padding_mode)
#         # pad the height if needed
#         if self.pad_if_needed and height < self.size[0]:
#             padding = [0, self.size[0] - height]
#             img = F.pad(img, padding, self.fill, self.padding_mode)

#         i, j, h, w = self.get_params(img, self.size)

#         return F.crop(img, i, j, h, w)

#     def __repr__(self) -> str:
#         return f"{self.__class__.__name__}(size={self.size}, padding={self.padding})"
