# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""the module is used to process images."""

import copy

from numpy import random
import numpy as np
import cv2
from PIL import Image

from mindspore.dataset.vision.c_transforms import Resize as Resize_
from mindspore.dataset.vision.c_transforms import Normalize as Normalize_

from datasets.pipelines.formatting import Collect
from datasets.utils.postprocess import (choose_candidate_by_constraints,
                                        correct_bbox_by_candidates, rand_init)

from mindvision.common.dataset.builder import build_transforms
from mindvision.common.utils.class_factory import ClassFactory, ModuleType


@ClassFactory.register(ModuleType.PIPELINE)
class RandomExpand:
    """Randomly place the image on a larger mean-filled canvas.

    With probability 0.5 the image is pasted at a random offset inside a
    canvas whose sides are scaled by a random ratio from ``ratio_range``;
    bboxes are shifted by the same offset. Otherwise results pass through
    unchanged.
    """

    def __init__(self,
                 mean=(0, 0, 0),
                 to_rgb=True,
                 ratio_range=(1, 4)):
        # Channel order of the fill value is reversed when to_rgb is set.
        self.mean = mean[::-1] if to_rgb else mean
        self.min_ratio, self.max_ratio = ratio_range

    def __call__(self, results):
        # Apply the expansion only half of the time.
        if random.randint(2):
            return results

        img = results.get("image")
        boxes = results.get("bboxes")

        height, width, channels = img.shape
        ratio = random.uniform(self.min_ratio, self.max_ratio)
        canvas = np.full((int(height * ratio), int(width * ratio), channels),
                         self.mean).astype(img.dtype)

        # Random paste position inside the enlarged canvas.
        offset_x = int(random.uniform(0, width * ratio - width))
        offset_y = int(random.uniform(0, height * ratio - height))
        canvas[offset_y:offset_y + height, offset_x:offset_x + width] = img

        # Shift box corners by the paste offset (in-place, like the original).
        boxes += np.tile((offset_x, offset_y), 2)

        results['image'] = canvas
        results['bboxes'] = boxes

        return results


@ClassFactory.register(ModuleType.PIPELINE)
class Resize(Resize_):
    """Resize operation for image.

    Resizes the image to a fixed (img_width, img_height), scales the ground
    truth boxes by the same factors, and clips them to the new extent.
    """

    def __init__(self, img_width, img_height):
        # Target network input size.
        self.img_width = img_width
        self.img_height = img_height

    def __call__(self, results):
        img = results.get("image")
        gt_bboxes = results.get("bboxes")

        # Fix: read the ORIGINAL size before resizing. Previously the shape was
        # taken from the already-resized image, making both scale factors
        # always 1.0, so the bboxes were never rescaled.
        h, w = img.shape[:2]

        img_data = cv2.resize(
            img, (self.img_width, self.img_height), interpolation=cv2.INTER_LINEAR)

        h_scale = self.img_height / h
        w_scale = self.img_width / w
        # Per-coordinate scale: (x_min, y_min, x_max, y_max).
        scale_factor = np.array(
            [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)

        img_shape = (self.img_height, self.img_width, 1.0)
        img_shape = np.asarray(img_shape, dtype=np.float32)

        gt_bboxes = gt_bboxes * scale_factor

        # Clip boxes so they lie inside the resized image.
        gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
        gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)

        results['image'] = img_data
        results['image_shape'] = img_shape
        results['bboxes'] = gt_bboxes

        return results


@ClassFactory.register(ModuleType.PIPELINE)
class ResizeWithinMultiScales:
    """
    Crop an image randomly with bounding box constraints.

    Args:
        max_boxes (int): maximum number of gt boxes kept per image.
        jitter (float): jitter ratio used when sampling the crop candidate.
        max_trial (int): number of candidate-sampling attempts.
        flip (float): probability of horizontal flip. Default: 0.5.
        rgb (tuple): background fill color for the padded canvas.
            Default: (128, 128, 128).
        use_constraints (bool): whether candidate selection applies IOU
            constraints. Default: False.
    """
    def __init__(self,
                 max_boxes,
                 jitter,
                 max_trial,
                 flip=0.5,
                 rgb=(128, 128, 128),
                 use_constraints=False):
        """Constructor for ResizeWithinMultiScales"""
        self.use_constraints = use_constraints
        self.max_boxes = max_boxes
        self.jitter = jitter
        self.max_trial = max_trial
        self.flip = flip
        self.rgb = rgb

    def __call__(self, results):
        image = copy.deepcopy(results['image'])
        if not isinstance(image, Image.Image):
            image = Image.fromarray(image)
        box = results['bboxes']
        image_h, image_w = results['image_shape']
        input_h, input_w = results['resize_size']

        np.random.shuffle(box)
        if len(box) > self.max_boxes:
            box = box[:self.max_boxes]
        flip = rand_init() < self.flip

        box_data = np.zeros((self.max_boxes, 4))

        # Fix: forward the configured options. Previously use_constraints=False
        # and max_trial=10 were hard-coded here, so the constructor arguments
        # self.use_constraints / self.max_trial were silently ignored.
        candidates = choose_candidate_by_constraints(use_constraints=self.use_constraints,
                                                     max_trial=self.max_trial,
                                                     input_w=input_w,
                                                     input_h=input_h,
                                                     image_w=image_w,
                                                     image_h=image_h,
                                                     jitter=self.jitter,
                                                     box=box)
        box_data, candidate = correct_bbox_by_candidates(candidates=candidates,
                                                         input_w=input_w,
                                                         input_h=input_h,
                                                         image_w=image_w,
                                                         image_h=image_h,
                                                         flip=flip,
                                                         box=box,
                                                         box_data=box_data,
                                                         allow_outside_center=True)
        dx, dy, nw, nh = candidate
        # interp=10 picks a random interpolation method.
        interp = get_interp_method(interp=10)
        image = image.resize((nw, nh), pil_image_reshape(interp))

        # Place the resized image on a solid-color background canvas.
        new_image = Image.new('RGB', (input_w, input_h), tuple(self.rgb))
        new_image.paste(image, (dx, dy))
        image = new_image
        image = np.array(image)
        results['image'] = image
        results['bboxes'] = box_data
        results['image_shape'] = np.array(image.shape[:2], np.int32)
        return results


@ClassFactory.register(ModuleType.PIPELINE)
class PilResize:
    """Resize an image to a fixed size using PIL."""

    def __init__(self, resize_size, interp=9):
        """Constructor for PilResize."""
        self.output_h, self.output_w = resize_size
        self.interp = interp

    def __call__(self, results):
        """Resize results['image'] and record the new image_shape."""
        image = results['image']
        if not isinstance(image, Image.Image):
            image = Image.fromarray(image)

        src_w, src_h = image.size
        # Resolve codes such as 9 (auto) / 10 (random) into a concrete method
        # based on the source and target sizes.
        method = get_interp_method(
            interp=self.interp,
            sizes=(src_h, src_w, self.output_h, self.output_w)
        )

        resized = image.resize(
            (self.output_w, self.output_h),
            pil_image_reshape(method)
        )

        array = np.array(resized)
        results['image'] = array
        results['image_shape'] = np.array(array.shape[:2], np.int32)
        return results


@ClassFactory.register(ModuleType.PIPELINE)
class Rescale:
    """Aspect-preserving rescale of image and bboxes, padded to a fixed size."""

    def __init__(self, img_width, img_height):
        self.img_width = img_width
        self.img_height = img_height

    def __call__(self, results):
        image = results.get("image")
        bboxes = results.get("bboxes")

        # Keep aspect ratio while fitting into (img_width, img_height).
        image, scale = rescale_with_scale(image, (self.img_width, self.img_height))
        if image.shape[0] > self.img_height:
            # Height still too large: shrink again, constrained by height.
            image, extra_scale = rescale_with_scale(image, (self.img_height, self.img_height))
            scale = scale * extra_scale

        # Scale boxes with the image and clip to the rescaled extent.
        bboxes = bboxes * scale
        bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, image.shape[1] - 1)
        bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, image.shape[0] - 1)

        assert (self.img_height - image.shape[0] >= 0) and (self.img_width - image.shape[1] >= 0)

        # Zero-pad on the bottom/right up to the fixed network input size.
        padded = np.zeros((self.img_height, self.img_width, 3)).astype(image.dtype)
        padded[:image.shape[0], :image.shape[1], :] = image

        results['image'] = padded
        results['image_shape'] = np.asarray((self.img_height, self.img_width, 1.0), dtype=np.float32)
        results['bboxes'] = bboxes

        return results


@ClassFactory.register(ModuleType.PIPELINE)
class RescaleWithoutGT:
    """Aspect-preserving rescale and fixed-size pad, without gt boxes."""

    def __init__(self, img_width, img_height):
        self.img_width = img_width
        self.img_height = img_height

    def __call__(self, results):
        image = results.get("image")
        shape_info = results.get("image_shape")

        # Keep aspect ratio while fitting into (img_width, img_height).
        image, scale = rescale_with_scale(image, (self.img_width, self.img_height))
        if image.shape[0] > self.img_height:
            # Height still too large: shrink again, constrained by height.
            image, extra_scale = rescale_with_scale(image, (self.img_height, self.img_height))
            scale = scale * extra_scale

        assert (self.img_height - image.shape[0] >= 0) and (self.img_width - image.shape[1] >= 0)

        # Zero-pad on the bottom/right up to the fixed network input size.
        padded = np.zeros((self.img_height, self.img_width, 3)).astype(image.dtype)
        padded[:image.shape[0], :image.shape[1], :] = image

        # Append the final scale twice (the same factor applies to h and w).
        shape_info = np.append(shape_info, (scale, scale))

        results['image'] = padded
        results['image_shape'] = np.asarray(shape_info, dtype=np.float32)

        return results


@ClassFactory.register(ModuleType.PIPELINE)
class Normalize(Normalize_):
    """Imnormalize operation for image.

    Subtracts per-channel means and divides by per-channel stds, optionally
    converting BGR input to RGB first.

    Args:
        mean (tuple): per-channel mean. Default: (123.675, 116.28, 103.53).
        std (tuple): per-channel std. Default: (58.395, 57.12, 57.375).
        to_rgb (bool): convert BGR to RGB before normalizing. Default: True.
    """

    def __init__(self,
                 mean=None,
                 std=None,
                 to_rgb=True):
        if mean is None:
            mean = (123.675, 116.28, 103.53)
        if std is None:
            std = (58.395, 57.12, 57.375)
        self.mean = np.array(mean)
        self.std = np.array(std)
        self.to_rgb = to_rgb

    def __call__(self, results):
        img = results['image']
        img_data = img.copy().astype(np.float32)
        # Fix: honor the to_rgb flag. Previously the BGR->RGB conversion ran
        # unconditionally and self.to_rgb was ignored.
        if self.to_rgb:
            cv2.cvtColor(img_data, cv2.COLOR_BGR2RGB, img_data)  # inplace
        cv2.subtract(img_data, np.float64(self.mean.reshape(1, -1)), img_data)  # inplace
        cv2.multiply(img_data, 1 / np.float64(self.std.reshape(1, -1)), img_data)  # inplace
        results['image'] = img_data
        return results


@ClassFactory.register(ModuleType.PIPELINE)
class StaticNormalize:
    """Scale a uint8 RGB image to [0, 1] and optionally standardize it."""

    def __init__(self,
                 statistic_norm=True,
                 mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225)):
        """Constructor for statistic_normalize_img."""
        self.statistic_norm = statistic_norm
        self.mean = mean
        self.std = std

    def __call__(self, results):
        # Input image is expected in RGB channel order.
        image = results['image']
        if isinstance(image, Image.Image):
            image = np.array(image)

        # Scale pixel values into [0, 1].
        image = image / 255.
        if self.statistic_norm:
            # Standardize with the configured per-channel statistics.
            image = (image - np.array(self.mean)) / np.array(self.std)

        results['image'] = image.astype(np.float32)
        return results


@ClassFactory.register(ModuleType.PIPELINE)
class RandomFlip:
    """Horizontally flip image and bboxes with a given probability."""

    def __init__(self, flip_ratio):
        # Probability of flipping a given sample.
        self.flip_ratio = flip_ratio

    def __call__(self, results):
        if np.random.rand() >= self.flip_ratio:
            return results

        image = results.get("image")
        bboxes = results.get("bboxes")

        # Mirror the image left-right.
        image = np.flip(image, axis=1)

        # Mirror x coordinates: new_x = width - old_x - 1, with min/max swapped.
        width = image.shape[1]
        flipped = bboxes.copy()
        flipped[..., 0::4] = width - bboxes[..., 2::4] - 1
        flipped[..., 2::4] = width - bboxes[..., 0::4] - 1

        results['image'] = image
        results['bboxes'] = flipped

        return results


@ClassFactory.register(ModuleType.PIPELINE)
class RandomPilFlip:
    """Use PIL Apis to do flip operations.

    Args:
        ratio : probability of applying the flip. Default: 0.5.
        direction : Only support "FLIP_LEFT_RIGHT" and "FLIP_TOP_BOTTOM".
    """

    def __init__(self, ratio=0.5, direction="FLIP_LEFT_RIGHT"):
        """Constructor for RandomPilFlip."""
        if direction == "FLIP_LEFT_RIGHT":
            self.flip = Image.FLIP_LEFT_RIGHT
        else:
            self.flip = Image.FLIP_TOP_BOTTOM
        self.ratio = ratio

    def __call__(self, results):
        if rand_init() > self.ratio:
            return results
        image = results['image']
        if not isinstance(image, Image.Image):
            image = Image.fromarray(image)
        # Fix: PIL's transpose returns a NEW image — the original code discarded
        # the result (so no flip ever happened) and always passed
        # FLIP_LEFT_RIGHT, ignoring the configured direction.
        image = image.transpose(self.flip)
        image = np.array(image)
        results['image'] = image
        return results


@ClassFactory.register(ModuleType.PIPELINE)
class ColorDistortion:
    """Random color jitter in HSV space.

    Args:
        hue: maximum absolute hue shift.
        saturation: maximum saturation gain.
        value: maximum value (brightness) gain.
    """

    def __init__(self, hue, saturation, value):
        """Constructor for ColorDistortion."""
        self.hue = hue
        self.saturation = saturation
        self.value = value

    def __call__(self, results):
        image = results['image']

        # Draw the jitter factors (the order of draws is kept for
        # reproducibility of the random stream).
        hue_shift = rand_init(-self.hue, self.hue)
        if rand_init() < .5:
            sat_gain = rand_init(1, self.saturation)
        else:
            sat_gain = 1 / rand_init(1, self.saturation)
        if rand_init() < .5:
            val_gain = rand_init(1, self.value)
        else:
            val_gain = 1 / rand_init(1, self.value)

        hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV_FULL) / 255.
        hsv[..., 0] += hue_shift
        # Hue wraps around the [0, 1] circle.
        hsv[..., 0][hsv[..., 0] > 1] -= 1
        hsv[..., 0][hsv[..., 0] < 0] += 1
        hsv[..., 1] *= sat_gain
        hsv[..., 2] *= val_gain
        # Clamp all channels back into [0, 1] before converting to uint8.
        hsv[hsv > 1] = 1
        hsv[hsv < 0] = 0
        hsv = (hsv * 255.).astype(np.uint8)

        results['image'] = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)
        return results


@ClassFactory.register(ModuleType.PIPELINE)
class ConvertGrayToColor:
    """ Convert gray image to color image."""

    def __init__(self):
        pass

    def __call__(self, results):
        """Replicate a single-channel image into three identical channels."""
        image = results['image']
        if image.ndim == 2:
            results['image'] = np.stack((image, image, image), axis=-1)
        return results


@ClassFactory.register(ModuleType.PIPELINE)
class PerBatchCocoCollect:
    """ Collect for yolo """

    def __init__(self):
        pass

    def __call__(self, data_tuple):
        """Unpack (image, annotation, resize_size) into a result dict."""
        image = data_tuple[0]
        anno = data_tuple[1]
        resize_size = data_tuple[2]
        # Annotation rows carry box coords in the first 4 columns, label in the 5th.
        return {
            'image': image,
            'annotation': anno,
            'image_shape': np.array(image.shape[:2], np.int32),
            'bboxes': anno[:, :4],
            'labels': anno[:, 4],
            'resize_size': resize_size,
        }


@ClassFactory.register(ModuleType.PIPELINE)
class YoloBboxPreprocess:
    """ Data bbox preprocess for yolo.

    Builds the per-layer ground-truth tensors used by the YOLO loss from one
    image's bboxes/labels, plus fixed-size padded gt-box arrays (to avoid
    dynamic shapes downstream). Assumes 3 detection layers (9 anchors).

    Args:
        anchors (list): anchor (w, h) pairs, 3 per detection layer.
        anchor_mask (list[list[int]]): anchor indices assigned to each layer.
        num_classes (int): number of object classes.
        label_smooth (bool): whether to smooth the one-hot class targets.
        label_smooth_factor (float): label smoothing strength.
        iou_threshold (float): anchors whose IOU with a box exceeds this are
            also assigned the box (besides the best-matching anchor).
        max_boxes (int): fixed number of gt boxes kept per layer.
    """

    def __init__(self,
                 anchors,
                 anchor_mask,
                 num_classes,
                 label_smooth,
                 label_smooth_factor,
                 iou_threshold,
                 max_boxes):
        """Constructor for YoloCollate."""
        self.anchors = anchors
        self.num_classes = num_classes
        self.label_smooth = label_smooth
        self.label_smooth_factor = label_smooth_factor
        self.iou_threshold = iou_threshold
        self.max_boxes = max_boxes
        self.anchor_mask = anchor_mask

    def __call__(self, results):
        """Compute y_true tensors and padded gt boxes and store them in results."""
        anchors = np.array(self.anchors)
        num_layers = anchors.shape[0] // 3
        bboxes = results['bboxes'].tolist()
        labels = results['labels'].tolist()
        # Pad labels with class 0 when fewer labels than boxes were provided.
        labels = labels + [0] * (len(bboxes) - len(labels))

        # Each row: [x_min, y_min, x_max, y_max, label]
        true_boxes = np.array(
            [list(bbox) + [int(label)] for bbox, label in zip(bboxes, labels)],
            dtype='float32')
        input_shape = np.array(results['image_shape'], dtype='int32')

        # Convert corner boxes to (center, size), normalized by image (w, h).
        boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2.
        boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
        true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
        true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]

        grid_shapes = [input_shape // 32, input_shape // 16, input_shape // 8]
        y_true = [np.zeros((grid_shapes[layer][0],
                            grid_shapes[layer][1],
                            len(self.anchor_mask[layer]),
                            5 + self.num_classes), dtype='float32') for layer in range(num_layers)]

        # IOU between every valid gt box and every anchor, both centered at the
        # origin — only the sizes matter for anchor matching.
        anchors = np.expand_dims(anchors, 0)
        anchors_max = anchors / 2.
        anchors_min = -anchors_max
        valid_mask = boxes_wh[..., 0] > 0

        wh = boxes_wh[valid_mask]
        if wh.size != 0:
            wh = np.expand_dims(wh, -2)
            boxes_max = wh / 2.
            boxes_min = -boxes_max

            intersect_min = np.maximum(boxes_min, anchors_min)
            intersect_max = np.minimum(boxes_max, anchors_max)
            intersect_wh = np.maximum(intersect_max - intersect_min, 0.)
            intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
            box_area = wh[..., 0] * wh[..., 1]
            anchor_area = anchors[..., 0] * anchors[..., 1]
            iou = intersect_area / (box_area + anchor_area - intersect_area)

            y_true = self.get_best_anchor_boxes(iou, num_layers, grid_shapes, true_boxes, y_true)

        pad_gt_box0, pad_gt_box1, pad_gt_box2 = self.pad_gt_boxes(y_true)

        results['bbox1'] = y_true[0]
        results['bbox2'] = y_true[1]
        results['bbox3'] = y_true[2]
        results['gt_box1'] = pad_gt_box0
        results['gt_box2'] = pad_gt_box1
        results['gt_box3'] = pad_gt_box2
        return results

    def _assign_anchor(self, y_true, layer, anchor_idx, box_idx, grid_shapes, true_boxes):
        """Write gt box `box_idx` into `y_true[layer]` at anchor slot `anchor_idx`."""
        i = np.floor(true_boxes[box_idx, 0] * grid_shapes[layer][1]).astype('int32')  # grid x
        j = np.floor(true_boxes[box_idx, 1] * grid_shapes[layer][0]).astype('int32')  # grid y

        k = self.anchor_mask[layer].index(anchor_idx)
        c = true_boxes[box_idx, 4].astype('int32')
        y_true[layer][j, i, k, 0:4] = true_boxes[box_idx, 0:4]
        y_true[layer][j, i, k, 4] = 1.

        if self.label_smooth:
            sigma = self.label_smooth_factor / (self.num_classes - 1)
            y_true[layer][j, i, k, 5:] = sigma
            y_true[layer][j, i, k, 5 + c] = 1 - self.label_smooth_factor
        else:
            y_true[layer][j, i, k, 5 + c] = 1.

    def get_best_anchor_boxes(self, iou, num_layers, grid_shapes, true_boxes, y_true):
        """Get best anchor boxes.

        Each box is assigned to the layer/slot of its best-IOU anchor, then
        additionally to every anchor whose IOU exceeds ``iou_threshold``.
        NOTE(review): `iou` rows follow the valid-box subset while `true_boxes`
        is indexed with the same `t`; these can diverge when some boxes have
        non-positive width — behavior preserved from the original code.
        """
        best_anchor = np.argmax(iou, axis=-1)
        for t, n in enumerate(best_anchor):
            for layer in range(num_layers):
                if n in self.anchor_mask[layer]:
                    self._assign_anchor(y_true, layer, n, t, grid_shapes, true_boxes)

        threshold_anchor = (iou > self.iou_threshold)
        for t in range(threshold_anchor.shape[0]):
            for n in range(threshold_anchor.shape[1]):
                if not threshold_anchor[t][n]:
                    continue
                for layer in range(num_layers):
                    if n not in self.anchor_mask[layer]:
                        continue
                    self._assign_anchor(y_true, layer, n, t, grid_shapes, true_boxes)
        return y_true

    def pad_gt_boxes(self, y_true):
        """Pad ground truth bboxes for avoiding dynamic shape.

        Returns a list of three (max_boxes, 4) float32 arrays, one per layer,
        holding the assigned (normalized) boxes, zero-padded or truncated.
        """
        padded = []
        for layer_true in y_true:
            pad = np.zeros(shape=[self.max_boxes, 4], dtype=np.float32)
            # Confidence channel marks which cells actually hold a gt box.
            conf_mask = np.reshape(layer_true[..., 4:5], [-1])
            layer_boxes = np.reshape(layer_true[..., 0:4], [-1, 4])
            layer_boxes = layer_boxes[conf_mask == 1]
            if layer_boxes.shape[0] < self.max_boxes:
                pad[:layer_boxes.shape[0]] = layer_boxes
            else:
                pad = layer_boxes[:self.max_boxes]
            padded.append(pad)
        return padded


@ClassFactory.register(ModuleType.PIPELINE)
class PerBatchMap:
    """Batch data preprocess map.

    Args:
        out_orders : keys to collect, in output order.
        multi_scales : candidate resize sizes; one is drawn per batch.
        output_type_dict : per-key output dtypes handed to Collect.
        pipeline : preprocess config; transforms are built from it once.
    """

    def __init__(self, out_orders, multi_scales=None, output_type_dict=None,
                 pipeline=None):
        """Constructor for PerBatchMap."""
        self.out_orders = out_orders
        self.multi_scales = multi_scales
        self.output_type_dict = output_type_dict
        self.preprocess_pipeline = pipeline
        if pipeline is not None:
            self.opts = build_transforms(pipeline)

    def __call__(self, imgs, anno, x1, x2, x3, x4, x5, batch_info):
        """Preprocess pipeline for image."""
        if self.preprocess_pipeline is None:
            return None
        return self.data_augment(imgs, anno, x1, x2, x3, x4, x5, batch_info)

    def tuple_batch(self, result_dict):
        """Transform tuple type for batch results."""
        return Collect(self.out_orders, self.output_type_dict)(result_dict)

    def data_augment(self, *args):
        """Run the transform pipeline over every sample and collect the batch."""
        if self.preprocess_pipeline is None:
            return args

        batch_results = {}
        # One resize scale is shared by the whole batch.
        scale = random.choice(self.multi_scales)

        for image, annotation in zip(args[0], args[1]):
            sample = (image, annotation, scale)
            for transform in self.opts:
                sample = transform(sample)
            merge_batch(sample, batch_results)

        return self.tuple_batch(batch_results)


def merge_batch(results, result_dict):
    """Append every value in `results` onto the matching list in `result_dict`.

    Missing keys are created with a fresh list, so repeated calls accumulate
    per-key batches in place.
    """
    for key, value in results.items():
        result_dict.setdefault(key, []).append(value)


def pil_image_reshape(interp):
    """Translate an integer interpolation code (0-4) into a PIL resampling filter.

    Codes 0 and 3 both map to nearest-neighbor (3 stands in for the missing
    area-based filter). Any other code raises KeyError, as before.
    """
    if interp in (0, 3):
        return Image.NEAREST
    if interp == 1:
        return Image.BILINEAR
    if interp == 2:
        return Image.BICUBIC
    if interp == 4:
        return Image.LANCZOS
    raise KeyError(interp)


def get_interp_method(interp, sizes=()):
    """
    Get the interpolation method for resize functions.
    The major purpose of this function is to wrap a random interp method selection
    and a auto-estimation method.

    Note:
        When shrinking an image, it will generally look best with AREA-based
        interpolation, whereas, when enlarging an image, it will generally look best
        with Bicubic or Bilinear.

    Args:
        interp (int): Interpolation method for all resizing operations.

            - 0: Nearest Neighbors Interpolation.
            - 1: Bilinear interpolation.
            - 2: Bicubic interpolation over 4x4 pixel neighborhood.
            - 3: Nearest Neighbors. Originally it should be Area-based, as we cannot find Area-based,
              so we use NN instead. Area-based (resampling using pixel area relation).
              It may be a preferred method for image decimation, as it gives moire-free results.
              But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default).
            - 4: Lanczos interpolation over 8x8 pixel neighborhood.
            - 9: Cubic for enlarge, area for shrink, bilinear for others.
            - 10: Random select from interpolation method mentioned above.

        sizes (tuple): Format should like (old_height, old_width, new_height, new_width),
            if None provided, auto(9) will return Area(2) anyway. Default: ()

    Returns:
        int, interp method from 0 to 4.

    Raises:
        ValueError: if `interp` is not one of 0-4, 9, 10.
    """
    if interp == 9:
        if sizes:
            assert len(sizes) == 4
            oh, ow, nh, nw = sizes
            if nh > oh and nw > ow:
                return 2  # enlarging: bicubic
            if nh < oh and nw < ow:
                return 0  # shrinking: NN (stands in for area-based, see code 3)
            return 1  # mixed: bilinear
        return 2
    if interp == 10:
        # Fix: `random` is numpy.random, whose randint EXCLUDES the upper
        # bound — randint(0, 4) could never return 4 (Lanczos). Use 5 so all
        # methods 0-4 are selectable, as the docstring promises.
        return random.randint(0, 5)
    if interp not in (0, 1, 2, 3, 4):
        raise ValueError('Unknown interp method %d.' % interp)
    return interp


def rescale_with_scale(img, scale):
    """Rescale image with scale value.

    Fits `img` inside `scale` (a pair of edge limits) while keeping its aspect
    ratio, returning the resized image and the scale factor used.
    """
    height, width = img.shape[:2]
    # The factor is limited by both the long and the short edge constraints.
    long_limit, short_limit = max(scale), min(scale)
    scale_factor = min(long_limit / max(height, width), short_limit / min(height, width))

    # Round half-up to the nearest pixel.
    new_w = int(width * float(scale_factor) + 0.5)
    new_h = int(height * float(scale_factor) + 0.5)
    rescaled_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

    return rescaled_img, scale_factor
