from copy import deepcopy
from typing import Tuple

import numpy as np
from torchvision.transforms.functional import resize, to_pil_image


def apply_coords(coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
    """Identity coordinate transform: hand back *coords* untouched.

    ``original_size`` is accepted but ignored — presumably kept so the
    signature mirrors ``ResizeLongestSide.apply_coords``; verify against
    callers before removing it.
    """
    del original_size  # explicitly unused
    return coords


class ResizeLongestSide:
    """
    Resize images so that the longest side equals ``target_length``, and
    provide matching transforms for coordinates (and, by extension, boxes).

    Operates on numpy arrays; ``apply_image`` round-trips through PIL via
    torchvision for interpolation.
    """

    def __init__(self, target_length: int):
        # Target length (pixels) of the longer image side after resizing.
        self.target_length = target_length

    def apply_image(self, image: np.ndarray) -> np.ndarray:
        """
        Scale ``image`` so its longer side equals ``self.target_length``,
        preserving the aspect ratio.

        Assumes ``image`` is an HxW(xC) array acceptable to
        ``to_pil_image`` — TODO confirm expected dtype/layout at call sites.
        """
        target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
        # to_pil_image converts the array to a PIL image so torchvision's
        # resize can interpolate; np.array converts the result back.
        return np.array(resize(to_pil_image(image), target_size))

    def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
        """
        Rescale (x, y) coordinates given for an image of ``original_size``
        (h, w) so they match the resized image.

        Returns a new float array; the input array is left unmodified.
        The last axis is interpreted as (x, y): index 0 scales by the
        width ratio, index 1 by the height ratio.
        """
        old_h, old_w = original_size
        new_h, new_w = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length)
        # astype(float) already returns a copy, so the deepcopy the original
        # code performed here was redundant work with identical behavior.
        coords = coords.astype(float)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        return coords

    @staticmethod
    def get_preprocess_shape(old_h: int, old_w: int, long_side_length: int) -> Tuple[int, int]:
        """
        Compute the output shape (new_h, new_w) that scales (old_h, old_w)
        so the longer side equals ``long_side_length``.
        """
        scale = long_side_length * 1.0 / max(old_h, old_w)
        new_h, new_w = old_h * scale, old_w * scale
        # int(x + 0.5) rounds half-up for the (positive) scaled lengths.
        return int(new_h + 0.5), int(new_w + 0.5)
