# Copyright (c) Ruopeng Gao. All Rights Reserved.

import torch
import torch.nn as nn
from PIL import Image
from torchvision.transforms import v2, InterpolationMode

from torch.utils.data import Dataset
from utils.nested_tensor import nested_tensor_from_tensor_list


class ResizeShortestEdge(nn.Module):
    """Resize so the shorter edge reaches a target length, capping the longer edge.

    If scaling the shorter edge to ``max_shorter`` would push the longer edge
    past ``max_longer``, the target is shrunk so the longer edge fits the cap.
    """

    def __init__(
            self,
            max_shorter: int,
            max_longer: int | None = None,
            interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ):
        super().__init__()
        self.max_shorter = max_shorter
        # A non-positive cap is treated the same as "no cap at all".
        if max_longer is not None and max_longer <= 0:
            max_longer = None
        self.max_longer = max_longer
        self.interpolation = interpolation

    @staticmethod
    def _get_hw(image):
        """Return (height, width) for a CHW tensor or a PIL image."""
        if isinstance(image, torch.Tensor):
            h, w = image.shape[-2], image.shape[-1]
            return h, w
        if isinstance(image, Image.Image):
            w, h = image.size  # PIL reports (width, height)
            return h, w
        raise TypeError(f"Unsupported image type {type(image)} for resizing.")

    def forward(self, image):
        height, width = self._get_hw(image)
        # Degenerate (empty) images pass through untouched.
        if height == 0 or width == 0:
            return image

        shorter = min(height, width)
        longer = max(height, width)
        target = float(self.max_shorter)

        cap = self.max_longer
        if cap is not None:
            # Would the longer edge overshoot the cap after scaling?
            if longer / shorter * target > cap:
                # Shrink the shorter-edge target so the longer edge fits.
                target = max(1.0, cap * shorter / longer)

        # Derive the output size from the target, preserving aspect ratio.
        if width < height:
            out_w = int(round(target))
            out_h = max(1, int(round(target * height / width)))
        else:
            out_h = int(round(target))
            out_w = max(1, int(round(target * width / height)))

        return v2.functional.resize(
            image,
            size=[out_h, out_w],
            interpolation=self.interpolation,
        )


class SeqDataset(Dataset):
    """Frame-by-frame dataset over one video sequence for inference.

    Each item is a single frame: loaded from disk, resized so its shorter
    edge is at most ``max_shorter`` (longer edge capped at ``max_longer``),
    normalized with ImageNet statistics, and wrapped in a padded nested
    tensor together with its source path.
    """

    def __init__(
            self,
            seq_info,
            image_paths,
            max_shorter: int = 800,
            max_longer: int = 1536,
            size_divisibility: int = 0,
            dtype=torch.float32,
    ):
        # seq_info is expected to carry at least "height" and "width" keys
        # (see seq_hw) — presumably the native resolution of the sequence.
        self.seq_info = seq_info
        self.image_paths = image_paths
        self.max_shorter = max_shorter
        self.max_longer = max_longer
        # Padding multiple for nested_tensor_from_tensor_list; 0 = no constraint.
        self.size_divisibility = size_divisibility
        # Target dtype for the model input (e.g. torch.float16 for AMP).
        self.dtype = dtype

        self.transform = v2.Compose([
            ResizeShortestEdge(
                max_shorter=self.max_shorter,
                max_longer=self.max_longer,
                interpolation=InterpolationMode.BILINEAR,
            ),
            v2.ToImage(),
            v2.ToDtype(torch.float32, scale=True),
            # ImageNet mean/std — matches common detection backbones.
            v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, item):
        """Return (NestedTensor of one transformed frame, image path)."""
        image = self._load(self.image_paths[item])
        transformed_image = self.transform(image)
        # Normalize in float32 first, then cast to the requested dtype.
        if self.dtype != torch.float32:
            transformed_image = transformed_image.to(self.dtype)
        transformed_image = nested_tensor_from_tensor_list([transformed_image], self.size_divisibility)
        return transformed_image, self.image_paths[item]

    def seq_hw(self):
        """Return the sequence's native (height, width) from seq_info."""
        return self.seq_info["height"], self.seq_info["width"]

    @staticmethod
    def _load(path):
        """Load an image as RGB, releasing the underlying file handle.

        PIL's ``Image.open`` is lazy and keeps the file open; without the
        context manager, long sequences leak file descriptors. ``convert("RGB")``
        both forces the pixel data to load and guarantees 3 channels, so
        grayscale/palette/RGBA frames no longer crash in the 3-channel
        ``v2.Normalize`` (a no-op copy for frames that are already RGB).
        """
        with Image.open(path) as image:
            return image.convert("RGB")
