import os
import os.path
from typing import Callable, Optional

import mmcv
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image
from torch.utils.data import DataLoader
from torchvision.datasets import VisionDataset


def resize_and_pad(image: np.ndarray, allow_scale_up: bool = False):
    """Letterbox a single HWC image to 640x640.

    The image is scaled with its aspect ratio preserved so it fits inside
    a 640x640 box, then zero-padded on the bottom/right edges (the image
    stays anchored at the top-left corner).

    Args:
        image: HWC (or HW) ndarray.
        allow_scale_up: when False, images already smaller than 640x640
            are only padded, never enlarged (for better test mAP).

    Returns:
        Tuple of (padded_image, scale_ratio, [top, left, bottom, right])
        where the last element lists the applied padding in pixels.
    """
    target_h, target_w = 640, 640
    src_h, src_w = image.shape[:2]

    # Uniform scale factor that fits the source inside the target box.
    scale = min(target_h / src_h, target_w / src_w)
    if not allow_scale_up:
        scale = min(scale, 1.0)

    new_h = int(round(src_h * scale))
    new_w = int(round(src_w * scale))

    if (src_h, src_w) != (new_h, new_w):
        # mmcv.imresize takes (width, height).
        image = mmcv.imresize(image, (new_w, new_h))

    # Remaining pixels to fill; all padding goes to the bottom/right.
    pad_bottom = target_h - new_h
    pad_right = target_w - new_w

    if pad_bottom or pad_right:
        fill = 0
        if image.ndim == 3:
            # mmcv.impad wants a per-channel constant for color images.
            fill = tuple(0 for _ in range(image.shape[2]))

        image = mmcv.impad(
            img=image,
            # padding order expected by mmcv: (left, top, right, bottom)
            padding=(0, 0, pad_right, pad_bottom),
            pad_val=fill,
            padding_mode='constant')

    return image, scale, [0, 0, pad_bottom, pad_right]


class CocoDetectionDataset(VisionDataset):
    """Dataset yielding letterboxed COCO-style images from a flat directory.

    Every ``*.jpg`` file directly under ``root`` becomes one sample; no
    annotation file is read.
    """

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
    ) -> None:
        # Sort so the index -> file mapping is deterministic
        # (os.listdir order is filesystem-dependent).
        self._imgs = sorted(
            img for img in os.listdir(root) if img.endswith('.jpg'))
        print(f'{len(self._imgs)} imgs has been loaded.')
        # VisionDataset positional order is (root, transforms, transform,
        # target_transform).
        super().__init__(root, transforms, transform, target_transform)

    def _load_image(self, path: str) -> torch.Tensor:
        """Load one image under ``self.root`` as a float CHW tensor."""
        img = Image.open(os.path.join(self.root, path)).convert("RGB")
        img = F.to_tensor(img).float()
        return img

    def __getitem__(self, index: int):
        """Return one sample as a list:
        [img: torch.Tensor, path: str, scale_factor: float, padding: int4]

        img: [3, 640, 640] letterboxed image (batch dim added by DataLoader)
        path: img file name (relative to root)
        scale_factor: resizing factor applied by resize_and_pad
        padding: [top, left, bottom, right] padding in pixels
        """
        path = self._imgs[index]
        img = self._load_image(path)

        if self.transform is not None:
            img = self.transform(img)

        # resize_and_pad works on HWC numpy arrays: convert CHW tensor
        # out and back.
        img, scale_factor, padding = resize_and_pad(img.permute((1, 2, 0)).numpy())
        img = torch.tensor(img).permute((2, 0, 1))
        return [img, path, scale_factor, padding]

    def __len__(self) -> int:
        return len(self._imgs)


def load_coco_detection_dataset(
    data_dir: str, batchsize: int = 1,
    shuffle: bool = False) -> DataLoader:
    """Build a DataLoader over the ``*.jpg`` images found in ``data_dir``.

    Args:
        data_dir: directory containing the .jpg images.
        batchsize: samples per batch.
        shuffle: whether to reshuffle samples every epoch.

    Returns:
        A single-process DataLoader yielding
        [img, path, scale_factor, padding] batches.
    """
    dataset = CocoDetectionDataset(root=data_dir)

    # num_workers=0: load in the main process — avoids worker-spawn
    # issues and is sufficient for evaluation-style loading.
    data_loader = DataLoader(
        dataset, batch_size=batchsize,
        shuffle=shuffle, num_workers=0)

    return data_loader
