import numpy as np
import torch
import copy
from PIL import Image
import random
from .utils import read_image
from utils import resize_padding
from .argument import argument_with_params

class MapDataset(torch.utils.data.Dataset):
    """Wrap *dataset* so that *map_func* is applied lazily on item access."""

    def __init__(self, dataset, map_func):
        self._source = dataset
        self._transform = map_func

    def __len__(self):
        return len(self._source)

    def __getitem__(self, index):
        item = self._source[index]
        return self._transform(item)
        

class DatasetMapper:
    """Map one dataset metadata dict to model-ready inputs.

    Reads the image from disk, optionally applies training-time augmentation,
    resizes with padding to a fixed size, rescales annotation boxes by the
    same ratio, and converts the image to CHW layout.
    """

    def __init__(self, cfg, is_train=True):
        """
        Args:
            cfg: config node providing ``MODEL.INPUT.SIZE`` and
                ``LOADER.ARGUMENT``.
            is_train (bool): apply augmentation only when True.
        """
        self.is_train = is_train
        self.resize_size = cfg.MODEL.INPUT.SIZE
        self.argument_method = cfg.LOADER.ARGUMENT

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image in dataset. Must
                contain 'file_name', 'preprocess', 'params' and 'annotations'.

        Returns:
            dict: deep copy of ``dataset_dict`` with an added 'image' entry
            in (C, H, W) layout, bboxes rescaled to the resized image, and
            'params' updated with the resize parameters.
        """
        # Deep-copy so the in-place edits below never mutate the cached dataset.
        dataset_dict = copy.deepcopy(dataset_dict)

        image = read_image(dataset_dict['file_name'],
                           preprocess_method=dataset_dict['preprocess'],
                           params=dataset_dict['params'])

        if self.is_train:
            image, dataset_dict['annotations'] = argument_with_params(
                image, dataset_dict['annotations'],
                self.argument_method, params=dataset_dict['params'])

        image, resize_params = resize_padding(image, size=self.resize_size)
        dataset_dict['params'].update(resize_params)

        # Rescale every bbox coordinate by the resize ratio (hoisted out of
        # the loop; it is invariant across annotations).
        scale = resize_params['resize']['scale_ratio']
        for annotation in dataset_dict['annotations']:
            annotation['bbox'] = [coord * scale for coord in annotation['bbox']]

        # Ensure a channel axis exists (grayscale HxW -> HxWx1), then HWC -> CHW.
        if len(image.shape) == 2:
            image = np.expand_dims(image, -1)
        assert len(image.shape) == 3
        dataset_dict['image'] = image.transpose(2, 0, 1)

        return dataset_dict
        

class MultiScaleDatasetMapper:
    """DatasetMapper variant that picks a new target size every batch.

    A random size is drawn from ``cfg.MODEL.INPUT.SIZES`` once per
    ``batch_size`` consecutive calls, so all samples of one batch share the
    same resolution.

    NOTE(review): the scale-switching counter is per-instance state; with a
    multi-process DataLoader each worker holds its own copy of this mapper,
    so samples of one batch may come from different workers and mix scales —
    confirm intended usage.
    """

    def __init__(self, cfg, is_train=True):
        """
        Args:
            cfg: config node providing ``MODEL.INPUT.SIZES`` (or
                ``MODEL.INPUT.SIZE`` as fallback), ``LOADER.BATCH_SIZE``
                and ``LOADER.ARGUMENT``.
            is_train (bool): apply augmentation only when True.
        """
        self.is_train = is_train
        # Fall back to the single fixed size when SIZES is not configured.
        self.resize_sizes = cfg.MODEL.INPUT.SIZES if "SIZES" in cfg.MODEL.INPUT else [cfg.MODEL.INPUT.SIZE]
        self.batch_size = cfg.LOADER.BATCH_SIZE
        self.argument_method = cfg.LOADER.ARGUMENT
        self.resize_size = None
        self.count = 0

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image in dataset. Must
                contain 'file_name', 'preprocess', 'params' and 'annotations'.

        Returns:
            dict: deep copy of ``dataset_dict`` with an added 'image' entry
            in (C, H, W) layout, bboxes rescaled to the resized image, and
            'params' updated with the resize parameters.
        """
        # Draw a new target size at the start of every batch; the counter is
        # reset to keep it bounded.
        if self.count % self.batch_size == 0:
            self.resize_size = random.choice(self.resize_sizes)
            self.count = 0
        self.count += 1

        # Deep-copy so the in-place edits below never mutate the cached dataset.
        dataset_dict = copy.deepcopy(dataset_dict)

        image = read_image(dataset_dict['file_name'],
                           preprocess_method=dataset_dict['preprocess'],
                           params=dataset_dict['params'])

        if self.is_train:
            image, dataset_dict['annotations'] = argument_with_params(
                image, dataset_dict['annotations'],
                self.argument_method, params=dataset_dict['params'])

        image, resize_params = resize_padding(image, size=self.resize_size)
        dataset_dict['params'].update(resize_params)

        # Rescale every bbox coordinate by the resize ratio (hoisted out of
        # the loop; it is invariant across annotations).
        scale = resize_params['resize']['scale_ratio']
        for annotation in dataset_dict['annotations']:
            annotation['bbox'] = [coord * scale for coord in annotation['bbox']]

        # Ensure a channel axis exists (grayscale HxW -> HxWx1), then HWC -> CHW.
        if len(image.shape) == 2:
            image = np.expand_dims(image, -1)
        assert len(image.shape) == 3
        dataset_dict['image'] = image.transpose(2, 0, 1)

        return dataset_dict
        

