import torch
import torch.nn as nn

import torchvision.transforms.functional as F

from torchvision.transforms import Compose
from .rrc import RandomResizeCrop, RandomMaskCrop
from .mixup import LogMixupExp
from datasets.dataio import TorchScaler

class RandomCrop(nn.Module):
    """Crop a random window whose size is a fraction of the input's spatial size.

    ``crop_rate`` is a pair ``(rate_h, rate_w)`` applied to the last two
    dimensions of the input tensor.
    """

    def __init__(self, crop_rate):
        super().__init__()
        self.crop_rate = crop_rate

    @staticmethod
    def get_params(img, output_size):
        """Choose a random top-left corner for a crop of ``output_size``.

        Returns ``(top, left, height, width)`` suitable for ``F.crop``.
        Raises ``ValueError`` if the requested crop exceeds the image size.
        """
        _, height, width = F.get_dimensions(img)
        target_h, target_w = output_size

        if height < target_h or width < target_w:
            raise ValueError(f"Required crop size {(target_h, target_w)} is larger than input image size {(height, width)}")

        # Exact-size crop: nothing random to draw, return the full extent.
        if width == target_w and height == target_h:
            return 0, 0, height, width

        top = torch.randint(0, height - target_h + 1, size=(1,)).item()
        left = torch.randint(0, width - target_w + 1, size=(1,)).item()
        return top, left, target_h, target_w

    def forward(self, x):
        """Crop ``x`` at a random position to ``crop_rate`` of its spatial size."""
        target_size = (
            int(x.shape[-2] * self.crop_rate[0]),
            int(x.shape[-1] * self.crop_rate[1]),
        )
        top, left, height, width = self.get_params(x, target_size)
        return F.crop(x, top, left, height, width)

class ComposedTransform(nn.Module):
    """Run a two-stage augmentation: mixup first, then random mask crop (RMC).

    ``transform_list`` holds the two callables in order: index 0 takes one
    argument (the input), index 1 takes the intermediate result plus optional
    precomputed RMC parameters.
    """

    def __init__(self, transform_list):
        super().__init__()
        self.transform_list = transform_list

    def forward(self, x, rmc_params=None):
        """Apply both stages; ``rmc_params`` is forwarded to the RMC stage."""
        mixed = self.transform_list[0](x)
        return self.transform_list[1](mixed, rmc_params)

class BYOLATransform(nn.Module):
    """Produce two augmented views of a log-mel spectrogram for BYOL-A training.

    Per sample: random time/freq crop -> (mixup + random mask crop) per view
    -> per-instance min-max normalization.

    Returns ``(view_1, view_2, crop_pos)``; ``crop_pos`` carries the mask-crop
    positions returned by the transforms (a pair for a single 2-D sample, a
    per-sample list for a 3-D batch).
    """

    def __init__(self, crop_size, rmc_crop_size, rmc_freq_scale, rmc_time_scale):
        super().__init__()
        self.random_crop = RandomCrop(crop_size)  # crop length in time
        # NOTE(review): both views share ONE RandomMaskCrop instance; each call
        # still draws independently since the module is stochastic.
        rrc = RandomMaskCrop(virtual_crop_scale=rmc_crop_size, freq_scale=rmc_freq_scale, time_scale=rmc_time_scale)
        self.transform_1 = ComposedTransform([LogMixupExp(), rrc])
        self.transform_2 = ComposedTransform([LogMixupExp(), rrc])
        self.post_norm = TorchScaler("instance", "minmax", [1, 2])

    def forward(self, x):
        """Build the two views for a 2-D sample or a 3-D batch of samples."""
        if x.dim() == 3:
            view_1, view_2, crop_pos = [], [], []
            for sample in x:
                sample = self.random_crop(sample.unsqueeze(0))
                # Bug fix: the original compared len(sample) (a tensor
                # dimension) instead of checking for a tuple return, and read
                # sample[1] AFTER overwriting `sample` with sample[0].
                if isinstance(sample, (tuple, list)) and len(sample) == 2:
                    sample, pos = sample
                else:
                    pos = None
                # Bug fix: the transforms return (tensor, mask_pos, ...); the
                # original appended the whole tuple, which breaks torch.cat.
                out = self.transform_1(sample)
                view_1.append(out[0] if isinstance(out, (tuple, list)) else out)
                # Second view is the un-augmented crop, as in the original.
                view_2.append(sample)
                crop_pos.append(pos)
            view_1 = self.post_norm(torch.cat(view_1, dim=0))
            view_2 = self.post_norm(torch.cat(view_2, dim=0))
        elif x.dim() == 2:
            x = self.random_crop(x.unsqueeze(0))
            view_1, pos_1, _ = self.transform_1(x)
            # Bug fix: the original applied transform_1 twice; transform_2 was
            # constructed for the second view and never used.
            view_2, pos_2, _ = self.transform_2(x)
            view_1 = self.post_norm(view_1).squeeze(0)
            view_2 = self.post_norm(view_2).squeeze(0)
            crop_pos = (pos_1, pos_2)
        else:
            # Bug fix: the original fell through and raised NameError on the
            # unbound `view_1`; fail with an explicit message instead.
            raise ValueError(f"Expected a 2-D or 3-D input, got shape {tuple(x.shape)}")
        return view_1, view_2, crop_pos