import torch
import torch.nn as nn
import torchvision
import segmentation_models_pytorch_3d as smp
from torchvision import transforms
from einops import rearrange


class CNN_Extractor(nn.Module):
    """Extract per-timestep spatial feature tokens from a video-like tensor.

    Pipeline (see ``forward``):
      1. Broadcast a per-timestep embedding over the spatial grid and
         concatenate it onto the image channels.
      2. Resize every frame to ``image_size`` x ``image_size`` (bicubic).
      3. Run a 3D U-Net (``segmentation_models_pytorch_3d``) whose strides
         downsample spatially but keep the temporal axis intact.
      4. Pool each output frame with a strided conv and flatten to
         ``(batch, time, tokens)``.

    Args:
        in_channels: Total channels fed to the U-Net, i.e. image channels
            plus time-embedding channels after concatenation. Default 7.
        image_size: Side length frames are resized to before the U-Net.
            Default 224 (matches the original hard-coded value).
        pool_kernel: Kernel size *and* stride of the final pooling conv.
            Default 7 (matches the original hard-coded value). Note the
            token count per frame is ``(image_size // pool_kernel) ** 2``.
    """

    def __init__(self, in_channels=7, image_size=224, pool_kernel=7):
        super().__init__()

        # Single source of truth for the resize target (was duplicated as a
        # literal 224 in both the resize call and the reshape).
        self.image_size = image_size

        # 3D U-Net: the per-stage strides only downsample H/W; the last
        # component of each tuple is 1, so the temporal depth is preserved.
        self.net = smp.Unet(
            encoder_name="resnet50",
            encoder_weights=None,
            strides=((2, 2, 1), (4, 4, 1), (2, 2, 1), (2, 2, 1), (1, 1, 1)),
            in_channels=in_channels,
        )

        # Non-overlapping learned pooling: kernel_size == stride, applied to
        # the single-channel U-Net output one frame at a time.
        self.conv = nn.Conv2d(
            in_channels=1, out_channels=1,
            kernel_size=pool_kernel, stride=pool_kernel,
        )

    def forward(self, x, time):
        """Fuse ``time`` into ``x`` and return flattened per-frame tokens.

        Args:
            x: Input tensor; assumed ``(b, t, c_img, h, w)`` so that dim -3
               is the channel axis used for concatenation — TODO confirm
               against callers.
            time: Per-timestep embedding; assumed ``(b, t, c_time)`` with
               ``c_img + c_time == in_channels`` — TODO confirm.

        Returns:
            Tensor of shape ``(b, t, tokens)`` where
            ``tokens = (image_size // pool_kernel) ** 2``.
        """
        b = x.shape[0]
        h, w = x.shape[-2:]

        # Broadcast the time embedding over the spatial grid, then attach it
        # as extra channels (dim -3 is the channel axis).
        time = time.unsqueeze(-1).unsqueeze(-1)
        time = time.repeat(1, 1, 1, h, w)
        x = torch.cat([x, time], dim=-3)

        x = self._get_resized_batch_image(x)

        # NOTE(review): assumes the 3D U-Net emits channels-then-spatial with
        # the temporal axis last, i.e. (b, c, h, w, t) — confirm against
        # segmentation_models_pytorch_3d.
        x = self.net(x)
        x = rearrange(x, 'b c h w t -> (b t) c h w')

        # Pool each frame independently, drop the singleton channel, and
        # flatten the spatial grid into a token axis.
        x = self.conv(x)
        x = x.squeeze(1)
        x = rearrange(x, '(b t) h w -> b t (h w)', b=b)
        return x

    def _get_resized_batch_image(self, batch_images):
        """Resize every frame to ``image_size`` and move depth to the last axis.

        Args:
            batch_images: Tensor of shape ``(b, d, c, h, w)``.

        Returns:
            Tensor of shape ``(b, c, image_size, image_size, d)`` — the
            channels-first, depth-last layout the 3D U-Net consumes.
        """
        size = self.image_size
        b, d, c, h, w = batch_images.shape

        # Collapse batch and depth so torchvision resizes all frames at once.
        batch_images = batch_images.reshape(-1, c, h, w)
        resized_images = torchvision.transforms.functional.resize(
            batch_images,
            size=[size, size],
            interpolation=transforms.InterpolationMode.BICUBIC,
        )

        # Restore (b, d, c, H, W), then permute to (b, c, H, W, d).
        resized_images = resized_images.reshape(b, d, c, size, size).permute(0, 2, 3, 4, 1)

        return resized_images