import torch
import einops

import torch.nn as nn
from lightly.models.modules import SimCLRProjectionHead, MoCoProjectionHead


class LinearLayer(nn.Module):
    """Linear transform with optional BatchNorm1d and ReLU.

    Supports 2-D inputs of shape (batch, features) — the ViT path — and
    3-D inputs of shape (batch, length, channels) — the ResNet path, where
    the tensor is transposed to (batch, channels, length) around the batch
    norm because ``nn.BatchNorm1d`` expects channels in dim 1.
    """

    def __init__(self, input_dim, output_dim, use_bias=True, use_bn=True, use_relu=True):
        super().__init__()
        self.use_bn = use_bn
        self.use_relu = use_relu
        self.linear = nn.Linear(input_dim, output_dim, bias=use_bias)
        # Submodules are kept as None when disabled so callers can inspect them.
        self.bn = nn.BatchNorm1d(output_dim) if use_bn else None
        self.relu = nn.ReLU() if use_relu else None

    def forward(self, x):
        """Apply linear -> (optional) batch norm -> (optional) ReLU."""
        out = self.linear(x)
        if self.use_bn:
            if out.ndim == 3:
                # BatchNorm1d wants (N, C, L) but the activations arrive as
                # (N, L, C): transpose in, normalize, transpose back.
                out = self.bn(out.permute(0, 2, 1)).permute(0, 2, 1)
            elif out.ndim == 2:
                out = self.bn(out)
        if self.use_relu:
            out = self.relu(out)
        return out

class NonLinearProjectionHead(nn.Module):
    """MLP projection head built from stacked ``LinearLayer`` blocks.

    Hidden layers: Linear (no bias) -> BatchNorm -> ReLU.
    Output layer:  Linear (no bias) -> BatchNorm (no ReLU).

    Args:
        input_dim: dimensionality of the incoming embeddings.
        hidden_dim: width of the hidden layers.
        proj_out_dim: dimensionality of the projected output.
        num_layers: total number of LinearLayer blocks (>= 1).
    """

    def __init__(self,
                 input_dim,
                 hidden_dim,
                 proj_out_dim,
                 num_layers
                 ):

        super().__init__()
        self.linear_layers = nn.ModuleList()

        for j in range(num_layers):
            # The first layer always consumes input_dim; later layers consume
            # hidden_dim. Bug fix: the final layer previously hard-coded
            # hidden_dim as its input, which broke the num_layers == 1 case
            # whenever input_dim != hidden_dim. For num_layers >= 2 the
            # constructed layers are identical to before.
            in_dim = input_dim if j == 0 else hidden_dim
            is_last = j == num_layers - 1
            self.linear_layers.append(
                LinearLayer(
                    in_dim,
                    proj_out_dim if is_last else hidden_dim,
                    use_bias=False,
                    use_bn=True,
                    use_relu=not is_last,  # no ReLU on the projection output
                )
            )

    def forward(self, x):
        """Run the input through every projection layer in order."""
        for layer in self.linear_layers:
            x = layer(x)
        return x


class ImageLocalProjection(nn.Module):
    """Project per-timestep (local) image embeddings into contrastive space.

    Each of the ``t_contrastive`` temporal positions is projected
    independently by folding the time axis into the batch axis, applying a
    shared SimCLR projection head, and then restoring the time axis.
    """

    def __init__(
        self,
        encoded_dim: int = 512,
        d_contrastive: int = 128,
        t_contrastive: int = 10,
    ):
        super().__init__()
        self.t_contrastive = t_contrastive
        self.local_projection = SimCLRProjectionHead(
            input_dim=encoded_dim, output_dim=d_contrastive
        )

    def forward(self, local_embeddings: torch.Tensor) -> torch.Tensor:
        t = self.t_contrastive
        # (b, c, t) -> (b*t, c): fold time into the batch for the shared head.
        flat = einops.rearrange(local_embeddings, 'b c t -> (b t) c', t=t)
        projected = self.local_projection(flat)
        # (b*t, d) -> (b, t, d): restore the time axis.
        return einops.rearrange(projected, '(b t) d -> b t d', t=t)


class ImageGlobalProjectionMultiModal(nn.Module):
    """SimCLR projection head for global image embeddings (multimodal branch).

    Thin wrapper around ``SimCLRProjectionHead`` mapping ``encoded_dim`` ->
    ``d_contrastive`` through ``num_layers`` layers of width ``hidden_dim``.
    """

    def __init__(
        self,
        encoded_dim: int = 512,
        hidden_dim: int = 512,
        d_contrastive: int = 128,
        num_layers: int = 2,
    ):
        super().__init__()
        self.global_projection_multimodal = SimCLRProjectionHead(
            input_dim=encoded_dim,
            hidden_dim=hidden_dim,
            num_layers=num_layers,
            output_dim=d_contrastive,
        )

    def forward(self, global_embeddings: torch.Tensor) -> torch.Tensor:
        # Delegate straight to the lightly projection head.
        return self.global_projection_multimodal(global_embeddings)



class ImageGlobalProjectionUnimodalSimCLR(nn.Module):
    """SimCLR projection head for global image embeddings (unimodal branch).

    Thin wrapper around ``SimCLRProjectionHead`` mapping ``encoded_dim`` ->
    ``output_dim``; ``batch_norm`` toggles batch norm inside the head.
    """

    def __init__(
        self,
        encoded_dim: int = 512,
        hidden_dim: int = 512,
        output_dim: int = 128,
        num_layers: int = 2,
        batch_norm: bool = True,
    ):
        super().__init__()
        self.global_projection_unimodal = SimCLRProjectionHead(
            input_dim=encoded_dim,
            hidden_dim=hidden_dim,
            output_dim=output_dim,
            num_layers=num_layers,
            batch_norm=batch_norm,
        )

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        # Delegate straight to the lightly projection head.
        return self.global_projection_unimodal(embeddings)
    
class ImageGlobalProjectionUnimodalMoCo(nn.Module):
    """MoCo projection head for global image embeddings (unimodal branch).

    Thin wrapper around ``MoCoProjectionHead`` mapping ``encoded_dim`` ->
    ``output_dim``; ``batch_norm`` defaults to False here, matching the
    MoCo-style head configuration.
    """

    def __init__(
        self,
        encoded_dim: int = 512,
        hidden_dim: int = 512,
        output_dim: int = 128,
        batch_norm: bool = False,
        num_layers: int = 2
    ):
        super().__init__()
        self.global_projection_unimodal = MoCoProjectionHead(
            input_dim=encoded_dim,
            hidden_dim=hidden_dim,
            output_dim=output_dim,
            num_layers=num_layers,
            batch_norm=batch_norm,
        )

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        # Delegate straight to the lightly projection head.
        return self.global_projection_unimodal(embeddings)