from unittest.mock import Base
import torch
import torch.nn as nn

class Transpose3D(nn.Module):
    """Swap the last two axes of a rank-3 tensor; pass anything else through.

    Used to shuttle between [B, T, C] and [B, C, T] layouts around modules
    such as ``nn.BatchNorm1d`` that expect channels in dim 1.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Only rank-3 inputs are transposed; any other rank is returned as-is.
        if x.dim() == 3:
            return x.transpose(1, 2)
        return x

class BaseCls(nn.Module):
    """Abstract classifier head producing strong (per-frame) and weak (clip) outputs.

    NOTE(review): the original docstring stated the input as [B, n_in, T], but
    every subclass applies ``nn.Linear(n_in, ...)`` directly to ``x`` (which
    acts on the LAST dimension) and transposes the strong output at the end —
    so the effective input layout appears to be [B, T, n_in]. Confirm against
    callers.
    """

    def __init__(self, n_in, n_classes, dropout):
        super().__init__()
        # Input feature dimension.
        self.n_in = n_in
        # Number of target classes.
        self.n_classes = n_classes
        # Dropout probability; subclasses may replace this with an nn.Dropout.
        self.dropout = dropout

    def forward(self, x):
        """Forward pass through the classifier.
        input:
            x: [B, T, n_in] feature sequence (see class note on layout)
        output:
            strong_logits: [B, n_classes, T]
            weak_logits: [B, n_classes]"""
        raise NotImplementedError("This method should be overridden by subclasses.")

class EmbeddingCls(BaseCls):
    """MLP head that maps features through a 4x-wide hidden layer back to ``n_in``.

    Unlike the other heads, ``forward`` returns embeddings only — a single
    tensor, not a (strong, weak) pair.
    """

    def __init__(self, n_in, n_classes, dropout=0):
        super().__init__(n_in, n_classes, dropout)
        hidden = n_in * 4
        # BatchNorm1d expects channels in dim 1, so it is bracketed by
        # Transpose3D modules to flip [B, T, C] <-> [B, C, T].
        self.mlps = nn.Sequential(
            nn.Linear(n_in, hidden, bias=False),
            Transpose3D(),
            nn.BatchNorm1d(hidden),
            Transpose3D(),
            nn.ReLU(inplace=False),
            nn.Linear(hidden, n_in, bias=False),
        )

    def forward(self, x):
        """Return the transformed embeddings for ``x``."""
        return self.mlps(x)

class LinearCls(BaseCls):
    """Plain linear classifier head: dropout -> linear -> sigmoid."""

    def __init__(self, n_in, n_classes, dropout=0):
        super(LinearCls, self).__init__(n_in, n_classes, dropout)
        self.linear = nn.Linear(n_in, n_classes)
        # Replaces the float stored by BaseCls with an actual dropout module.
        self.dropout = nn.Dropout(dropout)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Forward pass.
        input:
            x: [B, T, n_in]
        output:
            strong: [B, n_classes, T]
            weak: [B, n_classes]
        """
        x = self.dropout(x)
        strong_preds = self.sigmoid(self.linear(x))  # [B, T, n_classes]
        # BUG FIX: weak predictions must pool over time (dim 1) to yield
        # [B, n_classes], matching the BaseCls contract and the attention
        # heads (which pool with .sum(1)). The original `.mean(-1)` averaged
        # over the class dimension and returned [B, T].
        weak_preds = strong_preds.mean(1)
        return strong_preds.transpose(1, 2), weak_preds


class LinearAttentionCls(BaseCls):
    """Linear head with a learned softmax-attention pooling for weak outputs."""

    def __init__(self, n_in, n_classes, dropout=0.5):
        super(LinearAttentionCls, self).__init__(n_in, n_classes, dropout)
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(n_in, n_classes)
        self.sigmoid = nn.Sigmoid()
        self.linear_softmax = nn.Linear(n_in, n_classes)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """Return (strong [bs, nclass, frames], weak [bs, nclass])."""
        feats = self.dropout(x)
        # Per-frame class probabilities: [bs, frames, nclass].
        strong = self.sigmoid(self.linear(feats))
        # Attention weights from a parallel linear branch, softmaxed over the
        # last dim; clamped away from zero so the division below is safe.
        attn = self.softmax(self.linear_softmax(feats))
        attn = torch.clamp(attn, min=1e-7, max=1)
        # Attention-weighted average over frames (dim 1) -> [bs, nclass].
        weak = (strong * attn).sum(1) / attn.sum(1)
        return strong.transpose(1, 2), weak

class NonLinearAttentionCls(LinearAttentionCls):
    """Attention head whose strong branch is a two-layer MLP rather than a single linear."""

    def __init__(self, n_in, n_classes, dropout=0):
        super(NonLinearAttentionCls, self).__init__(n_in, n_classes, dropout)
        # Override the parent's strong branch with a small non-linear MLP.
        self.linear = nn.Sequential(
            nn.Linear(n_in, n_in),
            nn.ReLU(),
            nn.Linear(n_in, n_classes),
        )
        # Attention branch stays a single linear layer (wrapped in Sequential
        # for structural symmetry with `self.linear`).
        self.linear_softmax = nn.Sequential(
            nn.Linear(n_in, n_classes),
        )