import torch.nn as nn
from .backbone import get_backbone


class SimCLR(nn.Module):
    """SimCLR model: a backbone encoder followed by a projection head.

    Following Chen et al. (2020), the encoder f(.) (commonly a ResNet)
    yields features h_i after average pooling, and a one-hidden-layer MLP
    g(.) projects them to the contrastive space:
    z_i = g(h_i) = W(2) * ReLU(W(1) * h_i).
    """

    def __init__(self, backbone, out_dim=128):
        """
        Args:
            backbone: identifier handed to ``get_backbone`` to construct
                the encoder network.
            out_dim: output dimensionality of the projection head.
        """
        super().__init__()

        self.encoder = get_backbone(backbone)
        # Width of the encoder's penultimate features, read off the
        # original fc layer before that layer is replaced below.
        feature_dim = self.encoder.fc.weight.shape[1]
        self.n_features = feature_dim

        # Replace the classification head with the MLP projection head
        # g(.) described in the class docstring (bias-free linear layers).
        projector = nn.Sequential(
            nn.Linear(feature_dim, feature_dim, bias=False),
            nn.ReLU(),
            nn.Linear(feature_dim, out_dim, bias=False),
        )
        self.encoder.fc = projector

    def forward(self, x_i, x_j):
        """Encode and project both augmented views with the shared encoder."""
        projection_a = self.encoder(x_i)
        projection_b = self.encoder(x_j)
        return projection_a, projection_b
