import torch
import torch.nn as nn


class PosEmbedding(nn.Module):
    """Fourier-feature positional embedding (NeRF-style).

    Maps an input x of shape (..., D) to
    [x, sin(2^0 x), cos(2^0 x), ..., sin(2^(N-1) x), cos(2^(N-1) x)]
    concatenated on the last dimension, giving shape (..., D * (1 + 2*N_freqs)).
    """

    def __init__(self, N_freqs):
        """
        Args:
            N_freqs (int): number of frequency octaves (powers of two).
        """
        super(PosEmbedding, self).__init__()
        self.N_freqs = N_freqs
        self.funcs = [torch.sin, torch.cos]
        # Non-persistent buffer: moves with .to(device)/.cuda() but is not
        # saved in the state dict (it is deterministically recomputable).
        # Fixes the original plain-attribute tensor being left on CPU.
        self.register_buffer(
            'freq_bands',
            2 ** torch.linspace(0, N_freqs - 1, N_freqs),
            persistent=False,
        )
        # Kept for backward compatibility with any external reader;
        # no longer used by forward() (it only supplied an unused index).
        self.freqs = list(range(N_freqs))

    def forward(self, x):
        """Embed x; returns a tensor of shape (..., x.shape[-1] * (1 + 2*N_freqs))."""
        out = [x]
        for freq in self.freq_bands:
            for func in self.funcs:
                out.append(func(freq * x))
        return torch.cat(out, dim=-1)
    
    
class MLP(nn.Module):
    """Fully connected network: Linear+ReLU head, (n_layers - 2) Linear+ReLU
    hidden blocks, and a Linear tail, optionally followed by `activation`.
    """

    def __init__(self, in_dim=32, hidden_dim=32, out_dim=3, n_layers=2, activation=nn.Sigmoid()):
        """
        Args:
            in_dim (int): input feature dimension.
            hidden_dim (int): width of hidden layers.
            out_dim (int): output feature dimension.
            n_layers (int): total number of linear layers (head + hidden + tail).
            activation: module applied to the output, or None for raw logits.
        """
        super(MLP, self).__init__()

        self.activation = activation

        # Input projection to hidden width.
        self.head = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.ReLU(True))

        # Middle blocks; empty when n_layers <= 2.
        hidden_blocks = [
            nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(True))
            for _ in range(n_layers - 2)
        ]
        self.body = nn.Sequential(*hidden_blocks)

        # Output projection, no built-in nonlinearity.
        self.tail = nn.Sequential(nn.Linear(hidden_dim, out_dim))

    def forward(self, x):
        """Run the MLP; returns (..., out_dim), activated unless activation is None."""
        features = self.head(x)
        features = self.body(features)
        logits = self.tail(features)
        if self.activation is None:
            return logits
        return self.activation(logits)
        
class EmbeddingExtractor(nn.Module):
    """Convolutional encoder producing a fixed-size embedding from an RGB image.

    Stacks (n_layers) stride-2 Conv+ReLU stages, adaptively pools to 7x7,
    then projects the flattened features to `out_dim` with a small MLP.
    """

    def __init__(self, hidden_dim=128, out_dim=32, n_layers=4, activation=None):
        """
        Args:
            hidden_dim (int): channel width of conv stages / MLP hidden width.
            out_dim (int): embedding dimension.
            n_layers (int): total number of stride-2 conv stages (head + body + tail).
            activation: module applied to the embedding, or None for raw output.
        """
        super(EmbeddingExtractor, self).__init__()
        self.n_layers = n_layers
        self.activation = activation

        def down_block(in_channels):
            # 3x3 stride-2 conv halves spatial resolution, then ReLU.
            return nn.Sequential(
                nn.Conv2d(in_channels, hidden_dim, 3, stride=2, padding=1),
                nn.ReLU(True),
            )

        self.head = down_block(3)
        self.body = nn.Sequential(*[down_block(hidden_dim) for _ in range(n_layers - 2)])
        self.tail = down_block(hidden_dim)

        # Fixed 7x7 output regardless of input resolution.
        self.pooling = nn.AdaptiveAvgPool2d((7, 7))

        self.out = nn.Sequential(
            nn.Linear(hidden_dim * 7 * 7, hidden_dim), nn.ReLU(True),
            nn.Linear(hidden_dim, out_dim),
        )

    def forward(self, x):
        """Encode a (B, 3, H, W) image batch into (B, out_dim) embeddings."""
        batch, channels = x.shape[:2]
        assert channels == 3

        feats = self.head(x)
        feats = self.body(feats)
        feats = self.tail(feats)
        pooled = self.pooling(feats)

        flat = pooled.view(batch, -1)
        embedding = self.out(flat)

        if self.activation is None:
            return embedding
        return self.activation(embedding)
        
        
        