
import torch
import logging
import torchvision
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from backbones.dinov2 import DINOv2
from dino_finetune import DINOV2EncoderLoRA

class GeoLocalizationNet(nn.Module):
    """Geo-localization model: backbone encoder followed by a GeM-pooled,
    linearly projected, L2-normalized descriptor head.

    The head maps backbone feature maps (B, C, H, W) to unit-norm
    descriptors of dimension ``feature_dim`` (== backbone channels).
    """

    def __init__(self, backbone_name):
        super().__init__()
        backbone, feature_dim = get_backbone(backbone_name)
        self.backbone = backbone
        # Pool spatial dims -> flatten -> project -> unit hypersphere.
        self.classifier = nn.Sequential(
            GeM(),
            Flatten(),
            nn.Linear(feature_dim, feature_dim),
            L2Norm(),
        )
        self.feature_dim = feature_dim

    def forward(self, x):
        features = self.backbone(x)
        return self.classifier(features)


def get_pooling():
    """Return a global-average-pool + flatten head: (B, C, H, W) -> (B, C)."""
    layers = [
        nn.AdaptiveAvgPool2d(output_size=(1, 1)),
        Flatten(),
    ]
    return nn.Sequential(*layers)


def get_pretrained_torchvision_model(backbone_name):
    """Return the pretrained torchvision model for ``backbone_name``.

    Examples of backbone_name are 'ResNet18' or 'EfficientNet_B0'.

    Newer torchvision versions expose a ``<Name>_Weights`` enum and expect
    ``weights=...DEFAULT``; older versions take ``pretrained=True``.
    Raises AttributeError if torchvision has no model with that name.
    """
    # torchvision.models is already imported at module level; getattr on it
    # replaces the original redundant __import__(...) round-trip.
    builder = getattr(torchvision.models, backbone_name.lower())
    try:
        # Newer torchvision: pass the DEFAULT member of the *_Weights enum.
        weights_enum = getattr(torchvision.models, f"{backbone_name}_Weights")
        model = builder(weights=weights_enum.DEFAULT)
    except AttributeError:
        # Older torchvision has no *_Weights enums: fall back to pretrained=True.
        model = builder(pretrained=True)
    return model

def get_backbone(
    backbone_name: str,
    train_all_layers: bool = False,
    *,
    num_trainable_blocks: int = 2,
    norm_layer: bool = False,
    return_token: bool = False,
    use_lora: bool = False,
    lora_rank: int = 5,
):
    """Build a DINOv2 backbone and report its output channel count.

    Args:
        backbone_name: DINOv2 variant name (lower-cased before use).
        train_all_layers: accepted for interface compatibility.
            NOTE(review): this flag is not consumed anywhere in this
            function — confirm whether it should control fine-tuning.
        num_trainable_blocks: number of final transformer blocks left
            trainable in the plain DINOv2 wrapper (default 2, as before).
        norm_layer: forwarded to DINOv2 (default False, as before).
        return_token: forwarded to DINOv2 (default False, as before).
        use_lora: if True, wrap the encoder with LoRA adapters instead of
            the plain DINOv2 wrapper. Was a hard-coded local before;
            exposing it makes the LoRA branch reachable. Default False
            preserves the original behavior.
        lora_rank: LoRA rank ``r`` (default 5, the previously hard-coded value).

    Returns:
        (backbone, out_channels): the backbone module and its feature
        channel count (taken from ``backbone.num_channels``).
    """
    model_name = backbone_name.lower()
    if use_lora:
        backbone = DINOV2EncoderLoRA(
            model_name=model_name,
            r=lora_rank,
        )
    else:
        backbone = DINOv2(
            model_name=model_name,
            num_trainable_blocks=num_trainable_blocks,
            norm_layer=norm_layer,
            return_token=return_token,
        )
    return backbone, backbone.num_channels


def get_output_channels_dim(model):
    """Return the number of channels in the output of a model."""
    # Probe with a single dummy 3x224x224 image and read the channel axis.
    dummy_input = torch.ones([1, 3, 224, 224])
    output = model(dummy_input)
    return output.shape[1]


class Flatten(nn.Module):
    """Drop the trailing 1x1 spatial dims of a (B, C, 1, 1) tensor -> (B, C)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        height, width = x.shape[2], x.shape[3]
        # Only valid after global pooling has reduced the spatial dims to 1x1.
        assert height == width == 1, f"{x.shape[2]} != {x.shape[3]} != 1"
        return x[:, :, 0, 0]


class L2Norm(nn.Module):
    """Normalize inputs to unit L2 norm along ``dim`` (default: channel axis 1)."""

    def __init__(self, dim=1):
        super().__init__()
        # Axis along which each vector is scaled to unit length.
        self.dim = dim

    def forward(self, x):
        return F.normalize(x, p=2, dim=self.dim)

def gem(x, p=torch.ones(1)*3, eps: float = 1e-6):
    """Generalized-mean (GeM) pooling over the spatial dims of a 4D tensor.

    Clamps to ``eps`` (keeps the ``pow`` stable for non-positive inputs),
    raises to power ``p``, averages over H x W, then takes the 1/p root.
    Output shape is (B, C, 1, 1).
    """
    clamped = x.clamp(min=eps).pow(p)
    pooled = F.avg_pool2d(clamped, (x.size(-2), x.size(-1)))
    return pooled.pow(1.0 / p)

class GeM(nn.Module):
    """Generalized-mean pooling layer wrapping :func:`gem`.

    The exponent ``p`` is stored as an ``nn.Parameter`` so it can optionally
    be learned (``learn_p=True``); with the default it stays fixed at its
    initial value.
    """

    def __init__(self, p=3, eps=1e-6, learn_p=False):
        super().__init__()
        # Numerical floor applied before the power, see gem().
        self.eps = eps
        # Pooling exponent; gradient flows only when learn_p is True.
        self.p = nn.Parameter(torch.ones(1) * p, requires_grad=learn_p)

    def forward(self, x):
        return gem(x, p=self.p, eps=self.eps)

    def __repr__(self):
        return f"{self.__class__.__name__}(p={self.p.data.tolist()[0]:.4f}, eps={self.eps})"
