import torch
import torch.nn as nn
import torch.nn.functional as F

from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec


# Embedding dimension (token width) for each supported DINOv2 architecture.
DINOV2_ARCHS = dict(
    dinov2_vits14=384,
    dinov2_vitb14=768,
    dinov2_vitl14=1024,
    dinov2_vitg14=1536,
)

@BACKBONE_REGISTRY.register()
class DINOv2Transformer(Backbone):
    """
    Detectron2 backbone wrapping a DINOv2 vision transformer.

    The outputs of the last four transformer blocks are rearranged into a
    four-level feature pyramid ("res2".."res5") by trading spatial
    resolution against channel depth, then bilinearly resized to the
    standard Detectron2 strides 4 / 8 / 16 / 32.

    Args:
        cfg: Detectron2 config node; reads ``cfg.MODEL.DINO.OUT_FEATURES``
            (expected to name the levels in order res2..res5).
        input_shape: unused; kept to satisfy the BACKBONE_REGISTRY
            constructor signature.
    """

    def __init__(self, cfg, input_shape):
        super().__init__()

        # NOTE(review): these knobs are hard-coded here rather than read
        # from cfg — confirm whether they should come from the config.
        model_name = 'dinov2_vitb14'
        num_trainable_blocks = 2
        norm_layer = False
        return_token = False
        assert model_name in DINOV2_ARCHS, f'Unknown model name {model_name}'

        # Load from the pre-populated local torch-hub cache to avoid a
        # network fetch (the remote equivalent would be
        # torch.hub.load('facebookresearch/dinov2', model_name)).
        self.model = torch.hub.load(
            '/root/.cache/torch/hub/facebookresearch_dinov2_main',
            model_name, source='local')

        # Token width of the chosen architecture (768 for vitb14).
        self.num_channels = DINOV2_ARCHS[model_name]
        # NOTE(review): stored but never used — forward() always splits at
        # the last 4 blocks regardless of this value; confirm intent.
        self.num_trainable_blocks = num_trainable_blocks
        self.norm_layer = norm_layer
        self.return_token = return_token

        self._out_features = cfg.MODEL.DINO.OUT_FEATURES
        self._out_feature_strides = {
            "res2": 4,
            "res3": 8,
            "res4": 16,
            "res5": 32,
        }
        # Channel counts follow the space-to-depth reshape in forward():
        # C/16, C/4, C, 4*C for C = 768 (vitb14). NOTE(review): these are
        # hard-coded for vitb14 and would be wrong for other model_names.
        self._out_feature_channels = {
            "res2": 48,
            "res3": 192,
            "res4": 768,
            "res5": 1536 * 2,
        }

    def output_shape(self):
        """Return a ShapeSpec (channels, stride) per requested feature name."""
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name],
            )
            for name in self._out_features
        }

    def forward(self, x):
        """
        Run the ViT and build the multi-scale feature dict.

        Parameters:
            x (torch.Tensor): input images [B, 3, H, W]; H and W must be
                divisible by 14 (the ViT patch size) — the reshapes below
                additionally assume divisibility by 28.

        Returns:
            dict[str, torch.Tensor]: one entry per name in
            ``self._out_features``, with shapes [B, C_i, H // s_i, W // s_i]
            for strides s_i in (4, 8, 16, 32).
        """
        B, C, H, W = x.shape
        x = self.model.prepare_tokens_with_masks(x)

        # Earlier blocks run with gradients enabled (no freezing here);
        # only the outputs of the last four blocks feed the pyramid.
        for blk in self.model.blocks[:-4]:
            x = blk(x)

        out = []
        for blk in self.model.blocks[-4:]:
            x = blk(x)
            out.append(x)

        if self.norm_layer:
            out = [self.model.norm(t) for t in out]

        # Drop the CLS token; keep only the patch tokens [B, N, C].
        out = [t[:, 1:] for t in out]

        # Target (H', W', C') per level: redistribute the N x C patch grid
        # into finer/coarser spatial maps with matching element counts
        # (e.g. res2 doubles H and W while dividing channels by 16).
        size_list = [
            ((H // 7) * 2, (W // 7) * 2, self.num_channels // 16),
            (H // 7, W // 7, self.num_channels // 4),
            (H // 14, W // 14, self.num_channels),
            (H // 28, W // 28, self.num_channels * 4),
        ]
        # Final spatial sizes matching the declared strides 4/8/16/32.
        interpolate_list = [
            (H // 4, W // 4),
            (H // 8, W // 8),
            (H // 16, W // 16),
            (H // 32, W // 32),
        ]

        feats = []
        for t, (h, w, c), size in zip(out, size_list, interpolate_list):
            # [B, N, C] -> [B, h, w, c] -> [B, c, h, w], then resize.
            t = t.reshape(B, h, w, c).permute(0, 3, 1, 2)
            feats.append(
                F.interpolate(t, size=size, mode="bilinear",
                              align_corners=False)
            )

        # Index-based build (not zip) so that a too-short _out_features
        # still raises IndexError, matching the original behavior.
        return {self._out_features[i]: f for i, f in enumerate(feats)}
