# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------
# Copyright (c) 2025 Meta AI. All Rights Reserved.
# Licensed under the Apache License, Version 2.0
# ------------------------------------------------------------------------
import torch
import torch.nn as nn
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.layers import ShapeSpec
import os


class DINOv3Backbone(Backbone):
    """
    DINOv3 backbone wrapper for Detectron2.

    Loads a DINOv3 ViT via ``torch.hub`` (from a local checkout of the repo
    when ``cfg.MODEL.DINOV3.REPO_DIR`` exists, otherwise from the remote
    ``facebookresearch/dinov3`` hub), optionally loads a weight file, and
    exposes the last 4 intermediate transformer layers as NCHW feature maps
    keyed by ``cfg.MODEL.DINOV3.OUT_FEATURES``.
    """

    def __init__(self, cfg, input_shape):
        super().__init__()
        self.cfg = cfg

        # DINOv3 configuration.
        self.dinov3_repo_dir = cfg.MODEL.DINOV3.REPO_DIR
        self.dinov3_model_name = cfg.MODEL.DINOV3.MODEL_NAME
        self.dinov3_weights = cfg.MODEL.DINOV3.WEIGHTS
        self.freeze_backbone = cfg.MODEL.DINOV3.FREEZE_BACKBONE

        # Load the DINOv3 model via torch hub; prefer a local checkout so the
        # build works offline. Weights are loaded separately below, so
        # pretrained=False in both paths.
        if os.path.exists(self.dinov3_repo_dir):
            self.dinov3 = torch.hub.load(
                self.dinov3_repo_dir,
                self.dinov3_model_name,
                source='local',
                pretrained=False,
            )
        else:
            self.dinov3 = torch.hub.load(
                "facebookresearch/dinov3",
                self.dinov3_model_name,
                pretrained=False,
            )

        # Load a checkpoint if one is configured and present on disk.
        # NOTE(security): torch.load unpickles arbitrary objects — only point
        # MODEL.DINOV3.WEIGHTS at trusted checkpoint files.
        if self.dinov3_weights and os.path.exists(self.dinov3_weights):
            state_dict = torch.load(self.dinov3_weights, map_location='cpu')
            # strict=False tolerates head/extra keys not present in the ViT.
            self.dinov3.load_state_dict(state_dict, strict=False)

        # Optionally freeze all backbone parameters (feature extraction mode).
        if self.freeze_backbone:
            for param in self.dinov3.parameters():
                param.requires_grad = False

        # Actual token embedding dimension of the loaded model; different
        # DINOv3 variants expose it as `embed_dims` or `embed_dim`.
        # Falls back to 768 (ViT-B/16) if neither attribute exists.
        actual_embed_dim = getattr(
            self.dinov3, 'embed_dims',
            getattr(self.dinov3, 'embed_dim', 768),
        )

        # Desired output channel count (defaults to 1024 to match ROI_HEADS).
        output_embed_dim = (
            cfg.MODEL.DINOV3.OUT_FEATURE_CHANNELS
            if hasattr(cfg.MODEL.DINOV3, 'OUT_FEATURE_CHANNELS')
            else 1024
        )

        # 1x1 conv to adapt channels when the ViT width differs from the
        # requested output width; identity (None) otherwise.
        if actual_embed_dim != output_embed_dim:
            self.channel_transform = nn.Conv2d(
                actual_embed_dim, output_embed_dim,
                kernel_size=1, stride=1, padding=0,
            )
        else:
            self.channel_transform = None

        self._out_features = cfg.MODEL.DINOV3.OUT_FEATURES
        self._out_feature_channels = {f: output_embed_dim for f in self._out_features}
        # Stride 16 assumed for all outputs (ViT patch size) — all feature
        # levels share the same resolution; TODO confirm for non-/16 variants.
        self._out_feature_strides = {f: 16 for f in self._out_features}

    @staticmethod
    def _infer_grid_shape(num_tokens):
        """
        Infer an (H, W) patch-grid shape with H * W == num_tokens.

        Prefers a square grid; otherwise returns the factor pair closest to
        square (H <= W). Always succeeds since 1 divides num_tokens.
        """
        side = int(num_tokens ** 0.5)
        if side * side == num_tokens:
            return side, side
        for h in range(side, 0, -1):
            if num_tokens % h == 0:
                return h, num_tokens // h
        return 1, num_tokens  # unreachable; kept for safety

    def _tokens_to_feature_map(self, tokens):
        """
        Reshape (B, N, C) patch tokens into a (B, C_out, H, W) feature map,
        applying the optional 1x1 channel transform.
        """
        B, N, C = tokens.shape
        H, W = self._infer_grid_shape(N)
        # reshape (not view): permute makes the tensor non-contiguous.
        feature_map = tokens.permute(0, 2, 1).reshape(B, C, H, W)
        if self.channel_transform is not None:
            feature_map = self.channel_transform(feature_map)
        return feature_map

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W)
        Returns:
            dict[str->Tensor]: features, one NCHW map per name in
                ``cfg.MODEL.DINOV3.OUT_FEATURES``.
        """
        # Last 4 transformer layers; each entry is (patch_tokens, class_token).
        features = self.dinov3.get_intermediate_layers(
            x, n=4, return_class_token=True
        )

        outputs = {}
        for i, feat_name in enumerate(self._out_features):
            # If more output names than returned layers, reuse the last layer.
            tokens = features[min(i, len(features) - 1)][0]
            outputs[feat_name] = self._tokens_to_feature_map(tokens)
        return outputs

    def output_shape(self):
        """Detectron2 contract: per-feature ShapeSpec (channels + stride)."""
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }


@BACKBONE_REGISTRY.register()
def build_dinov3_backbone(cfg, input_shape):
    """
    Build a :class:`DINOv3Backbone` from a Detectron2 config.

    Args:
        cfg: the Detectron2 config node.
        input_shape: a :class:`ShapeSpec` describing the backbone input.

    Returns:
        DINOv3Backbone: the constructed backbone instance.
    """
    backbone = DINOv3Backbone(cfg, input_shape)
    return backbone