"""
Simplified ROI Refiner using ResNet instead of OverLock
This is a fallback when OverLock has CUDA issues
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet50, ResNet50_Weights
from src.models.model_utils import load_dinov2_model
from src.models.attention_fusion import DynamicAttentionFusion
from src.models.cross_roi_attention import CrossROISelfAttention
from src.config import IMPROVED_MODEL_CONFIG
import yaml


class SimpleROIRefinerModel(nn.Module):
    """
    Simplified ROI Refiner using ResNet50 instead of OverLock.

    Fallback architecture for when OverLock has CUDA issues.

    Pipeline per ROI batch:
    - ResNet50 (local features) - 2048 channels, reduced to 640 by a 1x1 adapter
    - DINOv2 (global/semantic features) - 384 channels from 14x14 patch tokens
    - Dynamic attention fusion (or plain concat + 1x1 conv when disabled)
    - Optional cross-ROI self-attention over pooled per-ROI features
    - Classification head (num_classes + 1 for background) and per-class
      box-delta regression head (num_classes * 4)
    """

    def __init__(self, num_classes=2, device='cpu', unfreeze_layers=2, config=None):
        """
        Args:
            num_classes: Number of foreground classes (a background class is
                added internally by the classifier head).
            device: Torch device the submodules are moved to.
            unfreeze_layers: Backbone freezing policy:
                -1 = fully trainable,
                 0 = both backbones frozen,
                 N > 0 = only the last N top-level ResNet children are
                 trainable (DINOv2 stays frozen in this mode).
            config: Optional hyperparameter dict; defaults to
                IMPROVED_MODEL_CONFIG.
        """
        super(SimpleROIRefinerModel, self).__init__()

        if config is None:
            config = IMPROVED_MODEL_CONFIG

        self.config = config
        self.device = device
        self.num_classes = num_classes
        self.unfreeze_layers = unfreeze_layers
        self.use_dynamic_fusion = config.get('use_dynamic_fusion', True)
        self.use_cross_roi_attention = config.get('use_cross_roi_attention', True)

        print("=" * 80)
        print("Initializing SIMPLE ROI Refiner Model (ResNet backbone)")
        print("=" * 80)
        print(f"Device: {device}")
        print(f"Unfreeze layers: {unfreeze_layers}")
        # NOTE: plain string (original used an f-string with no placeholders).
        print("⚠️  Using ResNet50 instead of OverLock (fallback mode)")

        # ===== Feature Extractors =====
        print("\nLoading feature extractors...")

        # ResNet50 for local features
        print("  Loading ResNet50...")
        self.resnet = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
        # Drop the classification tail (avgpool + fc) so the backbone emits
        # the layer4 feature map: [B, 2048, H/32, W/32].
        self.resnet = nn.Sequential(*list(self.resnet.children())[:-2])  # Up to layer4
        self.resnet = self.resnet.to(device)
        print("  ✓ ResNet50 loaded (2048 channels)")

        # DINOv2 for semantic features
        self.dino_semantic = load_dinov2_model(device=device)
        print("✓ ResNet50 and DINOv2 loaded")

        # Apply freezing before building the (always-trainable) heads below.
        self._apply_freezing_strategy()

        # ===== Adapter to match dimensions =====
        # ResNet50 outputs 2048 channels; reduce to 640 so the fusion module
        # sees the same channel count it would with the OverLock backbone.
        resnet_out_channels = 2048
        target_channels = 640

        self.resnet_adapter = nn.Sequential(
            nn.Conv2d(resnet_out_channels, target_channels, kernel_size=1),
            nn.BatchNorm2d(target_channels),
            nn.ReLU(inplace=True)
        ).to(device)
        print(f"✓ ResNet adapter: {resnet_out_channels} → {target_channels} channels")

        # ===== Fusion Module =====
        overlock_feat_channels = target_channels  # After adapter
        dino_feat_channels = 384  # DINOv2 ViT-S token dimension

        if self.use_dynamic_fusion:
            print("\nInitializing Dynamic Attention Fusion...")
            self.fusion_module = DynamicAttentionFusion(
                overlock_channels=overlock_feat_channels,
                dino_channels=dino_feat_channels,
                output_channels=config.get('fusion_channels', 512),
                use_channel_attention=config.get('fusion_use_channel_attention', True),
                use_spatial_attention=config.get('fusion_use_spatial_attention', True),
                use_cross_attention=config.get('fusion_use_cross_attention', True),
                reduction_ratio=config.get('fusion_reduction_ratio', 16),
                spatial_kernel_size=config.get('fusion_spatial_kernel_size', 7)
            ).to(device)
        else:
            # Simple fallback: concatenation + 1x1 conv (spatial alignment is
            # done in forward() via interpolation before the concat).
            self.fusion_module = nn.Sequential(
                nn.Conv2d(overlock_feat_channels + dino_feat_channels,
                          config.get('fusion_channels', 512), kernel_size=1),
                nn.ReLU(),
                nn.BatchNorm2d(config.get('fusion_channels', 512))
            ).to(device)

        # ===== Cross-ROI Attention =====
        if self.use_cross_roi_attention:
            print("\nInitializing Cross-ROI Self-Attention...")
            self.cross_roi_attention = CrossROISelfAttention(
                feature_dim=config.get('fusion_channels', 512),
                num_heads=config.get('cross_roi_num_heads', 8),
                dropout=config.get('cross_roi_dropout', 0.1),
                position_embed_dim=config.get('cross_roi_position_embed_dim', 128),
                use_relative_pos=config.get('cross_roi_use_relative_pos', True)
            ).to(device)

        # ===== Classification and Regression Heads =====
        feature_dim = config.get('fusion_channels', 512)

        self.classifier = nn.Sequential(
            nn.Linear(feature_dim, 256),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes + 1)  # +1 for background
        ).to(device)

        self.regressor = nn.Sequential(
            nn.Linear(feature_dim, 256),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes * 4)  # 4 bbox coordinates per class
        ).to(device)

        print("\n" + "=" * 80)
        print("Simple ROI Refiner Model initialized successfully!")
        print("=" * 80)

    def _apply_freezing_strategy(self):
        """Freeze/unfreeze backbone parameters per ``self.unfreeze_layers``.

        -1: everything stays trainable. 0: both backbones frozen.
        N > 0: only the last N top-level ResNet children are trainable;
        DINOv2 is frozen in this mode.
        """
        if self.unfreeze_layers == -1:
            print("Strategy: Fully unfrozen")
        elif self.unfreeze_layers == 0:
            print("Strategy: Fully frozen")
            for param in self.resnet.parameters():
                param.requires_grad = False
            for param in self.dino_semantic.parameters():
                param.requires_grad = False
        else:
            print(f"Strategy: Partially unfrozen (last {self.unfreeze_layers} layers)")
            # Freeze the whole ResNet first, then re-enable its tail.
            for param in self.resnet.parameters():
                param.requires_grad = False

            # Unfreeze last N top-level children of the truncated ResNet
            # (e.g. N=2 -> layer3 and layer4).
            resnet_children = list(self.resnet.children())
            for layer in resnet_children[-self.unfreeze_layers:]:
                for param in layer.parameters():
                    param.requires_grad = True

            # DINOv2 always stays frozen in partial mode.
            for param in self.dino_semantic.parameters():
                param.requires_grad = False

    def forward(self, roi_batch, roi_positions=None):
        """
        Forward pass for pre-cropped ROI images.

        Args:
            roi_batch: [B, 3, H, W] pre-cropped ROI images. H and W must be
                multiples of 14 (the DINOv2 patch size).
            roi_positions: [B, 4] ROI positions [cx, cy, w, h] (optional).
                Cross-ROI attention is applied only when this is provided.

        Returns:
            classification_logits: [B, num_classes+1]
            bounding_box_deltas: [B, num_classes*4]
            attention_weights: Cross-ROI attention weights, or None when
                cross-ROI attention is disabled or no positions were given.

        Raises:
            ValueError: If H or W is not divisible by 14.
        """
        B, _, H_roi, W_roi = roi_batch.shape

        # Fail early with a clear message instead of a cryptic reshape error
        # from the DINOv2 token-to-grid conversion below.
        if H_roi % 14 != 0 or W_roi % 14 != 0:
            raise ValueError(
                f"ROI size ({H_roi}x{W_roi}) must be divisible by the DINOv2 "
                f"patch size (14)"
            )

        # ===== Step 1: Feature Extraction =====
        # ResNet features (stride 32), then channel reduction via the adapter.
        resnet_feats = self.resnet(roi_batch)             # [B, 2048, H/32, W/32]
        resnet_feats = self.resnet_adapter(resnet_feats)  # [B, 640, H/32, W/32]

        # DINOv2 patch tokens reshaped into a spatial grid (stride 14).
        dino_tokens = self.dino_semantic.forward_features(roi_batch)['x_norm_patchtokens']
        H_dino, W_dino = H_roi // 14, W_roi // 14
        dino_feats = dino_tokens.permute(0, 2, 1).reshape(B, -1, H_dino, W_dino)  # [B, 384, H/14, W/14]

        # ===== Step 2: Fusion =====
        # isinstance check kept as a defensive guard against external mutation
        # of use_dynamic_fusion after construction.
        if self.use_dynamic_fusion and isinstance(self.fusion_module, DynamicAttentionFusion):
            # NOTE(review): the two feature maps have different spatial sizes
            # here; presumably DynamicAttentionFusion aligns them internally —
            # confirm in attention_fusion.py.
            fused = self.fusion_module(resnet_feats, dino_feats)
        else:
            # Resample DINOv2 features to the ResNet grid before concatenation.
            dino_upsampled = F.interpolate(dino_feats, size=resnet_feats.shape[2:],
                                           mode='bilinear', align_corners=False)
            fused = self.fusion_module(torch.cat([resnet_feats, dino_upsampled], dim=1))

        # ===== Step 3: Global Pooling =====
        pooled = F.adaptive_avg_pool2d(fused, (1, 1)).flatten(1)  # [B, fusion_channels]

        # ===== Step 4: Cross-ROI Attention =====
        attention_weights = None
        if self.use_cross_roi_attention and roi_positions is not None:
            enhanced_features, attention_weights = self.cross_roi_attention(pooled, roi_positions)
        else:
            enhanced_features = pooled

        # ===== Step 5: Classification and Regression =====
        classification_logits = self.classifier(enhanced_features)
        bounding_box_deltas = self.regressor(enhanced_features)

        return classification_logits, bounding_box_deltas, attention_weights
