"""
Improved ROI Refiner Model


1. DynamicAttentionFusion - ++
2. CrossROISelfAttention - ROI
3. FPN - 
4. RoIAlign - ROI

baseline
- concat
- ROI
- ROIcrop+resizeRoIAlign
- FPN
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.model_utils import load_dinov2_model
from models_ext.overlock_local import overlock_t
from src.models.attention_fusion import DynamicAttentionFusion
from src.models.cross_roi_attention import CrossROISelfAttention
from src.config import MODEL_CONFIG, IMPROVED_MODEL_CONFIG, DATASET_DIR
import yaml

# Optional FPN support: the model can still run in baseline/RoIAlign modes
# when the FPN module is missing, so failure here is non-fatal.
try:
    from src.models.fpn import FPN, RoIFeatureExtractor
    FPN_AVAILABLE = True
except ImportError:
    FPN_AVAILABLE = False
    print("Warning: FPN module not available. Set use_fpn=False to continue.")

# Read the number of defect classes (``nc``) from the dataset's data.yaml
# once at import time; used to size the classifier/regressor heads.
with open(DATASET_DIR + '/data.yaml', 'r') as f:
    num_classes = yaml.safe_load(f)['nc']


class ImprovedROIRefinerModel(nn.Module):
    """
    ROI refinement head: classifies candidate boxes and regresses box deltas.

    Combines an OverLoCK CNN backbone with DINOv2 ViT-S/14 patch features,
    fuses the two branches, and optionally lets ROI features attend to each
    other before the prediction heads. Three operating modes are selected by
    the ``use_fpn`` / ``use_roi_align`` config flags:

    Mode A (Baseline): use_fpn=False, use_roi_align=False
    1. Input: pre-cropped ROI images [B, 3, 224, 224]
    2. Feature extraction: OverLoCK + DINOv2 on each crop
    3. Fusion: DynamicAttentionFusion (or concat fallback)
    4. Pooling: Global Average Pooling
    5. Cross-ROI interaction: CrossROISelfAttention
    6. Heads: Classifier + Regressor

    Mode B (RoIAlign): use_fpn=False, use_roi_align=True
    1. Input: full images + ROI boxes
    2. OverLoCK runs once per image → single-scale feature map
    3. RoIAlign pools per-ROI features from that map
    4. DINOv2: per-ROI features via a second RoIAlign on the patch-token map
    5. Fuse → pool → cross-ROI attention → heads

    Mode C (FPN + RoIAlign): use_fpn=True, use_roi_align=True
    1. Input: full images + ROI boxes
    2. Backbone: OverLoCK multi-scale outputs [C2, C3, C4, C5]
    3. FPN: builds pyramid [P2, P3, P4, P5]
    4. ROI features: FPN level assignment + RoIAlign
    5. DINOv2 → fuse → pool → cross-ROI attention → heads
    """

    def __init__(self, device='cpu', unfreeze_layers=2, config=None):
        """
        Args:
            device: torch device (string or device) to place all submodules on
            unfreeze_layers: backbone fine-tuning strategy:
                           0 = freeze both backbones entirely
                           -1 = leave everything trainable
                           n > 0 = unfreeze only the last n stages/blocks
            config: hyperparameter dict; falls back to IMPROVED_MODEL_CONFIG when None
        """
        super().__init__()
        self.device = device
        self.num_classes = num_classes
        self.unfreeze_layers = unfreeze_layers

        # Resolve configuration (shared default when none supplied)
        if config is None:
            config = IMPROVED_MODEL_CONFIG

        self.config = config
        self.use_dynamic_fusion = config.get('use_dynamic_fusion', True)
        self.use_cross_roi_attention = config.get('use_cross_roi_attention', True)
        self.use_fpn = config.get('use_fpn', False)
        self.use_roi_align = config.get('use_roi_align', False)

        # Validate flag combinations before building any submodules
        if self.use_fpn and not FPN_AVAILABLE:
            raise ImportError("FPN module not available. Please check src/models/fpn.py")
        if self.use_fpn and not self.use_roi_align:
            raise ValueError("FPN requires RoIAlign to be enabled (use_roi_align=True)")

        print("=" * 80)
        print("Initializing Improved ROI Refiner Model")
        print("=" * 80)
        print(f"Device: {device}")
        print(f"Unfreeze layers: {unfreeze_layers}")
        print(f"Dynamic Fusion: {' Enabled' if self.use_dynamic_fusion else ' Disabled'}")
        print(f"Cross-ROI Attention: {' Enabled' if self.use_cross_roi_attention else ' Disabled'}")
        print(f"FPN: {' Enabled' if self.use_fpn else ' Disabled'}")
        print(f"RoIAlign: {' Enabled' if self.use_roi_align else ' Disabled'}")

        if self.use_fpn:
            print("\n  FPN Mode: Expects full images + ROI coordinates")
        elif self.use_roi_align:
            print("\n  RoIAlign Mode: Expects full images + ROI coordinates")
        else:
            print("\n Baseline Mode: Expects pre-cropped ROI images")

        # ===== Feature extractors =====
        print("\nLoading feature extractors...")

        # Initialize OverLock on CPU first to avoid CUDA initialization issues
        print("  Initializing OverLoCK on CPU...")
        self.overlock_focus = overlock_t()
        print("  Moving OverLoCK to device...")
        try:
            self.overlock_focus = self.overlock_focus.to(device)
            print("   OverLoCK successfully moved to", device)
        except RuntimeError as e:
            print(f"    Error moving OverLock to {device}: {e}")
            print("  Attempting workaround...")
            # Move in eval mode to avoid potential issues
            # NOTE(review): this retries the same .to(device) call and may
            # raise the same RuntimeError again — verify the workaround helps.
            self.overlock_focus.eval()
            self.overlock_focus = self.overlock_focus.to(device)

        self.dino_semantic = load_dinov2_model(device=device)
        print(" OverLoCK and DINOv2 loaded")

        # Apply the freezing strategy to both backbones
        self._apply_freezing_strategy()

        # ===== FPN + multi-level RoI extractor (Mode C only) =====
        if self.use_fpn:
            print("\nInitializing FPN (Feature Pyramid Network)...")
            self.fpn = FPN(
                in_channels_list=config.get('fpn_in_channels', [128, 384, 640, 640]),
                out_channels=config.get('fpn_out_channels', 256),
                use_p2=config.get('fpn_use_p2', True)
            ).to(device)

            # RoI feature extractor: assigns each ROI to a pyramid level, then RoIAligns
            fpn_levels = config.get('fpn_levels', [2, 3, 4, 5])
            self.roi_extractor = RoIFeatureExtractor(
                output_size=config.get('roi_align_output_size', 7),
                sampling_ratio=config.get('roi_align_sampling_ratio', 2),
                canonical_scale=config.get('roi_align_canonical_scale', 224),
                canonical_level=config.get('roi_align_canonical_level', 4),
                fpn_levels=fpn_levels
            ).to(device)

            # In FPN mode the "OverLoCK" branch fed to fusion carries FPN output channels
            overlock_feat_channels = config.get('fpn_out_channels', 256)
            print(f" FPN initialized with levels: {fpn_levels}")
            print(f"  Output channels: {overlock_feat_channels}")

        elif self.use_roi_align:
            # Single-scale RoIAlign without an FPN (Mode B)
            print("\nInitializing RoIAlign (single-scale)...")
            from torchvision.ops import RoIAlign

            # RoIAlign over OverLoCK's last feature map
            self.roi_align = RoIAlign(
                output_size=(config.get('roi_align_output_size', 7),
                           config.get('roi_align_output_size', 7)),
                spatial_scale=1/32,  # assumes OverLoCK's final stage stride is 32 — TODO confirm
                sampling_ratio=config.get('roi_align_sampling_ratio', 2),
                aligned=True
            ).to(device)

            # Separate RoIAlign for the DINOv2 patch-token feature map (patch size 14)
            self.dino_roi_align = RoIAlign(
                output_size=(16, 16),  # 16x16 matches a 224x224 crop's DINOv2 patch grid
                spatial_scale=1/14,  # DINOv2 patch size
                sampling_ratio=config.get('roi_align_sampling_ratio', 2),
                aligned=True
            ).to(device)

            overlock_feat_channels = config.get('overlock_channels', 640)
            print(f" RoIAlign initialized")
            print(f"  Output size: {config.get('roi_align_output_size', 7)}x{config.get('roi_align_output_size', 7)}")
            print(f"  DINOv2 RoIAlign: 16x16 patches")
        else:
            # Baseline: fusion sees the raw OverLoCK output channels
            overlock_feat_channels = config.get('overlock_channels', 640)

        # ===== Fusion module =====
        if self.use_dynamic_fusion:
            print("\nInitializing Dynamic Attention Fusion...")
            self.fusion_module = DynamicAttentionFusion(
                overlock_channels=overlock_feat_channels,  # mode-dependent (FPN vs raw backbone)
                dino_channels=config.get('dino_channels', 384),
                fusion_channels=config.get('fusion_channels', 512),
                use_channel_attn=config.get('fusion_use_channel_attention', True),
                use_spatial_attn=config.get('fusion_use_spatial_attention', True),
                use_cross_attn=config.get('fusion_use_cross_attention', True),
                reduction_ratio=config.get('fusion_reduction_ratio', 16),
                spatial_kernel_size=config.get('fusion_spatial_kernel_size', 7),
                num_heads=8,
                dropout=0.1
            ).to(device)
            print(" Dynamic Attention Fusion initialized")
            print(f"  - OverLoCK channels: {overlock_feat_channels}")
            print(f"  - Channel Attention: {config.get('fusion_use_channel_attention', True)}")
            print(f"  - Spatial Attention: {config.get('fusion_use_spatial_attention', True)}")
            print(f"  - Cross Attention: {config.get('fusion_use_cross_attention', True)}")
        else:
            # Fallback: concat the two branches and project with a 1x1 conv
            print("\nUsing simple fusion (baseline)...")
            fusion_in_channels = overlock_feat_channels + config.get('dino_channels', 384)
            self.fusion_module = nn.Sequential(
                nn.Conv2d(fusion_in_channels, config.get('fusion_channels', 512), 1),
                nn.ReLU(),
                nn.BatchNorm2d(config.get('fusion_channels', 512))
            ).to(device)

        # ===== Cross-ROI self-attention =====
        if self.use_cross_roi_attention:
            print("\nInitializing Cross-ROI Self-Attention...")
            self.cross_roi_attention = CrossROISelfAttention(
                feature_dim=config.get('fusion_channels', 512),
                num_heads=config.get('cross_roi_num_heads', 8),
                dropout=config.get('cross_roi_dropout', 0.1),
                position_embed_dim=config.get('cross_roi_position_embed_dim', 128),
                use_relative_pos=config.get('cross_roi_use_relative_pos', True)
            ).to(device)
            print(" Cross-ROI Self-Attention initialized")
            print(f"  - Num heads: {config.get('cross_roi_num_heads', 8)}")
            print(f"  - Position embed dim: {config.get('cross_roi_position_embed_dim', 128)}")
            print(f"  - Relative position: {config.get('cross_roi_use_relative_pos', True)}")

        # ===== Prediction heads =====
        # Output dimensions:
        # - classifier: num_classes+1 (0 for background, 1-num_classes for defect classes)
        # - regressor: num_classes*4 (class-specific bbox deltas for defects only, no background)
        print("\nInitializing classification and regression heads...")
        self.classifier = nn.Linear(config.get('fusion_channels', 512),
                                    self.num_classes + 1).to(device)
        self.regressor = nn.Linear(config.get('fusion_channels', 512),
                                   self.num_classes * 4).to(device)
        print(f" Classifier: {config.get('fusion_channels', 512)} → {self.num_classes + 1}")
        print(f" Regressor: {config.get('fusion_channels', 512)} → {self.num_classes * 4}")

        print("=" * 80)
        print("Improved ROI Refiner Model initialized successfully!")
        print("=" * 80)

    def _apply_freezing_strategy(self):
        """Freeze/unfreeze backbone parameters according to ``self.unfreeze_layers``."""
        if self.unfreeze_layers == -1:
            # -1: leave everything trainable
            print("Strategy: Fully unfrozen (all layers trainable)")
            pass
        elif self.unfreeze_layers == 0:
            # 0: freeze both backbones entirely
            print("Strategy: Fully frozen (all layers fixed)")
            for param in self.overlock_focus.parameters():
                param.requires_grad = False
            for param in self.dino_semantic.parameters():
                param.requires_grad = False
        else:
            # n > 0: freeze everything, then re-enable the last n stages/blocks
            print(f"Strategy: Partially unfrozen (last {self.unfreeze_layers} layers)")

            # Freeze all OverLoCK parameters first
            for param in self.overlock_focus.parameters():
                param.requires_grad = False

            # Unfreeze the last n OverLoCK stages (only if the model exposes ``stages``)
            if hasattr(self.overlock_focus, 'stages'):
                total_stages = len(self.overlock_focus.stages)
                for i in range(max(0, total_stages - self.unfreeze_layers), total_stages):
                    for param in self.overlock_focus.stages[i].parameters():
                        param.requires_grad = True

            # Freeze all DINOv2 parameters first
            for param in self.dino_semantic.parameters():
                param.requires_grad = False

            # Unfreeze the last n DINOv2 transformer blocks (if ``blocks`` exists)
            if hasattr(self.dino_semantic, 'blocks'):
                total_blocks = len(self.dino_semantic.blocks)
                for i in range(max(0, total_blocks - self.unfreeze_layers), total_blocks):
                    for param in self.dino_semantic.blocks[i].parameters():
                        param.requires_grad = True

    def forward(self, roi_batch, roi_positions=None, roi_boxes=None, image_size=None):
        """
        Dispatch to the mode-specific forward pass.

        Mode A (Baseline): use_fpn=False, use_roi_align=False
            Args:
                roi_batch: [B, 3, 224, 224] pre-cropped ROI images
                roi_positions: [B, 4] ROI positions for cross-ROI attention (optional)
            Returns:
                classification_logits, bounding_box_deltas, attention_weights

        Mode B (RoIAlign): use_fpn=False, use_roi_align=True
            Args:
                roi_batch: [B, 3, H, W] full images
                roi_boxes: [N, 5] ROI boxes as [batch_idx, x1, y1, x2, y2]
                roi_positions: [N, 4] ROI positions for cross-ROI attention (optional)
                image_size: (H, W) of the input images (optional)
            Returns:
                classification_logits, bounding_box_deltas, attention_weights

        Mode C (FPN + RoIAlign): use_fpn=True, use_roi_align=True
            Args:
                roi_batch: [B, 3, H, W] full images
                roi_boxes: [N, 5] ROI boxes as [batch_idx, x1, y1, x2, y2]
                roi_positions: [N, 4] ROI positions for cross-ROI attention (optional)
                image_size: (H, W) of the input images (optional)
            Returns:
                classification_logits, bounding_box_deltas, attention_weights
        """

        # ===== Mode dispatch =====
        if self.use_fpn:
            return self._forward_fpn_mode(roi_batch, roi_boxes, roi_positions, image_size)
        elif self.use_roi_align:
            return self._forward_roialign_mode(roi_batch, roi_boxes, roi_positions, image_size)
        else:
            return self._forward_baseline_mode(roi_batch, roi_positions)

    def _extract_dino_roi_features(self, full_images, roi_boxes, use_roi_align=True):
        """
        Extract per-ROI DINOv2 features from full images.

        Args:
            full_images: [B, 3, H, W] full input images
            roi_boxes: [N, 5] ROI boxes as [batch_idx, x1, y1, x2, y2]
            use_roi_align: RoIAlign on the patch-token map if available,
                           otherwise crop+resize each ROI

        Returns:
            dino_feats: [N, 384, H_feat, W_feat] per-ROI DINOv2 feature maps
        """
        N = roi_boxes.shape[0]

        if use_roi_align and hasattr(self, 'dino_roi_align'):
            # Path 1: RoIAlign on the full-image DINOv2 feature map
            # Step 1: run DINOv2 once over the full images
            dino_output = self.dino_semantic.forward_features(full_images)
            dino_tokens = dino_output['x_norm_patchtokens']  # [B, num_patches, 384]

            # Reshape patch tokens back into a 2D feature map
            B = full_images.shape[0]
            H_img, W_img = full_images.shape[2], full_images.shape[3]
            # DINOv2 patch size = 14
            H_feat, W_feat = H_img // 14, W_img // 14

            dino_feat_map = dino_tokens.permute(0, 2, 1).reshape(B, 384, H_feat, W_feat)

            # Step 2: RoIAlign pools each ROI from the feature map
            dino_roi_feats = self.dino_roi_align(dino_feat_map, roi_boxes)  # [N, 384, 16, 16]

            return dino_roi_feats

        else:
            # Path 2: crop+resize fallback (baseline behaviour)
            # NOTE(review): fails for N == 0 (torch.cat on empty list) and for
            # zero-area boxes — assumed callers pass at least one valid box.
            roi_crops = []

            for i in range(N):
                batch_idx = int(roi_boxes[i, 0])
                x1, y1, x2, y2 = roi_boxes[i, 1:].int().tolist()

                # Crop the ROI from its source image
                roi_crop = full_images[batch_idx:batch_idx+1, :, y1:y2, x1:x2]
                # Resize to 224x224, the size DINOv2 is fed elsewhere in this model
                roi_crop = F.interpolate(roi_crop, size=(224, 224), mode='bilinear', align_corners=False)
                roi_crops.append(roi_crop)

            roi_crops_batch = torch.cat(roi_crops, dim=0)  # [N, 3, 224, 224]

            dino_tokens = self.dino_semantic.forward_features(roi_crops_batch)['x_norm_patchtokens']
            H_dino, W_dino = 224 // 14, 224 // 14
            dino_feats = dino_tokens.permute(0, 2, 1).reshape(N, -1, H_dino, W_dino)  # [N, 384, H, W]

            return dino_feats

    def _forward_baseline_mode(self, roi_batch, roi_positions=None):
        """
        Mode A: baseline forward (crop + resize done upstream).

        Input: pre-cropped ROI images [B, 3, 224, 224].
        """
        B, _, H_roi, W_roi = roi_batch.shape

        # ===== Step 1: feature extraction =====
        # OverLoCK: use the last stage's feature map
        overlock_feats = self.overlock_focus.forward_features(roi_batch)[-1]  # [B, 640, H, W]

        # DINOv2: patch tokens reshaped into a 2D feature map
        dino_tokens = self.dino_semantic.forward_features(roi_batch)['x_norm_patchtokens']
        # DINOv2 ViT-S/14 patch size is 14
        H_dino, W_dino = H_roi // 14, W_roi // 14
        dino_feats = dino_tokens.permute(0, 2, 1).reshape(B, -1, H_dino, W_dino)  # [B, 384, H, W]

        # ===== Step 2: fusion =====
        if self.use_dynamic_fusion and isinstance(self.fusion_module, DynamicAttentionFusion):
            # Attention-based fusion handles the resolution mismatch internally
            fused = self.fusion_module(overlock_feats, dino_feats)  # [B, 512, H, W]
        else:
            # Fallback: resize DINOv2 features to match OverLoCK's, then concat
            dino_feats_resized = F.interpolate(dino_feats, size=overlock_feats.shape[2:],
                                              mode='bilinear', align_corners=False)
            combined = torch.cat([overlock_feats, dino_feats_resized], dim=1)
            fused = self.fusion_module(combined)  # [B, 512, H, W]

        # ===== Step 3: global pooling =====
        pooled = F.adaptive_avg_pool2d(fused, (1, 1)).flatten(1)  # [B, 512]

        # ===== Step 4: cross-ROI attention =====
        attention_weights = None
        if self.use_cross_roi_attention and roi_positions is not None:
            # ROIs attend to each other, conditioned on their positions
            enhanced_features, attention_weights = self.cross_roi_attention(pooled, roi_positions)
        else:
            # No positions supplied: skip the cross-ROI interaction
            enhanced_features = pooled

        # ===== Step 5: prediction heads =====
        classification_logits = self.classifier(enhanced_features)  # [B, num_classes+1]
        bounding_box_deltas = self.regressor(enhanced_features)     # [B, num_classes*4]

        return classification_logits, bounding_box_deltas, attention_weights

    def _forward_roialign_mode(self, full_images, roi_boxes, roi_positions=None, image_size=None):
        """
        Mode B: single-scale RoIAlign forward.

        Input: full images + ROI coordinates.
        """
        # ===== Step 1: OverLoCK on the full images =====
        backbone_features = self.overlock_focus.forward_features(full_images)
        feature_map = backbone_features[-1]  # [B, 640, H/32, W/32]

        # ===== Step 2: RoIAlign per-ROI features =====
        # roi_boxes: [N, 5] = [batch_idx, x1, y1, x2, y2]
        overlock_roi_feats = self.roi_align(feature_map, roi_boxes)  # [N, 640, 7, 7]

        # ===== Step 3: DINOv2 per-ROI features via RoIAlign =====
        dino_feats = self._extract_dino_roi_features(full_images, roi_boxes, use_roi_align=True)
        # dino_feats: [N, 384, 16, 16] (from dino_roi_align)

        # ===== Step 4: fusion =====
        if self.use_dynamic_fusion and isinstance(self.fusion_module, DynamicAttentionFusion):
            fused = self.fusion_module(overlock_roi_feats, dino_feats)  # [N, 512, H, W]
        else:
            dino_feats_resized = F.interpolate(dino_feats, size=overlock_roi_feats.shape[2:],
                                              mode='bilinear', align_corners=False)
            combined = torch.cat([overlock_roi_feats, dino_feats_resized], dim=1)
            fused = self.fusion_module(combined)  # [N, 512, H, W]

        # ===== Step 5: global pooling =====
        pooled = F.adaptive_avg_pool2d(fused, (1, 1)).flatten(1)  # [N, 512]

        # ===== Step 6: cross-ROI attention =====
        attention_weights = None
        if self.use_cross_roi_attention and roi_positions is not None:
            enhanced_features, attention_weights = self.cross_roi_attention(pooled, roi_positions)
        else:
            enhanced_features = pooled

        # ===== Step 7: prediction heads =====
        classification_logits = self.classifier(enhanced_features)  # [N, num_classes+1]
        bounding_box_deltas = self.regressor(enhanced_features)     # [N, num_classes*4]

        return classification_logits, bounding_box_deltas, attention_weights

    def _forward_fpn_mode(self, full_images, roi_boxes, roi_positions=None, image_size=None):
        """
        Mode C: FPN + RoIAlign forward.

        Input: full images + ROI coordinates.
        """
        # ===== Step 1: backbone multi-scale features =====
        backbone_features = self.overlock_focus.forward_features(full_images)
        # Expected: [C2, C3, C4, C5]
        # (assumes OverLoCK returns 4 stages — TODO confirm against overlock_t)

        # ===== Step 2: FPN pyramid =====
        fpn_features = self.fpn(backbone_features)  # [P2, P3, P4, P5] or [P3, P4, P5]

        # ===== Step 3: level-aware RoIAlign =====
        # roi_boxes: [N, 5] = [batch_idx, x1, y1, x2, y2]
        if image_size is None:
            image_size = (full_images.shape[2], full_images.shape[3])

        overlock_roi_feats = self.roi_extractor(fpn_features, roi_boxes, image_size)  # [N, 256, 7, 7]

        # ===== Step 4: DINOv2 per-ROI features via RoIAlign =====
        dino_feats = self._extract_dino_roi_features(full_images, roi_boxes, use_roi_align=True)
        # dino_feats: [N, 384, 16, 16] (RoIAlign) or same shape via crop+resize

        # ===== Step 5: fusion =====
        if self.use_dynamic_fusion and isinstance(self.fusion_module, DynamicAttentionFusion):
            fused = self.fusion_module(overlock_roi_feats, dino_feats)  # [N, 512, H, W]
        else:
            dino_feats_resized = F.interpolate(dino_feats, size=overlock_roi_feats.shape[2:],
                                              mode='bilinear', align_corners=False)
            combined = torch.cat([overlock_roi_feats, dino_feats_resized], dim=1)
            fused = self.fusion_module(combined)  # [N, 512, H, W]

        # ===== Step 6: global pooling =====
        pooled = F.adaptive_avg_pool2d(fused, (1, 1)).flatten(1)  # [N, 512]

        # ===== Step 7: cross-ROI attention =====
        attention_weights = None
        if self.use_cross_roi_attention and roi_positions is not None:
            enhanced_features, attention_weights = self.cross_roi_attention(pooled, roi_positions)
        else:
            enhanced_features = pooled

        # ===== Step 8: prediction heads =====
        classification_logits = self.classifier(enhanced_features)  # [N, num_classes+1]
        bounding_box_deltas = self.regressor(enhanced_features)     # [N, num_classes*4]

        return classification_logits, bounding_box_deltas, attention_weights

    def get_attention_weights(self):
        """
        Report which attention mechanisms are enabled.

        Note: returns static configuration flags, not runtime weight tensors.

        Returns:
            dict: enabled attention components and their settings
        """
        weights = {}

        if self.use_dynamic_fusion and hasattr(self.fusion_module, 'overlock_channel_attn'):
            weights['fusion'] = {
                'channel_attention': True,
                'spatial_attention': True,
                'cross_attention': True
            }

        if self.use_cross_roi_attention:
            weights['cross_roi'] = {
                'enabled': True,
                'num_heads': self.config.get('cross_roi_num_heads', 8)
            }

        return weights


def test_improved_refiner():
    """Smoke-test the refiner in baseline mode on random CPU tensors."""
    print("Testing ImprovedROIRefinerModel...")

    # Random pre-cropped ROI batch plus normalized positions [cx, cy, w, h]
    rois = torch.randn(8, 3, 224, 224)
    positions = torch.rand(8, 4)

    # Fully frozen backbones keep the smoke test cheap
    refiner = ImprovedROIRefinerModel(
        device='cpu',
        unfreeze_layers=0
    )

    # Forward pass with positions → cross-ROI attention is exercised
    print("\nTesting forward pass with cross-ROI attention...")
    logits, deltas, attn = refiner(rois, positions)

    print(f"\nOutput shapes:")
    print(f"  Classification logits: {logits.shape}")
    print(f"  BBox deltas: {deltas.shape}")
    if attn is not None:
        print(f"  Attention weights: {attn.shape}")

    # Without positions the attention path is skipped and weights come back None
    print("\nTesting forward pass without cross-ROI attention...")
    logits_np, deltas_np, attn_np = refiner(rois, roi_positions=None)
    assert attn_np is None, "Attention weights should be None when positions not provided"

    print("\n All tests passed!")


# Run the smoke test when this module is executed as a script.
if __name__ == '__main__':
    test_improved_refiner()
