from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml

from models_ext.overlock_local import overlock_t
from src.config import MODEL_CONFIG, DATASET_DIR
from src.models.model_utils import load_dinov2_model

# Read the dataset's class count ('nc') once at import time so every
# ROIRefinerModel instance shares the same default.
# pathlib joins the path robustly (handles trailing separators / Windows)
# instead of raw string concatenation.
# NOTE(review): this performs file I/O on module import; if that becomes a
# problem, move the read into a config loader and pass the count explicitly.
with open(Path(DATASET_DIR) / 'data.yaml', 'r') as f:
    num_classes = yaml.safe_load(f)['nc']


class ROIRefinerModel(nn.Module):
    """Two-branch ROI refinement head.

    Extracts local features with an OverLoCK backbone and semantic patch
    tokens with DINOv2, fuses the two maps with a 1x1 conv, and predicts
    per-ROI classification logits and class-specific bounding-box deltas.
    """

    def __init__(self, device='cpu', unfreeze_layers=2, patch_size=14):
        """
        ROI Refiner Model with configurable feature extractor freezing.

        Args:
            device: Device to run model on
            unfreeze_layers: Number of layers to unfreeze from the end
                           0 = fully frozen
                           -1 = fully unfrozen
                           n > 0 = unfreeze last n layers
            patch_size: ViT patch size of the DINOv2 backbone (14 for
                        ViT-S/14). Used in forward() to recover the 2-D
                        patch-token grid from the flat token sequence;
                        previously hard-coded.
        """
        super().__init__()
        self.device = device
        # Module-level default parsed from the dataset's data.yaml.
        self.num_classes = num_classes
        self.unfreeze_layers = unfreeze_layers
        self.patch_size = patch_size

        # Load feature extractors
        self.overlock_focus = overlock_t().to(device)
        self.dino_semantic = load_dinov2_model(device=device)

        self._apply_freezing(unfreeze_layers)

        # Trainable fusion module and classification/regression heads.
        fusion_in_channels = MODEL_CONFIG['overlock_out_channels'] + MODEL_CONFIG['dino_out_channels']
        # NOTE(review): BatchNorm after ReLU is unconventional (the usual
        # order is Conv -> BN -> ReLU); kept as-is so existing checkpoints
        # keep their behavior -- confirm before reordering.
        self.fusion_module = nn.Sequential(
            nn.Conv2d(fusion_in_channels, MODEL_CONFIG['fusion_out_channels'], 1),
            nn.ReLU(),
            nn.BatchNorm2d(MODEL_CONFIG['fusion_out_channels'])
        ).to(device)

        # Classification and bounding box regression heads.
        # Output dimensions:
        # - classifier: num_classes + 1 (index 0 = background, the rest are
        #   the dataset's foreground classes)
        # - regressor: num_classes * 4 (class-specific bbox deltas for
        #   foreground classes only)
        self.classifier = nn.Linear(MODEL_CONFIG['fusion_out_channels'],
                                    self.num_classes + 1).to(device)
        self.regressor = nn.Linear(MODEL_CONFIG['fusion_out_channels'],
                                   self.num_classes * 4).to(device)

    def _apply_freezing(self, unfreeze_layers):
        """Set requires_grad on both backbones per the unfreeze policy.

        -1 leaves everything trainable; 0 freezes both backbones entirely;
        n > 0 freezes everything, then re-enables the last n OverLoCK
        stages and the last n DINOv2 transformer blocks.
        """
        if unfreeze_layers == -1:
            return  # fully trainable: leave requires_grad untouched

        # Freeze everything first, then selectively re-enable below.
        for param in self.overlock_focus.parameters():
            param.requires_grad = False
        for param in self.dino_semantic.parameters():
            param.requires_grad = False

        if unfreeze_layers == 0:
            return  # fully frozen

        # Unfreeze the last `unfreeze_layers` OverLoCK stages (if the
        # backbone exposes a `stages` container).
        if hasattr(self.overlock_focus, 'stages'):
            total_stages = len(self.overlock_focus.stages)
            for i in range(max(0, total_stages - unfreeze_layers), total_stages):
                for param in self.overlock_focus.stages[i].parameters():
                    param.requires_grad = True

        # Unfreeze the last `unfreeze_layers` DINOv2 transformer blocks.
        if hasattr(self.dino_semantic, 'blocks'):
            total_blocks = len(self.dino_semantic.blocks)
            for i in range(max(0, total_blocks - unfreeze_layers), total_blocks):
                for param in self.dino_semantic.blocks[i].parameters():
                    param.requires_grad = True

    def forward(self, roi_batch):
        """Run one batch of ROI crops through both branches and the heads.

        Args:
            roi_batch: (B, C, H, W) tensor of ROI crops. H and W should be
                multiples of `self.patch_size` so the DINOv2 token grid
                reshapes cleanly -- TODO confirm upstream ROI resizing.

        Returns:
            Tuple of (classification_logits (B, num_classes + 1),
                      bounding_box_deltas   (B, num_classes * 4)).
        """
        # Deepest OverLoCK feature map.
        overlock_feats = self.overlock_focus.forward_features(roi_batch)[-1]

        # DINOv2 flat patch tokens: (B, N_patches, C_dino).
        dino_tokens = self.dino_semantic.forward_features(roi_batch)['x_norm_patchtokens']

        B, _, H_roi, W_roi = roi_batch.shape
        # Recover the 2-D patch grid from the flat token sequence.
        H_dino, W_dino = H_roi // self.patch_size, W_roi // self.patch_size
        dino_feats = dino_tokens.permute(0, 2, 1).reshape(B, -1, H_dino, W_dino)

        # Align the DINO map to the OverLoCK spatial resolution before fusing.
        dino_feats_resized = F.interpolate(dino_feats, size=overlock_feats.shape[2:],
                                           mode='bilinear', align_corners=False)
        combined = torch.cat([overlock_feats, dino_feats_resized], dim=1)
        fused = self.fusion_module(combined)

        # Global average pooling -> one feature vector per ROI.
        pooled = F.adaptive_avg_pool2d(fused, (1, 1)).flatten(1)
        classification_logits = self.classifier(pooled)
        bounding_box_deltas = self.regressor(pooled)

        return classification_logits, bounding_box_deltas