#!/usr/bin/env python3

"""
Modern visual encoder module for Neural-SLAM
Replaces traditional ResNet with modern architectures like EfficientNet
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights

class SpatialAttention(nn.Module):
    """CBAM-style attention block: channel attention followed by spatial attention.

    Despite the class name, this module applies a channel-attention stage first
    (shared bottleneck MLP over avg-/max-pooled descriptors) and then a spatial
    stage (7x7 conv over channel-wise avg/max maps), matching the CBAM design.

    Args:
        in_channels: number of input feature channels.
        reduction: bottleneck reduction ratio for the channel-attention MLP.
    """
    
    def __init__(self, in_channels, reduction=16):
        super().__init__()
        
        # Global descriptors feeding the channel-attention MLP.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        
        # Shared bottleneck MLP implemented as 1x1 convolutions.
        self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, bias=False)
        self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        
        # BUG FIX: this 7x7 conv used to be constructed inside forward(), which
        # re-initialized it with random weights on every call (non-deterministic
        # outputs) and kept it out of the module's parameters (never trained).
        # Registering it here makes it trainable and deterministic.
        self.spatial_conv = nn.Conv2d(2, 1, 7, padding=3, bias=False)
        
    def forward(self, x):
        """Apply channel then spatial attention to x of shape [B, C, H, W]."""
        # Channel attention: shared MLP over avg- and max-pooled descriptors.
        avg_out = self.conv2(self.relu(self.conv1(self.avg_pool(x))))
        max_out = self.conv2(self.relu(self.conv1(self.max_pool(x))))
        channel_att = self.sigmoid(avg_out + max_out)
        
        x = x * channel_att
        
        # Spatial attention: 7x7 conv over channel-wise avg/max maps.
        # BUG FIX: the old code applied sigmoid both before and after the conv;
        # a single sigmoid on the conv output is the intended CBAM form.
        avg_map = torch.mean(x, dim=1, keepdim=True)
        max_map, _ = torch.max(x, dim=1, keepdim=True)
        spatial_att = self.sigmoid(self.spatial_conv(torch.cat([avg_map, max_map], dim=1)))
        
        return x * spatial_att

class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel attention.

    Pools the input globally (both average and max), pushes each descriptor
    through a shared bottleneck MLP, and rescales the input channels by the
    sigmoid of the summed scores.

    Args:
        in_channels: number of input feature channels.
        reduction: bottleneck reduction ratio for the shared MLP.
    """
    
    def __init__(self, in_channels, reduction=16):
        super().__init__()
        
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        
        # Shared bottleneck MLP, expressed as 1x1 convolutions so it operates
        # directly on [B, C, 1, 1] pooled descriptors.
        self.fc = nn.Sequential(
            nn.Conv2d(in_channels, in_channels // reduction, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(in_channels // reduction, in_channels, 1, bias=False),
        )
        
        self.sigmoid = nn.Sigmoid()
        
    def forward(self, x):
        """Rescale channels of x [B, C, H, W] by learned attention weights."""
        scores = self.fc(self.avg_pool(x)) + self.fc(self.max_pool(x))
        return x * self.sigmoid(scores)

class EfficientNetEncoder(nn.Module):
    """Visual encoder built on EfficientNet-B0 with optional attention.

    Depending on the forward() flag, produces either a pooled global feature
    vector of size `output_dim` or a spatially-resolved feature map.

    Args:
        output_dim: dimensionality of the global feature vector.
        pretrained: load ImageNet weights for the backbone when True.
        use_attention: apply channel + spatial attention to backbone features.
    """
    
    def __init__(self, output_dim=512, pretrained=True, use_attention=True):
        super().__init__()
        
        self.output_dim = output_dim
        self.use_attention = use_attention
        
        # EfficientNet-B0 backbone, optionally initialized from ImageNet.
        weights = EfficientNet_B0_Weights.IMAGENET1K_V1 if pretrained else None
        self.backbone = efficientnet_b0(weights=weights)
        
        # The classification head is unused; replace with a no-op.
        self.backbone.classifier = nn.Identity()
        
        # EfficientNet-B0's feature extractor emits 1280 channels.
        self.backbone_out_dim = 1280
        
        # Optional attention refinement of backbone features.
        if use_attention:
            self.channel_attention = ChannelAttention(self.backbone_out_dim)
            self.spatial_attention = SpatialAttention(self.backbone_out_dim)
        
        # Global head: pool, then project 1280 -> 2*output_dim -> output_dim.
        self.feature_projection = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(self.backbone_out_dim, output_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(output_dim * 2, output_dim),
            nn.ReLU(inplace=True),
        )
        
        # Spatial head: convolutional projection used when per-location
        # features are requested instead of a pooled vector.
        self.conv_projection = nn.Sequential(
            nn.Conv2d(self.backbone_out_dim, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64, 3, padding=1),
            nn.ReLU(inplace=True),
        )
        
    def forward(self, x, return_spatial_features=False):
        """Encode a batch of RGB images.
        
        Args:
            x: [batch_size, 3, height, width] RGB images.
            return_spatial_features: when True, return the conv-projected
                spatial feature map instead of a pooled vector.
        
        Returns:
            [batch_size, output_dim] global features, or a spatial feature map.
        """
        # Backbone feature extraction; output is [B, 1280, H/32, W/32].
        feats = self.backbone.features(x)
        
        # Optional attention refinement (channel first, then spatial).
        if self.use_attention:
            feats = self.spatial_attention(self.channel_attention(feats))
        
        if return_spatial_features:
            # Spatial feature maps for downstream spatial reasoning.
            return self.conv_projection(feats)
        
        # Pooled global feature vector.
        return self.feature_projection(feats)

class ModernVisualEncoder(nn.Module):
    """
    Modern visual encoder that can replace ResNet in Neural-SLAM.
    Supports both EfficientNet and ResNet backbones with attention.

    Args:
        output_dim: dimensionality of the returned feature vector.
        backbone: one of 'efficientnet', 'resnet18', 'resnet50'.
        pretrained: load ImageNet weights for the chosen backbone.
        use_attention: add channel (and, for EfficientNet, spatial) attention.
        input_channels: accepted for interface compatibility; currently unused
            (all backbones expect 3-channel input).

    Raises:
        ValueError: if `backbone` is not one of the supported names.
    """
    
    def __init__(self, output_dim=512, backbone='efficientnet', pretrained=True, 
                 use_attention=True, input_channels=3):
        super().__init__()
        
        self.output_dim = output_dim
        self.backbone_type = backbone
        self.use_attention = use_attention
        
        if backbone == 'efficientnet':
            self.encoder = EfficientNetEncoder(output_dim, pretrained, use_attention)
        elif backbone == 'resnet18':
            self.encoder = self._create_resnet_encoder(output_dim, pretrained, use_attention)
        elif backbone == 'resnet50':
            self.encoder = self._create_resnet50_encoder(output_dim, pretrained, use_attention)
        else:
            raise ValueError(f"Unsupported backbone: {backbone}")
    
    def _create_resnet_encoder(self, output_dim, pretrained, use_attention):
        """Create ResNet18-based encoder with modern improvements."""
        
        # Load ResNet18. FIX: the `pretrained=` flag is deprecated/removed in
        # recent torchvision; use the weights-enum API, consistent with the
        # EfficientNet loading elsewhere in this module.
        resnet = models.resnet18(
            weights=models.ResNet18_Weights.IMAGENET1K_V1 if pretrained else None
        )
        
        # Drop the avgpool and fc layers, keeping only the conv feature stack.
        backbone_features = nn.Sequential(*list(resnet.children())[:-2])
        backbone_out_dim = 512  # ResNet18 final feature dim
        
        # Assemble backbone -> (attention) -> pooled linear projection.
        layers = [backbone_features]
        
        if use_attention:
            layers.append(ChannelAttention(backbone_out_dim))
        
        layers.extend([
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(backbone_out_dim, output_dim),
            nn.ReLU(inplace=True)
        ])
        
        return nn.Sequential(*layers)
    
    def _create_resnet50_encoder(self, output_dim, pretrained, use_attention):
        """Create ResNet50-based encoder with modern improvements."""
        
        # Load ResNet50 via the weights-enum API (see _create_resnet_encoder).
        resnet = models.resnet50(
            weights=models.ResNet50_Weights.IMAGENET1K_V1 if pretrained else None
        )
        
        # Drop the avgpool and fc layers, keeping only the conv feature stack.
        backbone_features = nn.Sequential(*list(resnet.children())[:-2])
        backbone_out_dim = 2048  # ResNet50 final feature dim
        
        layers = [backbone_features]
        
        if use_attention:
            layers.append(ChannelAttention(backbone_out_dim))
        
        # Two-stage projection (2048 -> 2*output_dim -> output_dim) with dropout.
        layers.extend([
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(backbone_out_dim, output_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(output_dim * 2, output_dim),
            nn.ReLU(inplace=True)
        ])
        
        return nn.Sequential(*layers)
    
    def forward(self, x, return_spatial_features=False):
        """Forward pass through the visual encoder.

        Args:
            x: [batch_size, 3, height, width] RGB images.
            return_spatial_features: only honored for the EfficientNet backbone.

        Returns:
            [batch_size, output_dim] features (or spatial maps for EfficientNet
            when return_spatial_features is True).
        """
        if self.backbone_type == 'efficientnet':
            return self.encoder(x, return_spatial_features)
        else:
            # ResNet encoders don't support spatial features yet; the flag is
            # silently ignored for them.
            return self.encoder(x)

class LegacyCompatibleVisualEncoder(nn.Module):
    """
    Visual encoder that maintains compatibility with original Neural-SLAM.
    Can be used as a drop-in replacement for ResNet in model.py.

    Args:
        pretrained: load ImageNet weights for the backbone.
        use_modern_backbone: use EfficientNet-B0 when True, ResNet18 otherwise.
    """
    
    def __init__(self, pretrained=True, use_modern_backbone=True):
        super().__init__()
        
        if use_modern_backbone:
            # Use EfficientNet-B0 as backbone; its classifier head is unused.
            self.backbone = efficientnet_b0(weights=EfficientNet_B0_Weights.IMAGENET1K_V1 if pretrained else None)
            self.backbone.classifier = nn.Identity()
            backbone_out_dim = 1280
        else:
            # Use original ResNet18 (conv stack only, avgpool/fc dropped).
            # FIX: `pretrained=` is deprecated/removed in recent torchvision;
            # use the weights-enum API as the EfficientNet branch does.
            resnet = models.resnet18(
                weights=models.ResNet18_Weights.IMAGENET1K_V1 if pretrained else None
            )
            self.backbone = nn.Sequential(*list(resnet.children())[:-2])
            backbone_out_dim = 512
        
        # Attention enhancement over backbone feature maps.
        self.attention = ChannelAttention(backbone_out_dim)
        
        # 1x1 conv projection so the output channel count (512) matches the
        # original ResNet features expected by model.py.
        self.conv_proj = nn.Sequential(
            nn.Conv2d(backbone_out_dim, 512, 1),
            nn.ReLU(inplace=True)
        )
        
    def forward(self, x):
        """
        Forward pass maintaining original ResNet interface.

        Args:
            x: [batch_size, 3, height, width] RGB images.

        Returns:
            [batch_size, 512, H', W'] feature maps compatible with model.py.
        """
        if hasattr(self.backbone, 'features'):
            # EfficientNet path: use its feature-extractor submodule.
            features = self.backbone.features(x)
        else:
            # ResNet path: the Sequential backbone is the feature extractor.
            features = self.backbone(x)
        
        # Apply channel attention.
        features = self.attention(features)
        
        # Project to the channel count the legacy model expects.
        features = self.conv_proj(features)
        
        return features

# Factory functions for easy integration
def create_modern_visual_encoder(output_dim=512, backbone='efficientnet', **kwargs):
    """Build a ModernVisualEncoder; extra kwargs are forwarded unchanged."""
    encoder = ModernVisualEncoder(output_dim=output_dim, backbone=backbone, **kwargs)
    return encoder

def create_legacy_compatible_encoder(pretrained=True, use_modern_backbone=True):
    """Build a LegacyCompatibleVisualEncoder with the given options."""
    encoder = LegacyCompatibleVisualEncoder(
        pretrained=pretrained,
        use_modern_backbone=use_modern_backbone,
    )
    return encoder
