# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class UnderwaterEnhancement(nn.Module):
    """
    Underwater Image Enhancement Network for YOLOv8.

    End-to-end enhancement network meant to run in front of a detector.
    It has four parts:

    1. Color correction branch  -> additive 3-channel RGB residual.
    2. Contrast enhancement branch -> single-channel multiplicative gain map.
    3. Dehazing branch -> single-channel haze/transmission map.
    4. Feature fusion -> merges the input with all intermediate results.

    NOTE: the color-correction head always emits 3 channels and its output
    is added directly to the input, so the network effectively assumes RGB
    input (``in_channels == 3``).
    """

    def __init__(self, in_channels=3, out_channels=3, hidden_channels=64):
        """
        Args:
            in_channels (int): Channels of the input image (expected 3).
            out_channels (int): Channels of the enhanced output image.
            hidden_channels (int): Width of the intermediate conv layers.
        """
        super(UnderwaterEnhancement, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels

        # Color correction branch (predicts an additive RGB residual)
        self.color_conv1 = nn.Conv2d(in_channels, hidden_channels, 3, padding=1)
        self.color_conv2 = nn.Conv2d(hidden_channels, hidden_channels, 3, padding=1)
        self.color_conv3 = nn.Conv2d(hidden_channels, 3, 3, padding=1)

        # Contrast enhancement branch (predicts a [0, 1] gain map)
        self.contrast_conv1 = nn.Conv2d(in_channels, hidden_channels, 3, padding=1)
        self.contrast_conv2 = nn.Conv2d(hidden_channels, hidden_channels, 3, padding=1)
        self.contrast_conv3 = nn.Conv2d(hidden_channels, 1, 3, padding=1)

        # Dehazing branch (predicts a [0, 1] haze map)
        self.dehaze_conv1 = nn.Conv2d(in_channels, hidden_channels, 3, padding=1)
        self.dehaze_conv2 = nn.Conv2d(hidden_channels, hidden_channels, 3, padding=1)
        self.dehaze_conv3 = nn.Conv2d(hidden_channels, 1, 3, padding=1)

        # Feature fusion. The fusion input in forward() concatenates:
        #   x (in_channels), color_correction (3), contrast_map (1),
        #   haze_map (1), color_corrected (in_channels),
        #   contrast_enhanced (in_channels)
        # i.e. 3 * in_channels + 5 channels in total.
        # BUGFIX: the original used `in_channels + 5` (8 for RGB) which
        # mismatched the 14-channel concatenation and crashed on the very
        # first forward pass.
        fusion_in_channels = 3 * in_channels + 5
        self.fusion_conv1 = nn.Conv2d(fusion_in_channels, hidden_channels, 3, padding=1)
        self.fusion_conv2 = nn.Conv2d(hidden_channels, hidden_channels, 3, padding=1)
        self.fusion_conv3 = nn.Conv2d(hidden_channels, out_channels, 3, padding=1)

        # Channel attention applied to the fused features
        self.attention = ChannelAttention(hidden_channels)

        # Activation functions
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()

    def forward(self, x):
        """
        Forward pass of the underwater enhancement network.

        Args:
            x (torch.Tensor): Input underwater image tensor [B, C, H, W].

        Returns:
            torch.Tensor: Enhanced image tensor [B, out_channels, H, W],
                values in (0, 1) due to the final sigmoid.
        """
        # Color correction branch: tanh keeps the residual in (-1, 1)
        color_feat = self.relu(self.color_conv1(x))
        color_feat = self.relu(self.color_conv2(color_feat))
        color_correction = self.tanh(self.color_conv3(color_feat))

        # Contrast enhancement branch: sigmoid gain map in (0, 1)
        contrast_feat = self.relu(self.contrast_conv1(x))
        contrast_feat = self.relu(self.contrast_conv2(contrast_feat))
        contrast_map = self.sigmoid(self.contrast_conv3(contrast_feat))

        # Dehazing branch: sigmoid haze map in (0, 1)
        dehaze_feat = self.relu(self.dehaze_conv1(x))
        dehaze_feat = self.relu(self.dehaze_conv2(dehaze_feat))
        haze_map = self.sigmoid(self.dehaze_conv3(dehaze_feat))

        # Apply the individual corrections
        color_corrected = x + color_correction
        contrast_enhanced = x * contrast_map
        dehazed = x * (1 - haze_map) + haze_map * 0.1  # Simple dehazing

        # Feature fusion over the raw input, the predicted maps and the
        # corrected images (3 * in_channels + 5 channels; see __init__).
        fusion_input = torch.cat([x, color_correction, contrast_map, haze_map,
                                 color_corrected, contrast_enhanced], dim=1)

        fusion_feat = self.relu(self.fusion_conv1(fusion_input))
        fusion_feat = self.attention(fusion_feat)
        fusion_feat = self.relu(self.fusion_conv2(fusion_feat))
        enhanced = self.sigmoid(self.fusion_conv3(fusion_feat))

        return enhanced


class ChannelAttention(nn.Module):
    """Channel attention gate (CBAM-style channel branch).

    Squeezes the spatial dimensions with both average and max pooling,
    feeds each pooled descriptor through a shared bottleneck MLP built
    from 1x1 convolutions, sums the two responses and rescales the input
    channels with a sigmoid gate.
    """

    def __init__(self, channels, reduction=16):
        """
        Args:
            channels (int): Number of input feature channels.
            reduction (int): Bottleneck reduction ratio of the shared MLP.
        """
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Shared two-layer MLP applied to both pooled descriptors.
        squeezed = channels // reduction
        self.fc = nn.Sequential(
            nn.Conv2d(channels, squeezed, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(squeezed, channels, 1, bias=False)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return ``x`` rescaled per channel by the attention weights."""
        scores = self.fc(self.avg_pool(x)) + self.fc(self.max_pool(x))
        gate = self.sigmoid(scores)
        return x * gate


class UnderwaterEnhancementLoss(nn.Module):
    """
    Loss function for underwater image enhancement
    
    Combines multiple loss terms:
    1. L1 loss for pixel-wise reconstruction
    2. Perceptual loss for feature-level similarity
    3. Color consistency loss
    4. Gradient loss for edge preservation
    """
    
    def __init__(self, alpha=1.0, beta=0.1, gamma=0.1, delta=0.1):
        super(UnderwaterEnhancementLoss, self).__init__()
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.delta = delta
        
        self.l1_loss = nn.L1Loss()
        self.mse_loss = nn.MSELoss()
        
    def forward(self, enhanced, target=None):
        """
        Compute enhancement loss
        
        Args:
            enhanced (torch.Tensor): Enhanced image
            target (torch.Tensor, optional): Target image for supervised learning
            
        Returns:
            loss (torch.Tensor): Total loss
        """
        if target is not None:
            # Supervised learning with target image
            l1_loss = self.l1_loss(enhanced, target)
            mse_loss = self.mse_loss(enhanced, target)
            
            # Color consistency loss
            color_loss = self._color_consistency_loss(enhanced, target)
            
            # Gradient loss for edge preservation
            gradient_loss = self._gradient_loss(enhanced, target)
            
            total_loss = (self.alpha * l1_loss + 
                         self.beta * mse_loss + 
                         self.gamma * color_loss + 
                         self.delta * gradient_loss)
        else:
            # Unsupervised learning with enhancement quality metrics
            # Brightness and contrast enhancement
            brightness_loss = self._brightness_loss(enhanced)
            contrast_loss = self._contrast_loss(enhanced)
            
            # Color saturation enhancement
            saturation_loss = self._saturation_loss(enhanced)
            
            # Edge sharpness enhancement
            sharpness_loss = self._sharpness_loss(enhanced)
            
            total_loss = (self.alpha * brightness_loss + 
                         self.beta * contrast_loss + 
                         self.gamma * saturation_loss + 
                         self.delta * sharpness_loss)
        
        return total_loss
    
    def _color_consistency_loss(self, enhanced, target):
        """Color consistency loss between enhanced and target images"""
        enhanced_gray = 0.299 * enhanced[:, 0:1] + 0.587 * enhanced[:, 1:2] + 0.114 * enhanced[:, 2:3]
        target_gray = 0.299 * target[:, 0:1] + 0.587 * target[:, 1:2] + 0.114 * target[:, 2:3]
        return self.l1_loss(enhanced_gray, target_gray)
    
    def _gradient_loss(self, enhanced, target):
        """Gradient loss for edge preservation"""
        enhanced_grad_x = torch.abs(enhanced[:, :, :, 1:] - enhanced[:, :, :, :-1])
        target_grad_x = torch.abs(target[:, :, :, 1:] - target[:, :, :, :-1])
        enhanced_grad_y = torch.abs(enhanced[:, :, 1:, :] - enhanced[:, :, :-1, :])
        target_grad_y = torch.abs(target[:, :, 1:, :] - target[:, :, :-1, :])
        
        grad_loss_x = self.l1_loss(enhanced_grad_x, target_grad_x)
        grad_loss_y = self.l1_loss(enhanced_grad_y, target_grad_y)
        
        return grad_loss_x + grad_loss_y
    
    def _brightness_loss(self, enhanced):
        """Encourage appropriate brightness levels"""
        mean_brightness = torch.mean(enhanced)
        target_brightness = 0.5  # Target mean brightness
        return torch.abs(mean_brightness - target_brightness)
    
    def _contrast_loss(self, enhanced):
        """Encourage high contrast"""
        std_brightness = torch.std(enhanced)
        return -std_brightness  # Negative to maximize contrast
    
    def _saturation_loss(self, enhanced):
        """Encourage color saturation"""
        # Convert to HSV and compute saturation
        hsv = self._rgb_to_hsv(enhanced)
        saturation = hsv[:, 1:2, :, :]
        return -torch.mean(saturation)  # Negative to maximize saturation
    
    def _sharpness_loss(self, enhanced):
        """Encourage edge sharpness"""
        # Laplacian filter for edge detection
        laplacian_kernel = torch.tensor([[0, 1, 0], [1, -4, 1], [0, 1, 0]], 
                                       dtype=torch.float32, device=enhanced.device)
        laplacian_kernel = laplacian_kernel.view(1, 1, 3, 3).repeat(3, 1, 1, 1)
        
        gray = 0.299 * enhanced[:, 0:1] + 0.587 * enhanced[:, 1:2] + 0.114 * enhanced[:, 2:3]
        edges = F.conv2d(gray, laplacian_kernel, padding=1, groups=1)
        return -torch.mean(torch.abs(edges))  # Negative to maximize sharpness
    
    def _rgb_to_hsv(self, rgb):
        """Convert RGB to HSV"""
        r, g, b = rgb[:, 0:1], rgb[:, 1:2], rgb[:, 2:3]
        
        max_val = torch.max(torch.max(r, g), b)
        min_val = torch.min(torch.min(r, g), b)
        diff = max_val - min_val
        
        # Value
        v = max_val
        
        # Saturation
        s = torch.where(max_val != 0, diff / max_val, torch.zeros_like(max_val))
        
        # Hue
        h = torch.zeros_like(r)
        h = torch.where(max_val == r, (g - b) / diff, h)
        h = torch.where(max_val == g, 2 + (b - r) / diff, h)
        h = torch.where(max_val == b, 4 + (r - g) / diff, h)
        h = (h / 6) % 1
        
        return torch.cat([h, s, v], dim=1)


class UnderwaterEnhancementModel(nn.Module):
    """Wrapper bundling the enhancement network with its loss function."""

    def __init__(self, in_channels=3, out_channels=3, hidden_channels=64):
        """
        Args:
            in_channels (int): Channels of the input image.
            out_channels (int): Channels of the enhanced output image.
            hidden_channels (int): Width of the enhancement network.
        """
        super(UnderwaterEnhancementModel, self).__init__()
        self.enhancement_net = UnderwaterEnhancement(in_channels, out_channels, hidden_channels)
        self.loss_fn = UnderwaterEnhancementLoss()

    def forward(self, x, target=None, return_loss=False):
        """Enhance ``x`` and optionally compute the enhancement loss.

        Args:
            x (torch.Tensor): Input underwater image.
            target (torch.Tensor, optional): Reference image; when given,
                the loss is computed in supervised mode, otherwise the
                unsupervised quality terms are used.
            return_loss (bool): When True, also return the loss.

        Returns:
            torch.Tensor: Enhanced image — or, when ``return_loss`` is
            True, the tuple ``(enhanced, loss)``.
        """
        enhanced = self.enhancement_net(x)
        if not return_loss:
            return enhanced
        return enhanced, self.loss_fn(enhanced, target)
