"""
VAE Encoder for Hi-C Semantic Encoding
Supports both standalone VAE training and integration with diffusion models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .nn import normalization, zero_module

class VAEEncoder(nn.Module):
    """
    VAE Encoder for Hi-C matrices.

    Maps a Hi-C contact map to the parameters (mu, logvar) of a diagonal
    Gaussian posterior over a flat latent vector, via a convolutional
    backbone with optional self-attention at the deepest stages.
    """

    def __init__(
        self,
        in_channels=1,
        latent_dim=256,
        base_channels=64,
        channel_multipliers=(1, 2, 4, 8),  # tuple: avoid mutable default
        num_res_blocks=2,
        dropout=0.1,
        use_attention=True,
        beta=1e-3,  # KL weight
    ):
        """
        Args:
            in_channels: channels of the input matrix (1 for raw Hi-C).
            latent_dim: size of the latent vector.
            base_channels: channel count after the initial convolution.
            channel_multipliers: per-stage multipliers of ``base_channels``;
                spatial size halves between consecutive stages.
            num_res_blocks: residual blocks per stage.
            dropout: dropout rate inside residual blocks.
            use_attention: add self-attention in the two deepest stages.
            beta: KL weight (stored on the module; not used internally).
        """
        super().__init__()

        self.in_channels = in_channels
        self.latent_dim = latent_dim
        self.beta = beta

        # Encoder network
        self.encoder_blocks = nn.ModuleList()
        curr_channels = in_channels

        # Initial conv lifts the input to base_channels.
        self.initial_conv = nn.Conv2d(
            curr_channels, base_channels, 3, padding=1
        )
        curr_channels = base_channels

        # Downsampling stages
        for i, mult in enumerate(channel_multipliers):
            out_channels = base_channels * mult

            # Residual blocks (first one changes the channel count)
            for _ in range(num_res_blocks):
                self.encoder_blocks.append(
                    VAEResBlock(
                        curr_channels, out_channels, dropout=dropout
                    )
                )
                curr_channels = out_channels

            # Attention only at the two deepest (lowest-resolution) stages,
            # where the quadratic cost over H*W positions is affordable.
            if use_attention and i >= len(channel_multipliers) - 2:
                self.encoder_blocks.append(
                    VAEAttentionBlock(curr_channels)
                )

            # Strided conv halves the spatial size (except after last stage)
            if i < len(channel_multipliers) - 1:
                self.encoder_blocks.append(
                    nn.Conv2d(curr_channels, out_channels, 3, stride=2, padding=1)
                )

        # Global pooling collapses spatial dims; two heads emit the
        # Gaussian posterior parameters.
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.fc_mu = nn.Linear(curr_channels, latent_dim)
        self.fc_logvar = nn.Linear(curr_channels, latent_dim)

        # Initialize weights
        self.apply(self._init_weights)
        # BUG FIX: self.apply() above re-initialized every Conv2d with Kaiming
        # noise, which silently undid the zero initialization that
        # VAEAttentionBlock applies to its output projection via zero_module
        # (zeroed proj_out makes each attention block start as an identity
        # residual). Restore that intent here.
        for m in self.modules():
            if isinstance(m, VAEAttentionBlock):
                nn.init.zeros_(m.proj_out.weight)
                if m.proj_out.bias is not None:
                    nn.init.zeros_(m.proj_out.bias)

    def _init_weights(self, module):
        """Kaiming init for convs; small-normal init for the linear heads."""
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, 0, 0.02)
            nn.init.zeros_(module.bias)

    def encode(self, x):
        """
        Encode input to latent parameters.

        Args:
            x: input tensor [B, C, H, W]

        Returns:
            mu: mean of latent distribution [B, latent_dim]
            logvar: log variance of latent distribution [B, latent_dim]
        """
        # Initial conv
        h = F.relu(self.initial_conv(x))

        # Pass through encoder blocks
        for block in self.encoder_blocks:
            h = block(h)

        # Global pooling
        h = self.global_pool(h)  # [B, C, 1, 1]
        h = h.view(h.size(0), -1)  # [B, C]

        # Get latent parameters
        mu = self.fc_mu(h)
        logvar = self.fc_logvar(h)

        return mu, logvar

    def reparameterize(self, mu, logvar):
        """
        Reparameterization trick for VAE sampling.

        Args:
            mu: mean [B, latent_dim]
            logvar: log variance [B, latent_dim]

        Returns:
            z: sampled latent code [B, latent_dim]
        """
        if self.training:
            # z = mu + sigma * eps keeps the sampling differentiable w.r.t.
            # the posterior parameters.
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return mu + eps * std
        else:
            return mu  # Use mean during inference

    def forward(self, x):
        """
        Full forward pass.

        Args:
            x: input tensor [B, C, H, W]

        Returns:
            z: latent code [B, latent_dim]
            mu: mean [B, latent_dim]
            logvar: log variance [B, latent_dim]
        """
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return z, mu, logvar

    def kl_loss(self, mu, logvar):
        """
        Compute KL divergence of N(mu, exp(logvar)) from N(0, I),
        summed over latent dims and averaged over the batch.

        Args:
            mu: mean [B, latent_dim]
            logvar: log variance [B, latent_dim]

        Returns:
            kl_loss: scalar KL divergence loss
        """
        kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
        return kl.mean()


class VAEDecoder(nn.Module):
    """
    VAE Decoder for Hi-C matrices.

    Projects a flat latent code to a small feature map and upsamples it with
    residual blocks, optional self-attention, and transposed convolutions
    back to a full-resolution Hi-C matrix.
    """

    def __init__(
        self,
        latent_dim=256,
        out_channels=1,
        base_channels=64,
        channel_multipliers=(8, 4, 2, 1),  # tuple: avoid mutable default
        num_res_blocks=2,
        dropout=0.1,
        use_attention=True,
        output_size=256,
    ):
        """
        Args:
            latent_dim: size of the latent vector.
            out_channels: channels of the reconstructed matrix.
            channel_multipliers: per-stage multipliers of ``base_channels``,
                deepest first (mirror of the encoder's list).
            num_res_blocks: residual blocks per stage.
            dropout: dropout rate inside residual blocks.
            use_attention: add self-attention in the two deepest stages.
            output_size: spatial size of the reconstruction; must be
                divisible by 2 ** (len(channel_multipliers) - 1).
        """
        super().__init__()

        self.latent_dim = latent_dim
        self.out_channels = out_channels
        self.output_size = output_size

        # Initial spatial size so that the upsampling chain ends at
        # output_size (one 2x upsample between consecutive stages).
        num_downsample = len(channel_multipliers) - 1
        self.init_size = output_size // (2 ** num_downsample)

        # Project latent to initial feature map
        init_channels = base_channels * channel_multipliers[0]
        self.fc_decode = nn.Linear(
            latent_dim, init_channels * self.init_size * self.init_size
        )

        # Decoder blocks
        self.decoder_blocks = nn.ModuleList()
        curr_channels = init_channels

        for i, mult in enumerate(channel_multipliers):
            out_channels_block = base_channels * mult

            # Residual blocks (first one changes the channel count)
            for _ in range(num_res_blocks):
                self.decoder_blocks.append(
                    VAEResBlock(
                        curr_channels, out_channels_block, dropout=dropout
                    )
                )
                curr_channels = out_channels_block

            # Attention at the two deepest stages (mirrors the encoder,
            # whose multiplier list is this one reversed).
            if use_attention and i <= 1:
                self.decoder_blocks.append(
                    VAEAttentionBlock(curr_channels)
                )

            # Transposed conv doubles the spatial size (except after last
            # stage) and already switches to the next stage's channels.
            if i < len(channel_multipliers) - 1:
                next_channels = base_channels * channel_multipliers[i + 1]
                self.decoder_blocks.append(
                    nn.ConvTranspose2d(
                        curr_channels, next_channels, 4, stride=2, padding=1
                    )
                )
                curr_channels = next_channels

        # Final output layer
        self.final_conv = nn.Sequential(
            normalization(curr_channels),
            nn.SiLU(),
            nn.Conv2d(curr_channels, out_channels, 3, padding=1),
        )

        # Initialize weights
        self.apply(self._init_weights)
        # BUG FIX: self.apply() above re-initialized every Conv2d with Kaiming
        # noise, which silently undid the zero initialization that
        # VAEAttentionBlock applies to its output projection via zero_module
        # (zeroed proj_out makes each attention block start as an identity
        # residual). Restore that intent here.
        for m in self.modules():
            if isinstance(m, VAEAttentionBlock):
                nn.init.zeros_(m.proj_out.weight)
                if m.proj_out.bias is not None:
                    nn.init.zeros_(m.proj_out.bias)

    def _init_weights(self, module):
        """Kaiming init for (transposed) convs; small-normal for linears."""
        if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, 0, 0.02)
            nn.init.zeros_(module.bias)

    def forward(self, z):
        """
        Decode latent code to Hi-C matrix.

        Args:
            z: latent code [B, latent_dim]

        Returns:
            x_recon: reconstructed Hi-C matrix [B, out_channels, H, W]
        """
        # Project to feature map [B, init_channels, init_size, init_size]
        h = self.fc_decode(z)
        h = h.view(h.size(0), -1, self.init_size, self.init_size)

        # Pass through decoder blocks
        for block in self.decoder_blocks:
            h = block(h)

        # Final output (raw values; no output activation is applied)
        x_recon = self.final_conv(h)

        return x_recon


class VAEResBlock(nn.Module):
    """Two-convolution residual block with SiLU activations.

    Computes conv -> norm -> SiLU -> dropout -> conv -> norm, adds a
    (possibly 1x1-projected) skip connection, then applies a final SiLU.
    """

    def __init__(self, in_channels, out_channels, dropout=0.1):
        super().__init__()

        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.norm1 = normalization(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.norm2 = normalization(out_channels)
        self.dropout = nn.Dropout(dropout)

        # Project the shortcut only when the channel count changes.
        self.skip = (
            nn.Identity()
            if in_channels == out_channels
            else nn.Conv2d(in_channels, out_channels, 1)
        )

    def forward(self, x):
        shortcut = self.skip(x)

        out = self.conv1(x)
        out = F.silu(self.norm1(out))
        out = self.dropout(out)
        out = self.norm2(self.conv2(out))

        return F.silu(out + shortcut)


class VAEAttentionBlock(nn.Module):
    """Single-head spatial self-attention with a residual connection.

    The output projection is wrapped in ``zero_module`` so the block
    initially behaves as an identity mapping.
    """

    def __init__(self, channels):
        super().__init__()
        self.norm = normalization(channels)
        self.qkv = nn.Conv2d(channels, channels * 3, 1)
        self.proj_out = zero_module(nn.Conv2d(channels, channels, 1))

    def forward(self, x):
        batch, channels, height, width = x.shape
        tokens = height * width

        # Project the normalized input to stacked query/key/value maps.
        qkv = self.qkv(self.norm(x)).view(batch, 3, channels, tokens)
        query, key, value = qkv[:, 0], qkv[:, 1], qkv[:, 2]  # each [B, C, T]

        # Scaled dot-product attention over spatial positions:
        # weights[b, i, j] = softmax_j(q_i . k_j / sqrt(C)).
        scale = 1.0 / (channels ** 0.5)
        weights = torch.softmax(
            torch.bmm(query.transpose(1, 2), key) * scale, dim=-1
        )  # [B, T, T]

        # attended[b, c, i] = sum_j weights[b, i, j] * value[b, c, j]
        attended = torch.bmm(value, weights.transpose(1, 2)).view(
            batch, channels, height, width
        )

        return x + self.proj_out(attended)


class HiCVAE(nn.Module):
    """
    Complete VAE model for Hi-C matrices.

    Wires a VAEEncoder and a VAEDecoder together and provides the combined
    training objective: reconstruction loss plus beta-weighted KL divergence.
    """

    def __init__(
        self,
        in_channels=1,
        latent_dim=256,
        base_channels=64,
        channel_multipliers=[1, 2, 4, 8],
        num_res_blocks=2,
        dropout=0.1,
        use_attention=True,
        beta=1e-3,
        output_size=256,
        reconstruction_loss='mse',
    ):
        super().__init__()

        self.latent_dim = latent_dim
        self.beta = beta
        self.reconstruction_loss = reconstruction_loss

        self.encoder = VAEEncoder(
            in_channels=in_channels,
            latent_dim=latent_dim,
            base_channels=base_channels,
            channel_multipliers=channel_multipliers,
            num_res_blocks=num_res_blocks,
            dropout=dropout,
            use_attention=use_attention,
            beta=beta,
        )

        # The decoder mirrors the encoder, so its multipliers are reversed
        # and its output channel count matches the encoder's input.
        self.decoder = VAEDecoder(
            latent_dim=latent_dim,
            out_channels=in_channels,
            base_channels=base_channels,
            channel_multipliers=channel_multipliers[::-1],
            num_res_blocks=num_res_blocks,
            dropout=dropout,
            use_attention=use_attention,
            output_size=output_size,
        )

    def forward(self, x):
        """
        Encode ``x``, sample a latent code, and decode it back.

        Args:
            x: input Hi-C matrix [B, C, H, W]

        Returns:
            Tuple ``(recon_x, mu, logvar, z)`` where recon_x is the
            reconstruction [B, C, H, W], mu/logvar the posterior parameters
            [B, latent_dim], and z the sampled latent code [B, latent_dim].
        """
        z, mu, logvar = self.encoder(x)
        return self.decoder(z), mu, logvar, z

    def encode_only(self, x):
        """Encode input to latent space (for inference); returns z only."""
        latent, _mu, _logvar = self.encoder(x)
        return latent

    def compute_loss(self, x, recon_x, mu, logvar):
        """
        Compute the VAE objective: reconstruction loss + beta * KL.

        Args:
            x: original input [B, C, H, W]
            recon_x: reconstruction [B, C, H, W] (logits when using 'bce')
            mu: latent mean [B, latent_dim]
            logvar: latent log variance [B, latent_dim]

        Returns:
            Dict with 'total_loss', 'reconstruction_loss', 'kl_loss', 'beta'.

        Raises:
            ValueError: if ``self.reconstruction_loss`` is not 'mse' or 'bce'.
        """
        loss_kind = self.reconstruction_loss
        if loss_kind == 'mse':
            recon_loss = F.mse_loss(recon_x, x, reduction='mean')
        elif loss_kind == 'bce':
            # Expects raw logits from the decoder and targets in [0, 1].
            recon_loss = F.binary_cross_entropy_with_logits(
                recon_x, x, reduction='mean'
            )
        else:
            raise ValueError(f"Unknown reconstruction loss: {self.reconstruction_loss}")

        kl_loss = self.encoder.kl_loss(mu, logvar)
        total_loss = recon_loss + self.beta * kl_loss

        return {
            'total_loss': total_loss,
            'reconstruction_loss': recon_loss,
            'kl_loss': kl_loss,
            'beta': self.beta,
        }