"""
Encoder Networks for VAE

实现VAE的编码器网络，将观察映射到潜在变量的分布参数
"""

from typing import Any, Dict, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F


class Encoder(nn.Module):
    """
    Base MLP encoder.

    Builds a stack of Linear + ReLU layers that maps an observation
    vector to a feature representation. Subclasses attach output heads
    (e.g. mean / log-variance layers) sized to ``self.output_dim``.

    Args:
        input_dim: Dimensionality of the input observation.
        latent_dim: Dimensionality of the latent space (stored for
            subclasses; the base trunk itself does not project to it).
        hidden_dims: Sizes of the hidden layers; defaults to [128, 128].
    """

    def __init__(self, input_dim: int, latent_dim: int,
                 hidden_dims: Optional[list] = None):
        super().__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim

        if hidden_dims is None:
            hidden_dims = [128, 128]

        # Build the feed-forward trunk: Linear -> ReLU for each hidden layer.
        layers = []
        in_dim = input_dim
        for h_dim in hidden_dims:
            layers.append(nn.Linear(in_dim, h_dim))
            layers.append(nn.ReLU())
            in_dim = h_dim

        self.encoder = nn.Sequential(*layers)
        # Width of the last hidden layer; subclasses size their heads to this.
        self.output_dim = in_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Encode a batch of observations.

        Args:
            x: Input observations [batch_size, input_dim]

        Returns:
            Encoded features [batch_size, output_dim]
        """
        return self.encoder(x)


class GaussianEncoder(Encoder):
    """
    Gaussian encoder producing mean and variance parameters of the
    latent posterior.

    Corresponds to the encoder network in the Julia version: maps an
    observation to the parameters (mean and log-variance) of a
    diagonal Gaussian q(z|x).
    """

    def __init__(self, input_dim: int, latent_dim: int,
                 hidden_dims: Optional[list] = None):
        super().__init__(input_dim, latent_dim, hidden_dims)

        # Output heads: posterior mean and log-variance.
        self.mu_layer = nn.Linear(self.output_dim, latent_dim)
        self.logvar_layer = nn.Linear(self.output_dim, latent_dim)

        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize every linear layer and zero its bias."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Encode observations into Gaussian posterior parameters.

        Args:
            x: Input observations [batch_size, input_dim]

        Returns:
            Dict with:
            - mu: latent mean [batch_size, latent_dim]
            - logvar: latent log-variance [batch_size, latent_dim]
            - std: latent standard deviation [batch_size, latent_dim]
        """
        h = super().forward(x)

        mu = self.mu_layer(h)
        logvar = self.logvar_layer(h)

        # Key fix: clamp logvar to prevent variance explosion.
        # exp(-5) ~= 0.0067 (minimum variance), exp(5) ~= 148 (maximum
        # variance). Tighter than [-10, 10] since variances are expected
        # to stay near 1.
        logvar = torch.clamp(logvar, min=-5.0, max=5.0)

        # Standard deviation (corresponds to Σ_diag in the Julia version).
        std = torch.exp(0.5 * logvar)

        return {
            'mu': mu,
            'logvar': logvar,
            'std': std
        }

    def sample(self, mu: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
        """
        Reparameterization-trick sampling: z = mu + eps * std, eps ~ N(0, I).

        Args:
            mu: Mean [batch_size, latent_dim]
            std: Standard deviation [batch_size, latent_dim]

        Returns:
            Sampled latent variables [batch_size, latent_dim]
        """
        eps = torch.randn_like(std)
        return mu + eps * std

    def encode_and_sample(self, x: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Encode observations and draw one reparameterized sample.

        Args:
            x: Input observations [batch_size, input_dim]

        Returns:
            - z: sampled latent variables [batch_size, latent_dim]
            - encoding_params: dict of posterior parameters (mu/logvar/std)
        """
        encoding_params = self.forward(x)
        z = self.sample(encoding_params['mu'], encoding_params['std'])
        return z, encoding_params


class MultivariateGaussianEncoder(GaussianEncoder):
    """
    Multivariate Gaussian encoder with optional full covariance.

    When ``use_full_covariance`` is True, the network predicts the
    entries of a lower-triangular Cholesky factor L and returns the
    covariance Σ = L·Lᵀ; otherwise it falls back to the parent's
    diagonal parameterization.
    """

    def __init__(self, input_dim: int, latent_dim: int,
                 hidden_dims: Optional[list] = None,
                 use_full_covariance: bool = False):
        super().__init__(input_dim, latent_dim, hidden_dims)

        self.use_full_covariance = use_full_covariance

        if use_full_covariance:
            # Number of free entries in a lower-triangular d x d matrix.
            self.cov_dim = latent_dim * (latent_dim + 1) // 2
            self.cov_layer = nn.Linear(self.output_dim, self.cov_dim)
            # super().__init__ ran _init_weights() before cov_layer existed,
            # so initialize it here to match the other layers (Xavier/zeros).
            nn.init.xavier_uniform_(self.cov_layer.weight)
            nn.init.zeros_(self.cov_layer.bias)
        else:
            # Diagonal covariance: reuse the parent's log-variance head.
            self.cov_layer = self.logvar_layer

    def _tril_to_matrix(self, tril_vec: torch.Tensor) -> torch.Tensor:
        """
        Convert batched lower-triangular vectors into covariance matrices.

        The vector is laid out row-major over the lower triangle:
        (0,0), (1,0), (1,1), (2,0), ... Diagonal entries pass through
        softplus (+1e-6) so the Cholesky factor L has a strictly positive
        diagonal; the covariance is then Σ = L·Lᵀ.

        Args:
            tril_vec: Lower-triangular entries [batch_size, cov_dim]

        Returns:
            Covariance matrices [batch_size, latent_dim, latent_dim]
        """
        batch_size = tril_vec.shape[0]
        d = self.latent_dim
        # Preserve the input dtype (the original hard-coded float32).
        L = torch.zeros(batch_size, d, d,
                        device=tril_vec.device, dtype=tril_vec.dtype)

        # Vectorized scatter into the lower triangle; tril_indices yields
        # row-major order, matching the original per-element loop.
        rows, cols = torch.tril_indices(d, d, device=tril_vec.device)
        L[:, rows, cols] = tril_vec

        # Softplus keeps the diagonal of L strictly positive.
        diag = torch.arange(d, device=tril_vec.device)
        L[:, diag, diag] = F.softplus(L[:, diag, diag]) + 1e-6

        # Covariance Σ = L·Lᵀ.
        return torch.bmm(L, L.transpose(-2, -1))

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Encode observations into posterior parameters.

        Args:
            x: Input observations [batch_size, input_dim]

        Returns:
            Dict with 'mu' plus either ('cov', 'cov_params') when using a
            full covariance, or ('logvar', 'std') for the diagonal case.
        """
        h = self.encoder(x)
        mu = self.mu_layer(h)

        if self.use_full_covariance:
            cov_params = self.cov_layer(h)
            cov = self._tril_to_matrix(cov_params)

            return {
                'mu': mu,
                'cov': cov,
                'cov_params': cov_params
            }

        # Diagonal covariance. Clamp logvar exactly like the parent class
        # does, so this path gets the same protection against variance
        # explosion (the original subclass forward skipped the clamp).
        logvar = torch.clamp(self.logvar_layer(h), min=-5.0, max=5.0)
        std = torch.exp(0.5 * logvar)

        return {
            'mu': mu,
            'logvar': logvar,
            'std': std
        }