"""
MCP-VAE Model

实现完整的结构化VAE模型，嵌入MCP博弈求解器
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, Optional, Tuple, List
import numpy as np

from .encoder import GaussianEncoder
from .decoder import StructuredDecoder, DifferentiableGameSolver


class MCP_VAE(nn.Module):
    """
    MCP-VAE: a variational autoencoder with an embedded mixed-complementarity
    problem (MCP) game solver.

    Port of the Julia ``MCP_VAE`` struct.  The core idea is that the game
    solver is part of the decoder, which makes the encode -> decode -> solve
    pipeline end-to-end differentiable.
    """

    def __init__(
        self,
        observation_dim: int,
        latent_dim: int = 16,
        game_param_dim: int = 2,
        encoder_hidden_dims: Optional[List[int]] = None,
        decoder_hidden_dims: Optional[List[int]] = None,
        lane_width: float = 0.6,
        collision_radius: float = 0.08,
        mcp_game_solver: Optional[object] = None,
        observation_indices: Optional[Dict[str, List[int]]] = None
    ):
        """
        Args:
            observation_dim: size of a flattened observation vector
            latent_dim: latent dimensionality
            game_param_dim: number of game parameters produced by the decoder
            encoder_hidden_dims: encoder MLP widths (default ``[128, 128]``)
            decoder_hidden_dims: decoder MLP widths (default ``[5*latent_dim]*2``)
            lane_width: lane-geometry constant forwarded to the decoder
            collision_radius: collision constant forwarded to the decoder
            mcp_game_solver: optional solver; may also be attached later via
                :meth:`set_mcp_solver`
            observation_indices: index sets for partial observation; defaults
                to an even ego/opponent split of the observation vector
        """
        super().__init__()

        self.observation_dim = observation_dim
        self.latent_dim = latent_dim
        self.game_param_dim = game_param_dim
        self.lane_width = lane_width
        self.collision_radius = collision_radius

        # Default network widths
        if encoder_hidden_dims is None:
            encoder_hidden_dims = [128, 128]
        if decoder_hidden_dims is None:
            decoder_hidden_dims = [5 * latent_dim, 5 * latent_dim]

        # Encoder: observation -> latent distribution parameters
        self.encoder = GaussianEncoder(
            input_dim=observation_dim,
            latent_dim=latent_dim,
            hidden_dims=encoder_hidden_dims
        )

        # Decoder: latent -> game parameters -> trajectories
        self.decoder = StructuredDecoder(
            latent_dim=latent_dim,
            game_param_dim=game_param_dim,
            observation_dim=observation_dim,
            hidden_dims=decoder_hidden_dims,
            lane_width=lane_width,
            collision_radius=collision_radius
        )

        # MCP game solver (may be None until set_mcp_solver is called)
        self.mcp_game_solver = mcp_game_solver

        # Observation index sets (used for partial observation);
        # default: first half ego, second half opponent.
        self.observation_indices = observation_indices or {
            'ego_idx': list(range(observation_dim // 2)),
            'opp_idx': list(range(observation_dim // 2, observation_dim))
        }

        # Model dimension summary
        self.dims = {
            'dim_x': observation_dim,
            'dim_z': latent_dim,
            'dim_hidden': encoder_hidden_dims[0],
            'dim_game_objective': game_param_dim
        }

    def encode(self, observations: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Encode observations into latent-distribution parameters.

        Args:
            observations: observation batch [batch_size, observation_dim]

        Returns:
            Dict of latent distribution parameters (as produced by the encoder).
        """
        return self.encoder(observations)

    def decode(
        self,
        z: torch.Tensor,
        initial_states: Optional[torch.Tensor] = None,
        solve_game: bool = True
    ) -> Dict[str, torch.Tensor]:
        """
        Decode latent variables back to observations.

        Args:
            z: latent variables [batch_size, latent_dim]
            initial_states: initial states [batch_size, state_dim]
            solve_game: whether to run the embedded game solver

        Returns:
            Dict of decoder outputs.
        """
        return self.decoder(z, initial_states, solve_game)

    def reparameterize(self, mu: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
        """
        Reparameterization trick: z = mu + eps * std, eps ~ N(0, I).

        Args:
            mu: mean [batch_size, latent_dim]
            std: standard deviation [batch_size, latent_dim]

        Returns:
            Sampled latent variables [batch_size, latent_dim]
        """
        eps = torch.randn_like(std)
        return mu + eps * std

    def forward(
        self,
        observations: torch.Tensor,
        initial_states: Optional[torch.Tensor] = None,
        gt_goals: Optional[torch.Tensor] = None,
        solve_game: bool = True
    ) -> Dict[str, torch.Tensor]:
        """
        Full forward pass: encode, sample, decode.

        Args:
            observations: observation batch [batch_size, observation_dim]
            initial_states: initial states [batch_size, state_dim]
            gt_goals: ground-truth goals [batch_size, goal_dim]; currently
                unused here, accepted for interface compatibility with training code
            solve_game: whether to run the embedded game solver

        Returns:
            Dict with encoding, decoding, and input tensors.
        """
        # Encode
        encoding = self.encode(observations)
        mu, std = encoding['mu'], encoding['std']

        # Reparameterized sample
        z = self.reparameterize(mu, std)

        # Decode
        decoding = self.decode(z, initial_states, solve_game)

        # Assemble results
        result = {
            # Encoding outputs
            'z': z,
            'mu': mu,
            'std': std,
            'logvar': encoding['logvar'],

            # Decoding outputs
            'game_params': decoding['game_params'],
            'raw_params': decoding['raw_params'],

            # Inputs
            'observations': observations,
        }

        # Only present when the game was actually solved
        if 'trajectories' in decoding:
            result['trajectories'] = decoding['trajectories']

        # Only present when the decoder reconstructs observations
        if 'observations' in decoding:
            result['reconstructed_obs'] = decoding['observations']

        return result

    def sample_prior(self, num_samples: int, device: str = 'cpu') -> torch.Tensor:
        """
        Sample latent variables from the standard-normal prior.

        Args:
            num_samples: number of samples
            device: target device

        Returns:
            Sampled latents [num_samples, latent_dim]
        """
        z = torch.randn(num_samples, self.latent_dim, device=device)
        return z

    def sample_posterior(
        self,
        observations: torch.Tensor,
        num_samples: int = 1
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Sample latent variables from the approximate posterior.

        Args:
            observations: observation batch [batch_size, observation_dim]
            num_samples: number of samples per observation

        Returns:
            - sampled latents [batch_size, num_samples, latent_dim]
            - encoder outputs
        """
        encoding = self.encode(observations)
        mu, std = encoding['mu'], encoding['std']

        # Expand along a new sample dimension
        mu_exp = mu.unsqueeze(1).expand(-1, num_samples, -1)
        std_exp = std.unsqueeze(1).expand(-1, num_samples, -1)

        # Sample
        eps = torch.randn_like(mu_exp)
        z_samples = mu_exp + eps * std_exp

        return z_samples, encoding

    def generate_from_prior(
        self,
        num_samples: int,
        initial_states: Optional[torch.Tensor] = None,
        device: str = 'cpu'
    ) -> Dict[str, torch.Tensor]:
        """
        Generate samples by decoding prior draws (no gradients).

        Args:
            num_samples: number of samples to generate
            initial_states: initial states passed to the decoder
            device: target device

        Returns:
            Decoder output dict, augmented with the sampled latents under 'z'.
        """
        # Draw from the prior
        z = self.sample_prior(num_samples, device)

        # Decode without tracking gradients (generation only)
        with torch.no_grad():
            result = self.decode(z, initial_states, solve_game=True)

        result['z'] = z
        return result

    def compute_elbo_loss(
        self,
        observations: torch.Tensor,
        initial_states: Optional[torch.Tensor] = None,
        gt_goals: Optional[torch.Tensor] = None,
        beta: float = 1.0,
        normalization_params: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        normalize_reconstruction: bool = False,
        observations_normalized: bool = False
    ) -> Dict[str, torch.Tensor]:
        """
        Compute the ELBO loss.

        Args:
            observations: observation batch [batch_size, observation_dim]
            initial_states: initial states forwarded to the decoder
            gt_goals: ground-truth goals forwarded to :meth:`forward`
            beta: KL-divergence weight
            normalization_params: (mean, std) statistics from the training set
            normalize_reconstruction: also normalize the decoder output
            observations_normalized: whether the input is already normalized
                (decides whether the target is normalized again)

        Returns:
            Dict with 'elbo_loss', 'recon_loss', 'kl_loss', 'beta'.
        """
        # Forward pass
        result = self.forward(observations, initial_states, gt_goals, solve_game=True)

        # Reconstruction loss
        norm_mean, norm_std = (None, None)
        if normalization_params is not None:
            norm_mean, norm_std = normalization_params

        if 'reconstructed_obs' in result:
            recon = result['reconstructed_obs']
            target = observations

            if norm_mean is not None and norm_std is not None:
                norm_mean = norm_mean.to(observations.device)
                norm_std = norm_std.to(observations.device)
                # Epsilon matches the module-level compute_elbo_loss and guards
                # against division by zero for constant observation dimensions.
                if not observations_normalized:
                    target = (target - norm_mean) / (norm_std + 1e-5)
                if normalize_reconstruction:
                    recon = (recon - norm_mean) / (norm_std + 1e-5)

            recon_loss = F.mse_loss(
                recon,
                target,
                reduction='mean'
            )
        else:
            # Decoder produced no reconstruction: no reconstruction term
            recon_loss = torch.tensor(0.0, device=observations.device)

        # KL-divergence loss
        kl_loss = self._compute_kl_divergence(result['mu'], result['std'])

        # Total loss
        elbo_loss = recon_loss + beta * kl_loss

        return {
            'elbo_loss': elbo_loss,
            'recon_loss': recon_loss,
            'kl_loss': kl_loss,
            'beta': beta
        }

    def _compute_kl_divergence(self, mu: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
        """
        KL divergence KL(q(z|x) || p(z)) against a standard-normal prior.

        Args:
            mu: posterior mean [batch_size, latent_dim]
            std: posterior standard deviation [batch_size, latent_dim]

        Returns:
            Scalar: per-sample KL summed over latent dims, averaged over batch.
        """
        # KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * (sigma^2 + mu^2 - 1 - log(sigma^2))
        var = std.pow(2)
        kl = 0.5 * (var + mu.pow(2) - 1 - var.log())
        return kl.sum(dim=1).mean()

    def set_mcp_solver(self, solver):
        """Attach an MCP solver to the model (and to the decoder if it supports one)."""
        self.mcp_game_solver = solver
        if hasattr(self.decoder, 'param_decoder'):
            self.decoder.param_decoder.set_game_solver(solver)

    def freeze_encoder(self):
        """Freeze encoder parameters."""
        for param in self.encoder.parameters():
            param.requires_grad = False

    def unfreeze_encoder(self):
        """Unfreeze encoder parameters."""
        for param in self.encoder.parameters():
            param.requires_grad = True

    def freeze_decoder(self):
        """Freeze decoder parameters."""
        for param in self.decoder.parameters():
            param.requires_grad = False

    def unfreeze_decoder(self):
        """Unfreeze decoder parameters."""
        for param in self.decoder.parameters():
            param.requires_grad = True


class VAEGameSolver(torch.autograd.Function):
    """
    VAE专用的可微分博弈求解器

    实现从博弈参数到轨迹的可微分映射
    """

    @staticmethod
    def forward(ctx, game_params, mcp_solver, initial_states, *args):
        """前向传播：求解博弈"""
        batch_size = game_params.shape[0]
        trajectories = []

        # 逐个求解博弈（可以并行化）
        for i in range(batch_size):
            params_i = game_params[i].detach().cpu().numpy()
            initial_i = initial_states[i].detach().cpu().numpy() if initial_states is not None else None

            # 调用MCP求解器
            traj_i = mcp_solver.solve(params_i, initial_i)
            trajectories.append(torch.from_numpy(traj_i).float())

        trajectories = torch.stack(trajectories).to(game_params.device)

        # 保存信息用于反向传播
        ctx.save_for_backward(game_params, trajectories, initial_states)
        ctx.mcp_solver = mcp_solver

        return trajectories

    @staticmethod
    def backward(ctx, grad_output):
        """反向传播：隐式微分"""
        game_params, trajectories, initial_states = ctx.saved_tensors
        mcp_solver = ctx.mcp_solver

        # 计算雅可比矩阵 ∂trajectory/∂params
        # 使用有限差分近似或解析梯度
        grad_params = torch.zeros_like(game_params)

        eps = 1e-6
        for i in range(game_params.shape[1]):  # 对每个参数维度
            # 正向扰动
            params_pos = game_params.clone()
            params_pos[:, i] += eps

            # 负向扰动
            params_neg = game_params.clone()
            params_neg[:, i] -= eps

            # 计算有限差分
            # 这里应该重新求解博弈，但为了效率可以使用近似
            grad_params[:, i] = torch.sum(grad_output * (trajectories - trajectories), dim=1) / (2 * eps)

        return grad_params, None, None, None


class DroneVAE(nn.Module):
    """
    Drone VAE operating in physical space (kept in sync with
    3demo/vae_demo/train_vae.py).

    Thin wrapper around an externally constructed encoder/decoder pair:
    encode the observations, sample a latent via the encoder's own sampler,
    and decode back to physical-space observations.
    """

    def __init__(
        self,
        encoder: GaussianEncoder,
        decoder: nn.Module
    ):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(
        self,
        observations: torch.Tensor,
        initial_states: torch.Tensor,
        ego_goal: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        # Encode to posterior parameters
        posterior = self.encoder(observations)

        # Reparameterized sample drawn by the encoder itself
        latent = self.encoder.sample(posterior['mu'], posterior['std'])

        # Decode (outputs are physical-space observations)
        decoded = self.decoder(latent, initial_states, ego_goal)

        outputs = {key: posterior[key] for key in ('mu', 'logvar', 'std')}
        outputs['z'] = latent
        outputs['opponent_goals'] = decoded['opponent_goals']
        outputs['states'] = decoded['states']
        outputs['controls'] = decoded['controls']
        outputs['observations'] = decoded['observations']
        return outputs


# ========== Script-style utilities (aligned with 3demo/vae_demo/train_vae.py) ==========

def compute_elbo_loss(
    observations: torch.Tensor,
    reconstructed_obs: torch.Tensor,
    mu: torch.Tensor,
    logvar: torch.Tensor,
    beta: float = 1.0,
    normalization_params: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    observations_normalized: bool = False,
    normalize_reconstruction: bool = False,
) -> Tuple[torch.Tensor, Dict[str, float]]:
    """
    Sample-level ELBO loss (kept consistent with the training script).

    Args:
        observations: input observations (possibly already normalized)
        reconstructed_obs: decoder output in physical scale
        mu/logvar: encoder outputs
        beta: KL weight
        normalization_params: (mean, std) training-set statistics used to
            normalize targets and/or reconstructions
        observations_normalized: whether the input is already normalized
        normalize_reconstruction: apply the same normalization to the
            reconstruction

    Returns:
        (total_loss tensor, dict of scalar diagnostics)
    """
    target, recon = observations, reconstructed_obs

    if normalization_params is not None:
        mean_stat, std_stat = normalization_params
        mean_stat = mean_stat.to(observations.device)
        std_stat = std_stat.to(observations.device)
        denom = std_stat + 1e-5  # epsilon guards against zero std
        if not observations_normalized:
            target = (target - mean_stat) / denom
        if normalize_reconstruction:
            recon = (recon - mean_stat) / denom

    # Per-sample squared error summed over features, then averaged over batch
    recon_loss = ((target - recon) ** 2).sum(dim=1).mean()

    # Clamp logvar for numerical stability before exponentiating
    clipped_logvar = logvar.clamp(min=-5.0, max=5.0)
    kl_divergence = (
        -0.5 * (1 + clipped_logvar - mu ** 2 - clipped_logvar.exp()).sum(dim=1)
    ).mean()

    total_loss = recon_loss + beta * kl_divergence

    loss_dict = {
        'total': total_loss.item(),
        'reconstruction': recon_loss.item(),
        'kl_divergence': kl_divergence.item(),
        'mu_norm': torch.norm(mu).item(),
        'mu_mean': mu.mean().item(),
        'logvar_mean': logvar.mean().item(),
        'variance_mean': clipped_logvar.exp().mean().item()
    }

    return total_loss, loss_dict
