"""
Decoder Networks for VAE

实现VAE的解码器网络，包括嵌入博弈求解器的结构化解码器
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, Optional, Callable, List

from src.mcp.mcp_solver import solve_mcp_game_differentiable


class Decoder(nn.Module):
    """
    Basic MLP decoder: maps latent vectors to flat output vectors.

    Architecture: Linear+ReLU per hidden width, then a final Linear
    projection to ``output_dim`` (no output activation).
    """

    def __init__(self, latent_dim: int, output_dim: int, hidden_dims: list = None):
        super().__init__()
        self.latent_dim = latent_dim
        self.output_dim = output_dim

        # Default to two hidden layers of width 128.
        widths = [128, 128] if hidden_dims is None else list(hidden_dims)

        modules = []
        previous = latent_dim
        for width in widths:
            modules.append(nn.Linear(previous, width))
            modules.append(nn.ReLU())
            previous = width

        # Final projection to the requested output dimension.
        modules.append(nn.Linear(previous, output_dim))

        self.decoder = nn.Sequential(*modules)
        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform weights and zero biases for every linear layer."""
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            nn.init.xavier_uniform_(module.weight)
            nn.init.zeros_(module.bias)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        """
        Decode a batch of latent vectors.

        Args:
            z: latent variables [batch_size, latent_dim]

        Returns:
            Decoded output [batch_size, output_dim]
        """
        decoded = self.decoder(z)
        return decoded


class NormalizationLayer(nn.Module):
    """
    Normalization layer (counterpart of ``MyNormLayer`` in the Julia version).

    Squashes raw VAE outputs with tanh and affinely rescales the two
    coordinates into the lane geometry's valid range (for game parameters).
    """

    def __init__(self, lane_width: float = 0.6, collision_radius: float = 0.08):
        super().__init__()
        self.lane_width = lane_width
        self.collision_radius = collision_radius

        # Derive the affine range/center of each coordinate from the lane
        # geometry; formulas kept numerically identical to the original.
        half_lane = lane_width / 2
        self.x_range = half_lane * 2 - (-(half_lane - collision_radius))
        self.x_center = half_lane * 2 - self.x_range / 2

        self.y_range = -collision_radius - (-half_lane * 2)
        self.y_center = -collision_radius - self.y_range / 2

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Map raw outputs into the valid coordinate range.

        Args:
            x: input tensor [batch_size, 2] (assumed 2-D coordinates)

        Returns:
            Normalized coordinates [batch_size, 2]
        """
        # tanh bounds each coordinate to (-1, 1) before the affine rescale.
        squashed = torch.tanh(x)

        first = squashed[:, 0:1] * (self.x_range / 2) + self.x_center
        second = squashed[:, 1:2] * (self.y_range / 2) + self.y_center

        return torch.cat((first, second), dim=1)


class GameDecoder(nn.Module):
    """
    Game decoder combining a neural network with a game solver.

    Counterpart of the ``decoder + game solver`` structure in the Julia
    version: a small MLP predicts game parameters from the latent code,
    and an optional differentiable solver turns those parameters into
    trajectories.
    """

    def __init__(
        self,
        latent_dim: int,
        game_param_dim: int,
        hidden_dims: list = None,
        lane_width: float = 0.6,
        collision_radius: float = 0.08,
        game_solver: Optional[Callable] = None
    ):
        super().__init__()

        self.latent_dim = latent_dim
        self.game_param_dim = game_param_dim
        self.game_solver = game_solver

        # Default hidden widths scale with the latent dimension.
        if hidden_dims is None:
            hidden_dims = [5 * latent_dim, 5 * latent_dim]

        modules = []
        previous = latent_dim
        for width in hidden_dims:
            modules.append(nn.Linear(previous, width))
            modules.append(nn.ReLU())
            previous = width

        # Head emitting the raw (unnormalized) game parameters.
        modules.append(nn.Linear(previous, game_param_dim))

        self.param_net = nn.Sequential(*modules)

        # Rescales raw parameters into the lane geometry's valid range.
        self.norm_layer = NormalizationLayer(lane_width, collision_radius)

        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform weights and zero biases for every linear layer."""
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            nn.init.xavier_uniform_(module.weight)
            nn.init.zeros_(module.bias)

    def forward(self, z: torch.Tensor, solve_game: bool = True) -> Dict[str, torch.Tensor]:
        """
        Forward pass.

        Args:
            z: latent variables [batch_size, latent_dim]
            solve_game: whether to solve the game (True during training;
                may be False at inference time)

        Returns:
            Dict with the game parameters and, when solved, trajectories.
        """
        raw_params = self.param_net(z)
        game_params = self.norm_layer(raw_params)

        result = {
            'game_params': game_params,
            'raw_params': raw_params
        }

        # Only invoke the solver when requested and available; it must be
        # differentiable so gradients can flow back during training.
        if solve_game and self.game_solver is not None:
            result['trajectories'] = self._solve_game_differentiable(game_params)

        return result

    def _solve_game_differentiable(self, game_params: torch.Tensor) -> torch.Tensor:
        """
        Differentiable game solve.

        Args:
            game_params: game parameters [batch_size, game_param_dim]

        Returns:
            Trajectories [batch_size, trajectory_dim]
        """
        if self.game_solver is None:
            raise ValueError("Game solver not provided")

        # The solver is expected to be wrapped (e.g. in a custom autograd
        # function) so gradients propagate through the solve.
        return self.game_solver(game_params)

    def set_game_solver(self, solver: Callable):
        """Attach or replace the game solver."""
        self.game_solver = solver


class DifferentiableGameSolver(torch.autograd.Function):
    """
    Autograd function wrapping a (non-differentiable) game solver.

    Intended to compute gradients via implicit differentiation — the core
    contribution of the paper. The backward pass below is still a
    placeholder that returns zero gradients.
    """

    @staticmethod
    def forward(ctx, game_params, solver_func, *solver_args):
        """
        Forward pass: solve the game.

        Args:
            ctx: autograd context
            game_params: game parameters (tensor)
            solver_func: solver callable
            solver_args: extra positional arguments for the solver

        Returns:
            Game solution (trajectories)
        """
        # The solver is treated as a black box; gradients are reconstructed
        # in backward(), so run it without autograd tracking.
        with torch.no_grad():
            solution = solver_func(game_params, *solver_args)

        # Stash what backward() needs.
        ctx.save_for_backward(game_params, solution)
        ctx.solver_func = solver_func
        ctx.solver_args = solver_args
        # BUGFIX: backward() must return exactly one gradient per forward()
        # input, and *solver_args is variable-length — remember the count.
        ctx.num_solver_args = len(solver_args)

        return solution

    @staticmethod
    def backward(ctx, grad_output):
        """
        Backward pass: compute input gradients.

        Implicit function theorem: dL/dθ = -[d²F/dx²]⁻¹ [d²F/dxdθ]ᵀ dL/dx

        Args:
            ctx: autograd context
            grad_output: gradient w.r.t. the solution

        Returns:
            Gradients for (game_params, solver_func, *solver_args); only
            game_params is a tensor, so the remaining entries are None.
        """
        game_params, solution = ctx.saved_tensors

        # TODO: implement the full implicit differentiation (Jacobian /
        # Hessian computation from the paper). Zero gradient is a placeholder.
        grad_params = torch.zeros_like(game_params)

        # One entry per forward() input: game_params, solver_func, then each
        # element of *solver_args. The previous hard-coded (grad, None, None)
        # crashed whenever len(solver_args) != 1 because autograd requires
        # the gradient count to match the input count.
        return (grad_params, None) + (None,) * ctx.num_solver_args


# ===== Drone-specific decoder used in train_vae.py =====


class MyNormLayer(torch.nn.Module):
    """
    Normalization layer mapping raw decoder outputs into the physical
    workspace bounds of the drone scenario.
    """

    def __init__(self, lw: float = 0.6, collision_radius: float = 0.08):
        super().__init__()

        # Per-axis affine parameters: output = tanh(raw) * radius + center.
        self.x_center = -3.5
        self.x_radius = 4.0  # x in [-7.5, 0.5]
        self.y_center = -3.5
        self.y_radius = 4.0  # y in [-7.5, 0.5]
        self.z_center = 25.0
        self.z_radius = 0.5  # z in [24.5, 25.5]

        centers = (self.x_center, self.y_center, self.z_center)
        radii = (self.x_radius, self.y_radius, self.z_radius)

        # Soft bounds implied by the tanh mapping, plus explicit hard clamp
        # bounds, registered as buffers so they follow the module's device.
        self.register_buffer(
            'min_bounds', torch.tensor([c - r for c, r in zip(centers, radii)])
        )
        self.register_buffer(
            'max_bounds', torch.tensor([c + r for c, r in zip(centers, radii)])
        )
        self.register_buffer('clamp_min_bounds', torch.tensor([-7.5, -7.5, 24.5]))
        self.register_buffer('clamp_max_bounds', torch.tensor([0.5, 0.5, 25.5]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Squash with tanh, then rescale the first three columns per axis."""
        bounded = torch.tanh(x)
        mapped = torch.zeros_like(bounded)
        axes = (
            (self.x_center, self.x_radius),
            (self.y_center, self.y_radius),
            (self.z_center, self.z_radius),
        )
        for axis, (center, radius) in enumerate(axes):
            mapped[:, axis] = bounded[:, axis] * radius + center
        return mapped


class DroneMCPDecoder(Decoder):
    """
    Drone MCP decoder (Julia-style).

    Maps a latent code to the opponent's goal position, then runs the
    embedded MCP game solver per sample to produce joint state/control
    trajectories and the flattened position observations.
    """

    def __init__(
        self,
        latent_dim: int,
        hidden_dims: List[int],
        output_dim: int,
        mcp_solver,
        observation_dim: int,
        output_bounds: tuple = None
    ):
        """
        Args:
            latent_dim: latent code size
            hidden_dims: hidden widths of the underlying MLP decoder
            output_dim: decoder output size (x/y/z goal -> 3)
            mcp_solver: MCP game solver; must expose ``.game`` providing
                ``step_func(state, control)`` (used in _reconstruct_states)
            observation_dim: flat observation size; forward() assumes it is
                6 * window (3 ego + 3 opponent coordinates per timestep)
            output_bounds: stored but not used in this class
        """
        super().__init__(latent_dim, output_dim, hidden_dims)
        self.mcp_solver = mcp_solver
        self.observation_dim = observation_dim
        self.output_bounds = output_bounds

        # Maps raw MLP output into the drone workspace (tanh + affine).
        self.norm_layer = MyNormLayer(lw=0.6, collision_radius=0.08)

        # Offset the bias of the third (z) output unit so the initial goal
        # guess starts away from zero; presumably to bias the z-coordinate
        # toward the upper part of its range — TODO confirm intent.
        with torch.no_grad():
            last_layer = self.decoder[-1]
            if isinstance(last_layer, torch.nn.Linear):
                last_layer.bias[2] = 1.0

    def decode_to_game_params(self, z: torch.Tensor) -> torch.Tensor:
        """Decode latent codes into clamped opponent goal positions."""
        raw_output = self.decoder(z)
        # Cache a detached copy for inspection/debugging only; it does not
        # participate in the computation graph.
        self._last_raw_output = raw_output.detach()

        normalized_output = self.norm_layer(raw_output)

        # Hard element-wise clamp to the workspace bounds via min/max;
        # gradients still flow for values strictly inside the bounds.
        if hasattr(self.norm_layer, 'clamp_min_bounds') and hasattr(self.norm_layer, 'clamp_max_bounds'):
            min_bounds = self.norm_layer.clamp_min_bounds.to(normalized_output.device)
            max_bounds = self.norm_layer.clamp_max_bounds.to(normalized_output.device)
            normalized_output = torch.max(torch.min(normalized_output, max_bounds), min_bounds)

        return normalized_output

    def forward(
        self,
        z: torch.Tensor,
        initial_states: torch.Tensor,
        ego_goal: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        """
        Decode latents, solve the game per sample, and build observations.

        Args:
            z: latent codes [batch_size, latent_dim]
            initial_states: per-sample joint initial states (detached before
                the solve, so no gradient flows into them)
            ego_goal: the ego agent's known goal (detached as well)

        Returns:
            Dict with 'opponent_goals', 'states', 'controls', 'observations'.

        Raises:
            ValueError: if the assembled observation width does not match
                ``observation_dim``.
        """
        batch_size = z.shape[0]
        opponent_goals = self.decode_to_game_params(z)

        states_list = []
        controls_list = []

        # The MCP solver handles one sample at a time, so iterate the batch.
        for i in range(batch_size):
            # Player 0 = ego (fixed, detached goal); player 1 = opponent
            # (decoded goal, kept in the graph so gradients reach the MLP).
            theta_estimates = {
                0: ego_goal.detach(),
                1: opponent_goals[i]
            }
            initial_state_i = initial_states[i].detach()
            controls_i = solve_mcp_game_differentiable(
                theta_estimates=theta_estimates,
                initial_state=initial_state_i,
                mcp_game_solver=self.mcp_solver
            )
            states_i = self._reconstruct_states(initial_state_i, controls_i)
            states_list.append(states_i)
            controls_list.append(controls_i)

        states = torch.stack(states_list, dim=0)
        controls = torch.stack(controls_list, dim=0)

        # Observation = ego and opponent positions over the first
        # `observation_window` steps. Assumes the joint state is laid out as
        # [ego(0:6), opponent(6:12)] with positions in the first 3 entries
        # of each agent's slice — TODO confirm against the game definition.
        observation_window = self.observation_dim // 6
        ego_positions = states[:, :observation_window, 0:3]
        opponent_positions = states[:, :observation_window, 6:9]

        ego_positions_flat = ego_positions.reshape(batch_size, -1)
        opponent_positions_flat = opponent_positions.reshape(batch_size, -1)
        observations = torch.cat([ego_positions_flat, opponent_positions_flat], dim=1)

        if observations.shape[1] != self.observation_dim:
            raise ValueError(
                f"观测维度不匹配: 期望{self.observation_dim}, 实际{observations.shape[1]}"
            )

        return {
            'opponent_goals': opponent_goals,
            'states': states,
            'controls': controls,
            'observations': observations
        }

    def _reconstruct_states(
        self,
        initial_state: torch.Tensor,
        controls: torch.Tensor
    ) -> torch.Tensor:
        """
        Roll the game dynamics forward from ``initial_state`` under
        ``controls`` and return the state sequence [horizon+1, state_dim].
        """
        game = self.mcp_solver.game
        horizon = controls.shape[0]

        # Graft the (detached) initial state onto the controls' autograd
        # graph by adding a zero-valued term, so the stacked trajectory
        # carries gradients even though the value is unchanged.
        if not initial_state.requires_grad:
            initial_state = initial_state + controls.sum() * 0.0

        states = [initial_state]
        current_state = initial_state

        for k in range(horizon):
            next_state = game.step_func(current_state, controls[k])
            states.append(next_state)
            current_state = next_state

        return torch.stack(states, dim=0)


class StructuredDecoder(nn.Module):
    """
    Structured decoder: the complete decoding pipeline with an embedded
    game solver (parameter decoding -> game solve -> observation model).
    """

    def __init__(
        self,
        latent_dim: int,
        game_param_dim: int,
        observation_dim: int,
        hidden_dims: list = None,
        lane_width: float = 0.6,
        collision_radius: float = 0.08
    ):
        super().__init__()

        self.latent_dim = latent_dim
        self.game_param_dim = game_param_dim
        self.observation_dim = observation_dim

        # Sub-decoder mapping latent codes to (normalized) game parameters.
        self.param_decoder = GameDecoder(
            latent_dim, game_param_dim, hidden_dims,
            lane_width, collision_radius
        )

        # Learnable scales of the Gaussian observation model.
        self.obs_mean_scale = nn.Parameter(torch.ones(1))
        self.obs_std_scale = nn.Parameter(torch.ones(1))

    def forward(
        self,
        z: torch.Tensor,
        initial_state: Optional[torch.Tensor] = None,
        solve_game: bool = True
    ) -> Dict[str, torch.Tensor]:
        """
        Full structured decoding pass.

        Args:
            z: latent variables [batch_size, latent_dim]
            initial_state: initial state [batch_size, state_dim]
            solve_game: whether to solve the game

        Returns:
            Dict with parameters, trajectories, observations, etc.
        """
        decoded = self.param_decoder(z, solve_game=solve_game)

        result = {
            'latent': z,
            'game_params': decoded['game_params'],
            'raw_params': decoded['raw_params']
        }

        trajectories = decoded.get('trajectories')
        if trajectories is not None:
            result['trajectories'] = trajectories
            # Derive deterministic observations from the solved trajectories.
            result['observations'] = self._trajectory_to_observation(trajectories)

        return result

    def _trajectory_to_observation(self, trajectories: torch.Tensor) -> torch.Tensor:
        """
        Deterministic observation model.

        Args:
            trajectories: trajectories [batch_size, trajectory_dim]

        Returns:
            Observations [batch_size, observation_dim]

        Simplified version: the observation is the leading slice of the
        trajectory; replace with the real observation model as needed.
        """
        return trajectories[:, :self.observation_dim]

    def sample_observation(self, trajectories: torch.Tensor) -> torch.Tensor:
        """
        Sample a noisy observation from a trajectory.

        Args:
            trajectories: trajectories [batch_size, trajectory_dim]

        Returns:
            Noisy observation [batch_size, observation_dim]
        """
        # Deterministic part, then homoscedastic Gaussian noise with a
        # learnable scale.
        mean = self._trajectory_to_observation(trajectories)
        sigma = self.obs_std_scale * torch.ones_like(mean)
        return mean + torch.randn_like(mean) * sigma
