
from typing import Callable, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    """
    Plain fully connected network: ``num_hidden_layers`` hidden layers of
    width ``hidden_dim`` followed by a linear output layer (default is the
    original 4-layer network: 3 hidden + 1 output).

    Args:
        input_dim: Number of input features.
        output_dim: Number of output features.
        hidden_dim: Width of every hidden layer.
        hidden_activation: Activation applied after each hidden layer.
        init_w: Half-width of the uniform init range for the output layer.
        num_hidden_layers: Number of hidden layers; must be >= 1 (the output
            layer always consumes ``hidden_dim`` features).
    """
    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        hidden_dim: int,
        hidden_activation: Callable[[torch.Tensor], torch.Tensor] = F.relu,
        init_w: float = 3e-3,
        num_hidden_layers: int = 3,
    ) -> None:
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_activation = hidden_activation

        # Build the hidden stack. Each layer is registered both in the
        # ModuleList and as an individual ``fc_layer{i}`` attribute to keep
        # the original attribute layout (external code / checkpoints may
        # reference either name).
        self.fc_layers = nn.ModuleList()
        self.hidden_layers = [hidden_dim] * num_hidden_layers
        in_features = input_dim
        for i, width in enumerate(self.hidden_layers):
            fc_layer = nn.Linear(in_features, width)
            in_features = width
            setattr(self, "fc_layer{}".format(i), fc_layer)
            self.fc_layers.append(fc_layer)

        # Output layer with a small uniform init around zero.
        self.last_fc_layer = nn.Linear(hidden_dim, output_dim)
        self.last_fc_layer.weight.data.uniform_(-init_w, init_w)
        self.last_fc_layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply every hidden layer (with activation), then the linear output layer."""
        for fc_layer in self.fc_layers:
            x = self.hidden_activation(fc_layer(x))
        x = self.last_fc_layer(x)
        return x

class FlattenMLP(MLP):
    """
    MLP variant whose forward accepts several tensors; they are concatenated
    along the last dimension before running the usual MLP pass.
    """
    def forward(self, *inputs: torch.Tensor) -> torch.Tensor:
        flattened = torch.cat(inputs, dim=-1)
        return super().forward(flattened)

class MLPEncoder(FlattenMLP):
    """
    Context encoder: maps a batch of context samples to a Gaussian posterior
    q(z|c) = N(z_mean, z_var) over the latent task variable, one posterior
    per task.

    The underlying MLP emits ``output_dim`` values per context sample,
    interpreted as [mean (latent_dim) | pre-softplus variance (latent_dim)];
    per-sample Gaussians are merged into one posterior per task via a
    product of Gaussians.
    """
    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        latent_dim: int,
        hidden_dim: int,
        num_tasks: int,
        device: torch.device,
    ) -> None:
        super().__init__(input_dim=input_dim, output_dim=output_dim, hidden_dim=hidden_dim)

        self.output_dim = output_dim
        self.latent_dim = latent_dim
        self.device = device
        self.num_tasks = num_tasks

        # Posterior state; populated by clear_z() / infer_posterior().
        self.z_mean = None
        self.z_var = None
        self.task_z = None
        self.context = None
        self.clear_z()

    def clear_z(self) -> None:
        """Reset q(z|c) to the prior r(z) = N(0, I) and resample task_z."""
        self.z_mean = torch.zeros(self.num_tasks, self.latent_dim).to(self.device)
        self.z_var = torch.ones(self.num_tasks, self.latent_dim).to(self.device)

        # Draw a fresh z from the (now prior) distribution.
        self.sample_z()

        # Drop any accumulated context.
        self.context = None

    def sample_z(self) -> None:
        """Reparameterized sample task_z ~ N(z_mean, z_var), one z per task."""
        # One batched Normal replaces the original per-task loop; rsample()
        # keeps the sample differentiable w.r.t. z_mean / z_var.
        dist = torch.distributions.Normal(self.z_mean, torch.sqrt(self.z_var))
        self.task_z = dist.rsample().to(self.device)  # [num_tasks, latent_dim]

    @classmethod
    def _product_of_gaussians(
        cls,
        mean: torch.Tensor,
        var: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Merge a batch of independent Gaussians into one (product of Gaussians).

        mean, var: [batch_size, latent_dim] per-sample parameters. Returns the
        merged ([latent_dim], [latent_dim]) mean and variance:
            1 / pog_var = sum_i 1 / var_i
            pog_mean    = pog_var * sum_i mean_i / var_i
        """
        var = torch.clamp(var, min=1e-7)  # guard against division by ~0
        pog_var = 1.0 / torch.sum(torch.reciprocal(var), dim=0)
        pog_mean = pog_var * torch.sum(mean / var, dim=0)
        return pog_mean, pog_var

    def infer_posterior(self, context: torch.Tensor) -> None:
        """Update q(z|c) from a context batch, then resample task_z.

        context: [num_tasks, batch_size, feature_dim] (original comment said
        ``latent_dim*2``, which looks like the *output* width — TODO confirm
        against callers).
        """
        params = self.forward(context)
        params = params.view(context.size(0), -1, self.output_dim).to(self.device)

        # Split the network output into per-sample mean and variance
        # (softplus keeps the variance positive), then merge per task.
        z_mean = torch.unbind(params[..., : self.latent_dim])
        z_var = torch.unbind(F.softplus(params[..., self.latent_dim :]))
        z_params = [self._product_of_gaussians(mu, var) for mu, var in zip(z_mean, z_var)]

        self.z_mean = torch.stack([z_param[0] for z_param in z_params]).to(self.device)
        self.z_var = torch.stack([z_param[1] for z_param in z_params]).to(self.device)
        self.sample_z()

    def compute_kl_div(self) -> torch.Tensor:
        """Return the sum over tasks and dims of KL( q(z|c) || r(z) ), r(z) = N(0, I)."""
        prior = torch.distributions.Normal(
            torch.zeros(self.latent_dim).to(self.device),
            torch.ones(self.latent_dim).to(self.device),
        )

        # The batched posterior broadcasts against the prior; summing the
        # elementwise KL equals the original stack-then-sum per-task loop.
        posterior = torch.distributions.Normal(self.z_mean, torch.sqrt(self.z_var))
        kl_div = torch.distributions.kl.kl_divergence(posterior, prior).sum()
        return kl_div
