import torch
import torch.nn as nn
import numpy as np
from torch import Tensor
from typing import Optional, Any, Union, Callable
import torch.nn.functional as F
import math
import random
from local_attention import LocalAttention
from torchvision.ops import stochastic_depth
from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle, matrix_to_rotation_6d, rotation_6d_to_matrix
from pytorch3d.transforms import quaternion_to_matrix
def _get_activation_fn(activation):
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return F.gelu

    raise RuntimeError("activation should be relu/gelu, not {}".format(activation))

def _normalize_and_reshape_query(q, heads, unit_norm, depth_scale, normalize_stop_grads=False):
    """Normalizes the query and prepares it for attention computation."""
    newshape = [heads, q.shape[-1] // heads]
    newshape = [*q.shape[:-1], *newshape]
    q = q.reshape(newshape)
    if unit_norm:
        if normalize_stop_grads:
            with torch.no_grad():
                q_norm = torch.norm(q, dim=-1, keepdim=True)
        else:
            q_norm = torch.norm(q, dim=-1, keepdim=True)
        q = q / (q_norm + 1e-6)
    if depth_scale:
        depth = q.shape[-1]
        q = q / math.sqrt(depth)
    newshape = [*q.shape[:-2], -1]
    q = q.reshape(newshape)
    return q

class TransformerEncoderLayerQaN(nn.Module):
    r"""Transformer encoder layer with a learned-query ("QaN") attention block.

    Structurally follows the standard encoder layer of "Attention Is All You
    Need" (Vaswani et al., 2017) — attention sub-block + feedforward sub-block
    with pre- or post-LayerNorm — but self-attention is replaced:
    ``num_queries`` learned, unit-normalized queries attend to the input
    through windowed :class:`LocalAttention`, and the per-query output streams
    are mixed back into one by the learned weights ``wk`` (see
    :meth:`_qa_block`). The whole layer's residual delta is additionally
    regularized with stochastic depth (a no-op while ``dropout_rate`` is 0).

    Args:
        d_model: the number of expected features in the input (required).
        nhead: number of head groups used when normalizing the learned
            queries (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        num_queries: number of learned query vectors (default=10).
        window_size: window size forwarded to ``LocalAttention`` (default=1).
        activation: the activation function of the intermediate layer, can be a string
            ("relu" or "gelu") or a unary callable. Default: relu
        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        batch_first: accepted for API compatibility with
            ``nn.TransformerEncoderLayer`` but unused in this class;
            ``forward`` expects (seq, batch, feature) input.
        norm_first: if ``True``, layer norm is done prior to attention and feedforward
            operations, respectively. Otherwise it's done after. Default: ``False`` (after).

    Examples::
        >>> encoder_layer = TransformerEncoderLayerQaN(d_model=512, nhead=8)
        >>> src = torch.rand(10, 32, 512)
        >>> out = encoder_layer(src)
    """
    __constants__ = ['batch_first', 'norm_first']

    def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, num_queries: int = 10, window_size: int = 1,
                 activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
                 layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(TransformerEncoderLayerQaN, self).__init__()
        # The stock nn.MultiheadAttention self-attention was replaced by the
        # windowed LocalAttention below.
        # self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
        #                                     **factory_kwargs)

        self.self_attn = LocalAttention(
            dim = d_model,           # dimension of each head (you need to pass this in for relative positional encoding)
            window_size = window_size,       # window size. 512 is optimal, but 256 or 128 yields good enough results
            causal = False,           # auto-regressive or not
            look_backward = 1,       # each window looks at the window before
            look_forward = 1,        # for non-auto-regressive case, will default to 1, so each window looks at the window before and after it
            dropout = dropout,           # post-attention dropout
            exact_windowsize = False, # if this is set to true, in the causal setting, each query will see at maximum the number of keys equal to the window size
            autopad = True           # pad sequences whose length is not a multiple of window_size
        )

        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)

        # Learned queries for the QaN attention block, one row per query.
        self.queries = nn.Parameter(torch.FloatTensor(num_queries, d_model)) 
        stdv = 1. / math.sqrt(self.queries.size(1))
        # NOTE(review): Tensor.normal_(mean, std) — this samples from
        # N(mean=-stdv, std=stdv). The classic fan-in init would be
        # uniform_(-stdv, stdv); confirm the negative-mean normal draw is
        # intentional.
        self.queries.data.normal_(-stdv,stdv)

        # Mixing weights that collapse the num_queries output streams into one.
        self.wk = nn.Parameter(torch.FloatTensor(num_queries, 1)) 
        stdv = 1. / math.sqrt(self.wk.size(0))
        self.wk.data.normal_(-stdv,stdv)

        self.norm_first = norm_first
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        # Legacy string support for activation function.
        if isinstance(activation, str):
            self.activation = _get_activation_fn(activation)
        else:
            self.activation = activation

        self.d_model = d_model
        self.nhead = nhead
        self.num_queries = num_queries
        # Stochastic-depth drop probability for this layer's residual delta;
        # 0 makes the stochastic_depth call in forward() a no-op unless a
        # caller overwrites this attribute after construction.
        self.dropout_rate = 0

    def _get_query(self, x):
        """Return the learned queries broadcast over the input's batch/time grid.

        The ``(num_queries, d_model)`` parameter is unit-normalized per head
        group and scaled by 1/sqrt(head_dim) for stability (proposed in
        https://arxiv.org/abs/1706.03762), then tiled to shape
        ``(B, num_queries, T, d_model)`` for input ``x`` of shape (T, B, D).
        """
        q = _normalize_and_reshape_query(self.queries, self.nhead, True, depth_scale=True
                                        )
        # B, N, T, d
        q = q.unsqueeze(0).unsqueeze(2).repeat(x.shape[1], 1, x.shape[0], 1).contiguous()
        return q

    def __setstate__(self, state):
        # Backward compatibility: older pickles may predate the activation attr.
        if 'activation' not in state:
            state['activation'] = F.relu
        super(TransformerEncoderLayerQaN, self).__setstate__(state)

    def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer, shape (T, B, d_model) (required).
            src_mask: the mask for the src sequence (optional; currently
                ignored by the QaN block, which builds its own all-True mask).
            src_key_padding_mask: the mask for the src keys per batch
                (optional; likewise ignored by the QaN block).

        Shape:
            see the docs in Transformer class.
        """

        # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf

        x = src.clone()  # keep src intact so the layer delta (x - src) is exact
        if self.norm_first:
            x = x + self._qa_block(self.norm1(x), src_mask, src_key_padding_mask)
            x = x + self._ff_block(self.norm2(x))
        else:
            x = self.norm1(x + self._qa_block(x, src_mask, src_key_padding_mask))
            x = self.norm2(x + self._ff_block(x))

        # Stochastic depth on the whole layer's delta: permute to (B, T, D) so
        # torchvision's 'row' mode drops per sample, then add the surviving
        # delta back onto src. With dropout_rate == 0 this leaves x unchanged.
        residual = stochastic_depth((x - src).permute(1, 0, 2), self.dropout_rate, 'row', self.training).permute(1, 0, 2).contiguous()
        x = src + residual

        return x

    # def _compute_QK_scores(self, q, x):
    #     """Computes the QK dot product in fused manner.
    #     Since the queries are shared across windows, we compute (Q*W_k^T)X^T for better memory utilization.
    #     :param q: The leared queries of shape [..., N_Queries, Heads, Head_dim]
    #     :param x: The input features [B, H, W, C]
    #     :return: The query-key dot product for each query head [B, H, W, N_Queries, Heads]
    #     """
    #     # q = [Nq, h, d]
    #     # WK = [D_in, h, D]
    #     T, B, D = x.shape
    #     q = q.unsqueeze(0).repeat(B, 1, 1, 1)
    #     Wk = Wk.reshape([-1, self.nhead, self.d_model // self.nhead])
    #     qWk = torch.einsum('Bqhd,Dhd->BDqh', q, Wk)
    #     qWkx = torch.einsum('TBD,BDqh->TBqh', x, qWk)

    #     return qWkx

    def _qa_block(self, x: Tensor,
                  attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
        """Learned-query attention: each query stream attends to the input via
        LocalAttention, then ``wk`` mixes the streams back into one.

        ``attn_mask`` / ``key_padding_mask`` are accepted for signature parity
        but unused — an all-True mask is passed to LocalAttention instead.
        """
        q = self._get_query(x)
        # T B D -> N T B D -> B N T D
        x = x.unsqueeze(0).repeat(self.num_queries, 1, 1, 1).permute(2, 0, 1, 3).contiguous()
        B, N, T, D = x.shape
        mask = torch.ones(1, T).bool().to(x.device)
        # Fold (B, N) into one batch axis so each query stream is attended
        # independently; then mix the N streams via wk (N, 1):
        # (B, N, T, D) -> (B, 1, T, D) -> (T, B, D).
        x = self.self_attn(q.view(B * N, T, D), x.view(B * N, T, D), x.view(B * N, T, D), mask = mask).view(B, N, T, D)
        x = torch.einsum("bntd,nk->bktd", (x, self.wk)).squeeze(1).permute(1, 0, 2).contiguous()
        return self.dropout1(x)

    # self-attention block (kept for reference; forward() uses _qa_block instead)
    def _sa_block(self, x: Tensor,
                  attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
        T, B, D = x.shape
        mask = torch.ones(1, T).bool().to(x.device)
        x = x.permute(1, 0, 2).contiguous()
        x = self.self_attn(x, x, x, mask = mask).permute(1, 0, 2).contiguous()
        return self.dropout1(x)

    # feed forward block: linear -> activation -> dropout -> linear -> dropout
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.dropout2(x)


class TransformerDecoderLayerQaN(nn.Module):
    r"""Transformer decoder layer with a learned-query ("QaN") self-attention block.

    Structurally follows the standard decoder layer of "Attention Is All You
    Need" (Vaswani et al., 2017) — self-attention, cross-attention over the
    encoder memory, and a feedforward sub-block, with pre- or post-LayerNorm —
    but the self-attention is replaced: ``num_queries`` learned,
    unit-normalized queries attend to the target through windowed
    :class:`LocalAttention`, and the per-query output streams are mixed back
    into one by the learned weights ``wk`` (see :meth:`_qa_block`).
    Cross-attention remains a stock ``nn.MultiheadAttention``. The whole
    layer's residual delta is regularized with stochastic depth (a no-op
    while ``dropout_rate`` is 0).

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models and for
            normalizing the learned queries (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        num_queries: number of learned query vectors (default=10).
        window_size: window size forwarded to ``LocalAttention`` (default=1).
        activation: the activation function of the intermediate layer, can be a string
            ("relu" or "gelu") or a unary callable. Default: relu
        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        batch_first: forwarded only to the cross-attention
            ``nn.MultiheadAttention``; the QaN block itself always expects
            (seq, batch, feature) input. Default: ``False``.
        norm_first: if ``True``, layer norm is done prior to self attention, multihead
            attention and feedforward operations, respectively. Otherwise it's done after.
            Default: ``False`` (after).

    Examples::
        >>> decoder_layer = TransformerDecoderLayerQaN(d_model=512, nhead=8)
        >>> memory = torch.rand(10, 32, 512)
        >>> tgt = torch.rand(20, 32, 512)
        >>> out = decoder_layer(tgt, memory)
    """
    __constants__ = ['batch_first', 'norm_first']

    def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, num_queries: int = 10, window_size: int = 1,
                 activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
                 layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(TransformerDecoderLayerQaN, self).__init__()
        # The stock nn.MultiheadAttention self-attention was replaced by the
        # windowed LocalAttention below.
        # self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
        #                                     **factory_kwargs)

        self.self_attn = LocalAttention(
            dim = d_model,           # dimension of each head (you need to pass this in for relative positional encoding)
            window_size = window_size,       # window size. 512 is optimal, but 256 or 128 yields good enough results
            causal = False,           # auto-regressive or not
            look_backward = 1,       # each window looks at the window before
            look_forward = 1,        # for non-auto-regressive case, will default to 1, so each window looks at the window before and after it
            dropout = dropout,           # post-attention dropout
            exact_windowsize = False, # if this is set to true, in the causal setting, each query will see at maximum the number of keys equal to the window size
            autopad = True           # pad sequences whose length is not a multiple of window_size
        )
        # Cross-attention over the encoder memory stays a standard module.
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
                                                 **factory_kwargs)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)

        # Learned queries for the QaN attention block, one row per query.
        self.queries = nn.Parameter(torch.FloatTensor(num_queries, d_model)) 
        stdv = 1. / math.sqrt(self.queries.size(1))
        # NOTE(review): Tensor.normal_(mean, std) — this samples from
        # N(mean=-stdv, std=stdv). The classic fan-in init would be
        # uniform_(-stdv, stdv); confirm the negative-mean normal draw is
        # intentional.
        self.queries.data.normal_(-stdv,stdv)

        # Mixing weights that collapse the num_queries output streams into one.
        self.wk = nn.Parameter(torch.FloatTensor(num_queries, 1)) 
        stdv = 1. / math.sqrt(self.wk.size(0))
        self.wk.data.normal_(-stdv,stdv)

        self.norm_first = norm_first
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        # Legacy string support for activation function.
        if isinstance(activation, str):
            self.activation = _get_activation_fn(activation)
        else:
            self.activation = activation

        self.d_model = d_model
        self.nhead = nhead
        self.num_queries = num_queries
        # Stochastic-depth drop probability for this layer's residual delta;
        # 0 makes the stochastic_depth call in forward() a no-op unless a
        # caller overwrites this attribute after construction.
        self.dropout_rate = 0

    def _get_query(self, x):
        """Return the learned queries broadcast over the input's batch/time grid.

        The ``(num_queries, d_model)`` parameter is unit-normalized per head
        group and scaled by 1/sqrt(head_dim) for stability (proposed in
        https://arxiv.org/abs/1706.03762), then tiled to shape
        ``(B, num_queries, T, d_model)`` for input ``x`` of shape (T, B, D).
        """
        q = _normalize_and_reshape_query(self.queries, self.nhead, True, depth_scale=True
                                        )
        # B, N, T, d
        q = q.unsqueeze(0).unsqueeze(2).repeat(x.shape[1], 1, x.shape[0], 1).contiguous()
        return q

    def __setstate__(self, state):
        # Backward compatibility: older pickles may predate the activation attr.
        if 'activation' not in state:
            state['activation'] = F.relu
        super(TransformerDecoderLayerQaN, self).__setstate__(state)

    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer, shape (T, B, d_model) (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional; currently
                ignored by the QaN block, which builds its own all-True mask).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch
                (optional; likewise ignored by the QaN block).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf

        x = tgt.clone()  # keep tgt intact so the layer delta (x - tgt) is exact
        if self.norm_first:
            x = x + self._qa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask)
            x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask)
            x = x + self._ff_block(self.norm3(x))
        else:
            x = self.norm1(x + self._qa_block(x, tgt_mask, tgt_key_padding_mask))
            x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask))
            x = self.norm3(x + self._ff_block(x))

        # Stochastic depth on the whole layer's delta: permute to (B, T, D) so
        # torchvision's 'row' mode drops per sample, then add the surviving
        # delta back onto tgt. With dropout_rate == 0 this leaves x unchanged.
        residual = stochastic_depth((x - tgt).permute(1, 0, 2), self.dropout_rate, 'row', self.training).permute(1, 0, 2).contiguous()
        x = tgt + residual

        return x

    def _qa_block(self, x: Tensor,
                  attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
        """Learned-query attention: each query stream attends to the input via
        LocalAttention, then ``wk`` mixes the streams back into one.

        ``attn_mask`` / ``key_padding_mask`` are accepted for signature parity
        but unused — an all-True mask is passed to LocalAttention instead.
        """
        q = self._get_query(x)
        # T B D -> N T B D -> B N T D
        x = x.unsqueeze(0).repeat(self.num_queries, 1, 1, 1).permute(2, 0, 1, 3).contiguous()
        B, N, T, D = x.shape
        mask = torch.ones(1, T).bool().to(x.device)
        # Fold (B, N) into one batch axis so each query stream is attended
        # independently; then mix the N streams via wk (N, 1):
        # (B, N, T, D) -> (B, 1, T, D) -> (T, B, D).
        x = self.self_attn(q.view(B * N, T, D), x.view(B * N, T, D), x.view(B * N, T, D), mask = mask).view(B, N, T, D)
        x = torch.einsum("bntd,nk->bktd", (x, self.wk)).squeeze(1).permute(1, 0, 2).contiguous()
        return self.dropout1(x)

    # self-attention block (kept for reference; forward() uses _qa_block instead)
    def _sa_block(self, x: Tensor,
                  attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
        T, B, D = x.shape
        mask = torch.ones(1, T).bool().to(x.device)
        x = x.permute(1, 0, 2).contiguous()
        x = self.self_attn(x, x, x, mask = mask).permute(1, 0, 2).contiguous()
        return self.dropout1(x)

    # multihead attention block: cross-attention of x over the encoder memory
    def _mha_block(self, x: Tensor, mem: Tensor,
                   attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
        x = self.multihead_attn(x, mem, mem,
                                attn_mask=attn_mask,
                                key_padding_mask=key_padding_mask,
                                need_weights=False)[0]
        return self.dropout2(x)

    # feed forward block: linear -> activation -> dropout -> linear -> dropout
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.dropout3(x)


class PositionalEncoding(nn.Module):
    """Classic sinusoidal positional encoding (Vaswani et al., 2017).

    Adds a fixed sin/cos position signal to a (seq, batch, dim) input and
    applies dropout.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2i/d_model) for i = 0..d_model/2
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Buffer, not a parameter: follows .to()/device moves but is untrained.
        self.register_buffer('pe', table.unsqueeze(1))  # (max_len, 1, d_model)

    # input: SxBxD
    def forward(self, x):
        """Add the first S rows of the table to x, then apply dropout."""
        return self.dropout(x + self.pe[:x.shape[0]])


class TimestepEmbedder(nn.Module):
    def __init__(self, latent_dim, sequence_pos_encoder):
        super().__init__()
        self.latent_dim = latent_dim
        self.sequence_pos_encoder = sequence_pos_encoder

        time_embed_dim = self.latent_dim
        self.time_embed = nn.Sequential(
            nn.Linear(self.latent_dim, time_embed_dim),
            nn.SiLU(),
            nn.Linear(time_embed_dim, time_embed_dim),
        )

    def forward(self, timesteps):
        return self.time_embed(self.sequence_pos_encoder.pe[timesteps]).permute(1, 0, 2)  # (1, B, N_feature)


#NERF Positional encoding
# https://github.com/yenchenlin/nerf-pytorch/blob/master/run_nerf_helpers.py
class Embedder:
    """NeRF-style frequency (Fourier feature) embedding.

    Builds a bank of functions [x, sin(f1*x), cos(f1*x), sin(f2*x), ...]
    from the supplied kwargs and concatenates their outputs along the last
    dimension. Mirrors run_nerf_helpers.py in yenchenlin/nerf-pytorch.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        """Populate ``self.embed_fns`` and ``self.out_dim`` from kwargs."""
        dims = self.kwargs['input_dims']
        fns = []
        total = 0
        if self.kwargs['include_input']:
            fns.append(lambda x: x)
            total += dims

        n_freqs = self.kwargs['num_freqs']
        max_freq = self.kwargs['max_freq_log2']
        if self.kwargs['log_sampling']:
            bands = 2. ** torch.linspace(0., max_freq, steps=n_freqs)
        else:
            bands = torch.linspace(2. ** 0., 2. ** max_freq, steps=n_freqs)

        for band in bands:
            for periodic in self.kwargs['periodic_fns']:
                # Bind band/periodic as defaults to avoid late-binding closures.
                fns.append(lambda x, p_fn=periodic, freq=band: p_fn(x * freq))
                total += dims

        self.embed_fns = fns
        self.out_dim = total

    def embed(self, inputs):
        """Apply every embedding function and concatenate on the last dim."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)


def get_embedder(multires, i=0, input_dimension=3):
    """Build a NeRF-style frequency embedder.

    Args:
        multires: number of frequency octaves (``num_freqs``).
        i: pass -1 to disable embedding and get an identity mapping.
        input_dimension: dimensionality of the raw input (default 3).

    Returns:
        (embed_fn, out_dim) where ``embed_fn`` maps [..., input_dimension]
        to [..., out_dim].
    """
    if i == -1:
        # BUG FIX: previously returned a hard-coded 3 regardless of
        # input_dimension; the identity path has input_dimension features.
        # Unchanged for the default input_dimension=3.
        return nn.Identity(), input_dimension

    embed_kwargs = {
        'include_input': True,
        'input_dims': input_dimension,
        'max_freq_log2': multires - 1,
        'num_freqs': multires,
        'log_sampling': True,
        'periodic_fns': [torch.sin, torch.cos],
    }

    embedder_obj = Embedder(**embed_kwargs)
    # Wrap so callers get a plain callable rather than the Embedder object.
    embed = lambda x, eo=embedder_obj: eo.embed(x)
    return embed, embedder_obj.out_dim

class NormalDistDecoder(nn.Module):
    """Projects flattened features to a diagonal Gaussian ``Normal(mu, sigma)``.

    The ``logvar`` head predicts log-variance, so sigma = exp(0.5 * logvar),
    which is strictly positive.
    """

    def __init__(self, num_feat_in, latentD):
        super(NormalDistDecoder, self).__init__()
        self.num_feat_in = num_feat_in

        self.mu = nn.Linear(num_feat_in, latentD)
        self.logvar = nn.Linear(num_feat_in, latentD)

    def forward(self, Xout):
        flat = Xout.reshape(-1, self.num_feat_in)
        sigma = torch.exp(0.5 * self.logvar(flat))
        return torch.distributions.normal.Normal(self.mu(flat), sigma)

class TransformerEncoder(nn.Module):
    r"""Applies a sequence of encoder layers, then an optional final norm.

    Unlike ``nn.TransformerEncoder``, the caller supplies the layer container
    directly (e.g. an ``nn.ModuleList``) rather than a prototype layer plus a
    count.

    Args:
        encoder_layer: iterable of encoder layers, applied in order (required).
        norm: the layer normalization component (optional).

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = TransformerEncoder(nn.ModuleList([encoder_layer]))
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """
    __constants__ = ['norm']

    def __init__(self, encoder_layer, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = encoder_layer
        self.norm = norm

    def forward(self, src: Tensor, mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Run ``src`` through every layer in order, then the final norm.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        output = src
        for layer in self.layers:
            output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
        return output if self.norm is None else self.norm(output)


class TransformerDecoder(nn.Module):
    r"""Applies a sequence of decoder layers, then an optional final norm.

    Unlike ``nn.TransformerDecoder``, the caller supplies the layer container
    directly (e.g. an ``nn.ModuleList``) rather than a prototype layer plus a
    count.

    Args:
        decoder_layer: iterable of decoder layers, applied in order (required).
        norm: the layer normalization component (optional).

    Examples::
        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
        >>> transformer_decoder = TransformerDecoder(nn.ModuleList([decoder_layer]))
        >>> memory = torch.rand(10, 32, 512)
        >>> tgt = torch.rand(20, 32, 512)
        >>> out = transformer_decoder(tgt, memory)
    """
    __constants__ = ['norm']

    def __init__(self, decoder_layer, norm=None):
        super(TransformerDecoder, self).__init__()
        self.layers = decoder_layer
        self.norm = norm

    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Run ``tgt`` (with ``memory``) through every layer, then the final norm.

        Args:
            tgt: the sequence to the decoder (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        output = tgt
        for layer in self.layers:
            output = layer(output, memory, tgt_mask=tgt_mask,
                           memory_mask=memory_mask,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_mask=memory_key_padding_mask)
        return output if self.norm is None else self.norm(output)

class MDM(nn.Module):
    """Transformer-based denoiser for hand/object interaction diffusion.

    Per frame the model consumes, for each of the two sides:
      * 21 body/hand keypoints (flattened into ``args.smpl_dim`` features),
      * ``args.num_points`` object keypoints,
      * a 7-D object pose (3-D translation + 4-D quaternion).
    The observed past is embedded and encoded with a transformer encoder;
    the decoder then predicts the clean sample x_0 (body keypoints, posed
    object keypoints and object pose) for the whole sequence.
    """

    def __init__(self, args):
        super(MDM, self).__init__()
        self.args = args
        num_channels = args.embedding_dim

        # Per-modality linear embeddings into the shared feature space.
        self.bodyEmbedding = nn.Linear(args.smpl_dim, num_channels)
        self.shapeEmbedding = nn.Linear(args.num_points*3, num_channels)
        self.objEmbedding = nn.Linear(args.num_points*3, num_channels)
        self.PositionalEmbedding = PositionalEncoding(d_model=num_channels, dropout=args.dropout)
        self.embedTimeStep = TimestepEmbedder(num_channels, self.PositionalEmbedding)

        from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
        # Hyper-parameters shared by every transformer layer below.
        layer_kwargs = dict(d_model=num_channels,
                            nhead=self.args.num_heads,
                            dim_feedforward=self.args.ff_size,
                            dropout=self.args.dropout,
                            activation=self.args.activation,
                            batch_first=False)

        self.encoder = TransformerEncoder(
            self._build_stack(TransformerEncoderLayer, TransformerEncoderLayerQaN, layer_kwargs))

        if self.args.latent_usage == 'memory':
            # Cross-attention decoder: the encoded past is consumed as memory.
            self.decoder = TransformerDecoder(
                self._build_stack(TransformerDecoderLayer, TransformerDecoderLayerQaN, layer_kwargs))
        else:
            # Self-attention-only "decoder" (encoder stack reused).
            self.decoder = TransformerEncoder(
                self._build_stack(TransformerEncoderLayer, TransformerEncoderLayerQaN, layer_kwargs))

        self.bodyFinalLinear = nn.Linear(num_channels, 2 * args.smpl_dim)  # right hand, left hand
        self.objFinalLinear = nn.Linear(num_channels, 2 * 7)  # tool, object

    @staticmethod
    def _build_stack(plain_cls, qan_cls, layer_kwargs, num_layers=8):
        """Build ``num_layers`` transformer layers as an ``nn.ModuleList``.

        The first and last layers are the standard ``plain_cls``; every layer
        in between is the query-normalised ``qan_cls`` variant. Construction
        order matches the previous hand-unrolled code, so the parameter
        initialisation (RNG stream) is unchanged.
        """
        layers = [plain_cls(**layer_kwargs)]
        layers.extend(qan_cls(**layer_kwargs) for _ in range(num_layers - 2))
        layers.append(plain_cls(**layer_kwargs))
        return nn.ModuleList(layers)

    def mask_cond(self, cond, force_mask=False):
        """Zero out the condition (per batch element) for classifier-free guidance.

        ``cond`` is indexed as (*, B, C): the batch size is read from dim 1.
        During training each batch element is dropped with probability
        ``args.cond_mask_prob``; with ``force_mask`` the condition is always
        replaced by zeros (the "null" condition).
        """
        bs = cond.shape[1]
        if force_mask:
            return torch.zeros_like(cond)
        elif self.training and self.args.cond_mask_prob > 0.:
            # 1 -> use null cond, 0 -> use real cond
            mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.args.cond_mask_prob).view(1, bs, 1)
            return cond * (1. - mask)
        else:
            return cond

    def _get_embeddings(self, body_gt, obj_gt, pose_gt, zero_pose_obj):
        """Embed and encode the observed past; also build the flat GT tensor.

        Args:
            body_gt: (T, B, 2, 21, 3) body joints.
            obj_gt: (T, B, 2, N_points, 3) object keypoints.
            pose_gt: (T, B, 2, 7) object pose (concatenated into ``gt`` only).
            zero_pose_obj: (B, 2, N_points, 3) canonical object keypoints.

        Returns:
            (embedding, gt): encoded past features (past_len, B, N_feature)
            and the flattened ground truth (T, B, 2, N_gt).
        """
        T, B, _, N_joints, _ = body_gt.shape
        N_points = obj_gt.shape[-2]
        body_gt = body_gt.view(T, B, 2, N_joints*3)
        obj_gt = obj_gt.view(T, B, 2, N_points*3)

        # Canonical-shape embedding is time-invariant -> single leading frame.
        obj_shape_embedding = self.shapeEmbedding(zero_pose_obj[None].view(1, B, 2, N_points*3))  # (1, B, 2, N_feature)

        gt = torch.cat([body_gt, obj_gt, pose_gt], dim=-1)  # (T, B, 2, N_gt)

        # Only the observed past frames feed the encoder.
        body = self.bodyEmbedding(body_gt[:self.args.past_len])  # (T, B, 2, N_feature)
        obj = self.objEmbedding(obj_gt[:self.args.past_len])  # (T, B, 2, N_feature)
        # Sum the two sides (dim -2) into one token per frame.
        embedding = (body + obj + obj_shape_embedding).sum(dim=-2)  # (T, B, N_feature)

        embedding = self.PositionalEmbedding(embedding)  # (T, B, N_feature)
        embedding = self.encoder(embedding)  # (T, B, N_feature)

        return embedding, gt

    def calc_obj_pred(self, pose_pred, zero_pose_obj):
        """Apply the predicted 7-D poses to the canonical object keypoints.

        Args:
            pose_pred: (T, B, 2, 7) — translation in [..., :3], quaternion
                in [..., 3:7].
            zero_pose_obj: (B, 2, N_points, 3) canonical keypoints.

        Returns:
            (T, B, 2, N_points, 3) posed keypoints: R @ p + t.
        """
        obj_gt_base = zero_pose_obj[None, ...]  # (1, B, 2, N_points, 3)
        translation = pose_pred[..., None, :3]  # (T, B, 2, 1, 3)
        # Reorder the quaternion from (x, y, z, w) storage to the (w, x, y, z)
        # convention pytorch3d expects. NOTE(review): storage order assumed
        # from this reordering — confirm against the dataset.
        quat_correct = torch.cat([pose_pred[..., -1:], pose_pred[..., -4:-1]], dim=-1)  # (T, B, 2, 4)
        rotation_matrix = quaternion_to_matrix(quat_correct)  # (T, B, 2, 3, 3)
        # Rotate (batched matmul over transposed point lists), then translate.
        obj_pred = rotation_matrix.matmul(obj_gt_base.permute(0, 1, 2, 4, 3)).permute(0, 1, 2, 4, 3).contiguous() + translation  # (T, B, 2, N_points, 3)

        return obj_pred

    def _decode(self, x, time_embedding, y=None, zero_pose_obj=None):
        """Denoise one step: map the noisy sample ``x`` to a prediction of x_0.

        Args:
            x: (T, B, 2, N_gt) noisy sample (body | obj keypoints | pose).
            time_embedding: diffusion-timestep embedding, broadcast per frame.
            y: optional conditioning memory for the decoder.
            zero_pose_obj: (B, 2, N_points, 3) canonical object keypoints.

        Returns:
            (T, B, 2, N_gt) predicted clean sample.
        """
        assert zero_pose_obj is not None
        T, B = x.shape[:2]

        body, obj, pose = torch.split(x, [self.args.num_joints*3, self.args.num_points*3, 7], dim=-1)
        body = self.bodyEmbedding(body)  # (T, B, 2, N_feature)
        obj = self.objEmbedding(obj)  # (T, B, 2, N_feature)

        decoder_input = (body + obj + time_embedding.unsqueeze(2)).sum(dim=-2)  # (T, B, N_feature)
        decoder_input = self.PositionalEmbedding(decoder_input)  # (T, B, N_feature)
        # NOTE(review): in the non-'memory' branch self.decoder is a
        # TransformerEncoder; this call assumes the project's TransformerEncoder
        # accepts tgt=/memory= keywords as well — confirm its signature.
        decoder_output = self.decoder(tgt=decoder_input, memory=y)  # (T, B, N_feature)

        body = self.bodyFinalLinear(decoder_output).view(T, B, 2, self.args.num_joints*3)  # (T, B, 2, 21*3)
        obj_pose = self.objFinalLinear(decoder_output).view(T, B, 2, 7)  # (T, B, 2, 7)
        # Object keypoints are derived from the predicted pose, not regressed.
        obj = self.calc_obj_pred(obj_pose, zero_pose_obj).view(T, B, 2, -1)  # (T, B, 2, N_point*3)
        pred = torch.cat([body, obj, obj_pose], dim=-1)
        # T,B,2,N_gt
        return pred

    def forward(self, x, timesteps, zero_pose_obj, y=None):
        """Diffusion-model entry point.

        Args:
            x: (bs, 1, 2, nfeats, nframes) noisy sample.
            timesteps: diffusion timesteps for the batch.
            zero_pose_obj: (B, 2, N_points, 3) canonical object keypoints.
            y: optional dict with key 'cond' holding the conditioning tensor.

        Returns:
            (bs, 1, 2, nfeats, nframes) predicted x_0.
        """
        time_embedding = self.embedTimeStep(timesteps)
        # [bs, 1, 2, nfeats, nframes] -> [T, B, 2, N]
        x = x.squeeze(1).permute(3, 0, 1, 2).contiguous()
        if y is not None:
            y = self.mask_cond(y['cond'])
        x_0 = self._decode(x, time_embedding, y, zero_pose_obj=zero_pose_obj)
        # [T B 2 N] -> [bs, 1, 2, nfeats, nframes]
        x_0 = x_0.permute(1, 2, 3, 0).unsqueeze(1).contiguous()
        return x_0

from diffusion import gaussian_diffusion as gd
from diffusion.respace import SpacedDiffusion, space_timesteps

def create_gaussian_diffusion(args):
    """Build the SpacedDiffusion object used for training/sampling.

    Fixed choices baked in here:
      * the model predicts x_start (x0), never epsilon;
      * sigma is not learned (FIXED_SMALL/FIXED_LARGE per ``args.sigma_small``);
      * betas are unscaled (scale 1.0);
      * no timestep respacing (all ``args.diffusion_steps`` steps are used);
      * timesteps are not rescaled.
    """
    steps = args.diffusion_steps
    betas = gd.get_named_beta_schedule(args.noise_schedule, steps, 1.)  # scale 1. -> no beta scaling

    if args.sigma_small:
        var_type = gd.ModelVarType.FIXED_SMALL
    else:
        var_type = gd.ModelVarType.FIXED_LARGE

    return SpacedDiffusion(
        use_timesteps=space_timesteps(steps, [steps]),  # no respacing
        betas=betas,
        model_mean_type=gd.ModelMeanType.START_X,  # we always predict x0
        model_var_type=var_type,
        loss_type=gd.LossType.MSE,
        rescale_timesteps=False,
        lambda_vel=args.weight_v,
    )

def create_model_and_diffusion(args):
    """Construct the MDM denoiser together with its matching diffusion schedule."""
    return MDM(args), create_gaussian_diffusion(args)