import os
from os.path import join, abspath, dirname, isfile, isdir
import sys
sys.path.append(join(dirname(abspath(__file__)), "../../../.."))
import torch
import torch.nn as nn
import numpy as np
from dataset_statistics.info import N_CATEGORY, N_ACTION


class PositionalEncoding(nn.Module):
    """Additive sinusoidal positional encoding for sequence-first tensors.

    Precomputes a (max_len, 1, d_model) table of sin/cos codes and adds the
    first S rows to any (S, B, D) input, followed by dropout.
    """

    def __init__(self, d_model, dropout=0.1, max_len=40):
        # NOTE: max_len caps the longest sequence this module can handle.
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        positions = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder from 1 down to 1/10000 over even dims.
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Buffer (not a parameter): moves with .to(device), excluded from grads.
        self.register_buffer('pe', table.unsqueeze(1))

    def forward(self, x):
        """x: (S, B, D) -> (S, B, D) with positional codes added (then dropout)."""
        return self.dropout(x + self.pe[:x.shape[0], :, :])

class NormalDistDecoder(nn.Module):
    """Maps a feature vector to a diagonal Gaussian via mu / log-variance heads."""

    def __init__(self, num_feat_in, latentD):
        super(NormalDistDecoder, self).__init__()
        self.num_feat_in = num_feat_in

        self.mu = nn.Linear(num_feat_in, latentD)
        self.logvar = nn.Linear(num_feat_in, latentD)

    def forward(self, Xout):
        """Flatten trailing dims to (-1, num_feat_in) and return Normal(mu, std)."""
        flat = Xout.reshape(-1, self.num_feat_in)
        # std = exp(logvar / 2), the usual VAE parameterization.
        scale = (0.5 * self.logvar(flat)).exp()
        return torch.distributions.normal.Normal(self.mu(flat), scale)

class InteractionVAE(nn.Module):
    """Transformer-based CVAE over human-object interaction sequences.

    Per-frame body joints, object points and (optionally) a 7-D object pose
    are linearly embedded, concatenated along the time axis, and fed to a
    Transformer encoder whose mean-pooled output parameterizes a latent
    Normal. A Transformer decoder maps a latent sample (as a single memory
    token) plus "template" embeddings — with all frames after
    ``args.past_len`` zeroed — to body-joint and object-pose predictions.
    """

    def __init__(self, args):
        # args must provide: single_side, embedding_dim, num_joints, num_points,
        # obj_pose_embedding, action_embedding, dropout, num_heads, ff_size,
        # activation, num_layers, latent_dim, past_len.
        super(InteractionVAE, self).__init__()
        self.device = None  # unused here; sample() derives device from parameters
        self.args = args
        self.single_side = args.single_side  # True: one person; False: extra person axis of size 2
        num_channels = args.embedding_dim
        
        # Per-frame linear embeddings, one per input modality.
        self.bodyEmbedding = nn.Linear(args.num_joints*3, num_channels)
        self.objEmbedding = nn.Linear(args.num_points*3, num_channels)
        self.use_obj_pose_embedding = args.obj_pose_embedding
        # 7-D object pose — presumably translation (3) + quaternion (4); TODO confirm.
        self.objPoseEmbedding = nn.Linear(7, num_channels)
        self.use_action_embedding = args.action_embedding
        self.actionEmbedding = nn.Linear(N_ACTION, num_channels)  # N_ACTION -> num_channels
        
        # Action conditioning concatenates feature-wise, doubling model width.
        if self.use_action_embedding:
            feature_channels = num_channels * 2
        else:
            feature_channels = num_channels

        # NOTE(review): modalities are concatenated along time before the PE,
        # so max_len must cover T * n_modalities (and * 2 when not single_side).
        max_len = 120
        self.PositionalEmbedding = PositionalEncoding(max_len=max_len, d_model=feature_channels, dropout=args.dropout)

        from torch.nn import TransformerDecoderLayer, TransformerDecoder, TransformerEncoderLayer, TransformerEncoder
        # Sequence-first layout: all transformer inputs are (S, B, D).
        seqTransEncoderLayer = TransformerEncoderLayer(d_model=feature_channels,
                                                          nhead=self.args.num_heads,
                                                          dim_feedforward=self.args.ff_size,
                                                          dropout=self.args.dropout,
                                                          activation=self.args.activation,
                                                          batch_first=False)
        self.encoder = TransformerEncoder(seqTransEncoderLayer,
                                                     num_layers=self.args.num_layers)

        # Pooled encoder features -> posterior q(z|x); latent -> decoder memory.
        self.latentNormal = NormalDistDecoder(feature_channels, args.latent_dim)
        self.latentEmbedding = nn.Linear(args.latent_dim, feature_channels)
        seqTransDecoderLayer = TransformerDecoderLayer(d_model=feature_channels,
                                                            nhead=self.args.num_heads,
                                                            dim_feedforward=self.args.ff_size,
                                                            dropout=self.args.dropout,
                                                            activation=self.args.activation,
                                                            batch_first=False)
        self.decoder = TransformerDecoder(seqTransDecoderLayer,
                                                        num_layers=self.args.num_layers)
        # Output heads: per-token body joints and 7-D object pose.
        self.bodyFinalLinear = nn.Linear(feature_channels, args.num_joints*3)
        self.objFinalLinear = nn.Linear(feature_channels, 7)

    def _get_embeddings(self, body_gt, obj_gt, obj_pose_gt, action_gt):
        """Embed each modality and build "template" copies whose future frames
        (index >= args.past_len) are zeroed, so the decoder is conditioned on
        the observed past only. Returns the embeddings, the templates, the
        untouched GT tensors, and the action embedding.
        """
        # body_gt: (T, B, N_joints, 3) / (T, B, 2, N_joints, 3)
        # obj_gt: (T, B, N_points, 3) / (T, B, 2, N_points, 3)
        # obj_pose_gt: (T, B, 7) / (T, B, 2, 7)
        T, B = body_gt.shape[:2]
        N_points = obj_gt.shape[2]  # NOTE(review): unused below
        
        if self.single_side:
            obj_embedding = self.objEmbedding(obj_gt.view(T, B, -1))  # (T, B, N) / (T, B, 2, N)
            body_embedding = self.bodyEmbedding(body_gt.view(T, B, -1))  # (T, B, N) / (T, B, 2, N)
            obj_pose_embedding = self.objPoseEmbedding(obj_pose_gt.view(T, B, 7))  # (T, B, N) / (T, B, 2, N)
        else:
            # Two-person case keeps an explicit person axis of size 2.
            obj_embedding = self.objEmbedding(obj_gt.view(T, B, 2, -1))  # (T, B, N) / (T, B, 2, N)
            body_embedding = self.bodyEmbedding(body_gt.view(T, B, 2, -1))  # (T, B, N) / (T, B, 2, N)
            obj_pose_embedding = self.objPoseEmbedding(obj_pose_gt.view(T, B, 2, 7))  # (T, B, N) / (T, B, 2, N)
        # Clone before masking so the originals stay intact for the encoder.
        body_template_embedding = body_embedding.clone()
        obj_template_embedding = obj_embedding.clone()
        obj_pose_template_embedding = obj_pose_embedding.clone()

        # Zero out the future frames ('zero' template strategy; alternatives
        # such as 'endpose' / learned future embeddings are kept below for
        # reference but disabled).
        body_template_embedding[self.args.past_len:] = 0
        obj_template_embedding[self.args.past_len:] = 0
        obj_pose_template_embedding[self.args.past_len:] = 0
        # if self.args.template_type == 'endpose':
        #     body_template_embedding[self.args.past_len:] = body_template_embedding[self.args.past_len-1]
        #     obj_template_embedding[self.args.past_len:] = obj_template_embedding[self.args.past_len-1]
        # elif self.args.template_type == 'zero':
        #     body_template_embedding[self.args.past_len:] = 0
        #     obj_template_embedding[self.args.past_len:] = 0
        # elif self.args.template_type == 'embedding':
        #     body_template_embedding[self.args.past_len:] = body_template_embedding[self.args.past_len-1] + self.bodyFutureEmbedding
        #     obj_template_embedding[self.args.past_len:] = obj_template_embedding[self.args.past_len-1] + self.objFutureEmbedding
        
        # liuyun: action label is per-sequence, not per-frame.
        action_embedding = self.actionEmbedding(action_gt.view(B, -1))  # (B, N)

        return body_embedding, obj_embedding, obj_pose_embedding, body_template_embedding, obj_template_embedding, obj_pose_template_embedding, body_gt, obj_gt, action_embedding

    def _encode(self, body_embedding, obj_embedding, obj_pose_embedding, action_embedding):
        """Concatenate modality embeddings along time, run the Transformer
        encoder, mean-pool over tokens, and return the posterior Normal."""
        if self.use_action_embedding:
            # Action-conditioned path is not wired up; the cat() lines below
            # are unreachable until this raise is removed.
            raise NotImplementedError
            integrated_body_embedding = torch.cat([body_embedding, action_embedding], dim=-1)
            integrated_obj_embedding = torch.cat([obj_embedding, action_embedding], dim=-1)
            integrated_obj_pose_embedding = torch.cat([obj_pose_embedding, action_embedding], dim=-1)
        else:
            integrated_body_embedding = body_embedding
            integrated_obj_embedding = obj_embedding
            integrated_obj_pose_embedding = obj_pose_embedding
        
        if self.use_obj_pose_embedding:
            encoder_input = torch.cat([integrated_body_embedding, integrated_obj_embedding, integrated_obj_pose_embedding], dim=0)  # (T*3, B, N*2 / N) / (T*3, B, 2, N*2 / N)
        else:
            encoder_input = torch.cat([integrated_body_embedding, integrated_obj_embedding], dim=0)  # (T*2, B, N*2 / N) / (T*2, B, 2, N*2 / N)
        
        if self.single_side:
            encoder_input = self.PositionalEmbedding(encoder_input)
        else:
            # Fold the person axis into the sequence axis: (S, B, 2, N) -> (2*S, B, N).
            B, N_feature = encoder_input.shape[1], encoder_input.shape[-1]
            encoder_input = self.PositionalEmbedding(encoder_input.permute(0, 2, 1, 3).contiguous().view(-1, B, N_feature))
        
        # Mean-pool all tokens into one feature vector per batch element.
        feature = self.encoder(encoder_input).mean(dim=0)  # (T*3 / T*2, B, N*2 / N) / (2*T*3 / 2*T*2, B, N*2 / N)
        z_dist = self.latentNormal(feature)  # (B, D)
        return z_dist

    def _decode(self, body_template_embedding, obj_template_embedding, obj_pose_template_embedding, z_sample, action_embedding):
        """Decode a latent sample (used as a single-token memory) against the
        past-only template embeddings into body and object-pose predictions."""
        z_sample = self.latentEmbedding(z_sample).unsqueeze(0)  # (1, B, feature_channels)
        # 1,B,n_embed
        if self.use_action_embedding:
            # Same unimplemented action path as in _encode.
            raise NotImplementedError
            integrated_body_template_embedding = torch.cat([body_template_embedding, action_embedding], dim=-1)
            integrated_obj_template_embedding = torch.cat([obj_template_embedding, action_embedding], dim=-1)
            integrated_obj_pose_template_embedding = torch.cat([obj_pose_template_embedding, action_embedding], dim=-1)
        else:
            integrated_body_template_embedding = body_template_embedding
            integrated_obj_template_embedding = obj_template_embedding
            integrated_obj_pose_template_embedding = obj_pose_template_embedding
        
        if self.use_obj_pose_embedding:
            decoder_input = torch.cat([integrated_body_template_embedding, integrated_obj_template_embedding, integrated_obj_pose_template_embedding], dim=0)  # (T*3, B, N*2 / N) / (T*3, B, 2, N*2 / N)
        else:
            decoder_input = torch.cat([integrated_body_template_embedding, integrated_obj_template_embedding], dim=0)  # (T*2, B, N*2 / N) / (T*2, B, 2, N*2 / N)
        
        if self.single_side:
            decoder_input = self.PositionalEmbedding(decoder_input)
        else:
            # Fold person axis into sequence axis, mirroring _encode.
            B, N_feature = decoder_input.shape[1], decoder_input.shape[-1]
            decoder_input = self.PositionalEmbedding(decoder_input.permute(0, 2, 1, 3).contiguous().view(-1, B, N_feature))
        
        decoder_output = self.decoder(tgt=decoder_input, memory=z_sample)

        if self.single_side:
            pred = decoder_output  # (T*3 / T*2, B, N*2 / N)
        else:
            # Restore the person axis: (2*S, B, N) -> (S, B, 2, N).
            pred = decoder_output.view(-1, 2, B, N_feature).permute(0, 2, 1, 3).contiguous()  # (T*3 / T*2, B, 2, N*2 / N)
        
        # First T tokens = body segment; last T tokens = last modality segment
        # (object pose when use_obj_pose_embedding is on, otherwise object
        # points — NOTE(review): objFinalLinear still outputs 7-D pose in that
        # case; confirm this is intended).
        body_pred = self.bodyFinalLinear(pred)[:body_template_embedding.shape[0]]
        obj_pred = self.objFinalLinear(pred)[-body_template_embedding.shape[0]:]

        return body_pred, obj_pred

    def forward(self, body_gt, obj_gt, obj_pose_gt, action_gt):
        """Training pass: encode GT into q(z|x), draw a reparameterized sample,
        decode it. Returns (body_pred, body_gt, obj_pred, z_dist)."""
        body_embedding, obj_embedding, obj_pose_embedding, body_template_embedding, obj_template_embedding, obj_pose_template_embedding, body_gt, obj_gt, action_embedding = self._get_embeddings(body_gt, obj_gt, obj_pose_gt, action_gt)

        z_dist = self._encode(body_embedding, obj_embedding, obj_pose_embedding, action_embedding)

        # rsample() keeps the sampling step differentiable (reparameterization).
        z_sample = z_dist.rsample()

        body_pred, obj_pred = self._decode(body_template_embedding, obj_template_embedding, obj_pose_template_embedding, z_sample, action_embedding)

        return body_pred, body_gt, obj_pred, z_dist

    def sample(self, body_gt, obj_gt, obj_pose_gt, action_gt):
        """Generation pass: decode a sample drawn from the standard-normal
        prior N(0, I). The posterior z_dist is still computed and returned
        (e.g. for evaluation), but is NOT the distribution sampled from.
        Temporarily switches to eval mode and restores train mode on exit."""
        device = next(self.parameters()).device
        set_eval = self.training  # remember whether we were in train mode
        if set_eval:
            self.eval()

        with torch.no_grad():
            body_embedding, obj_embedding, obj_pose_embedding, body_template_embedding, obj_template_embedding, obj_pose_template_embedding, body_gt, obj_gt, action_embedding = self._get_embeddings(body_gt, obj_gt, obj_pose_gt, action_gt)
            z_dist = self._encode(body_embedding, obj_embedding, obj_pose_embedding, action_embedding)
            # Prior sample: one latent per batch element.
            z_sample = torch.distributions.normal.Normal(
                loc=torch.zeros((body_embedding.shape[1], self.args.latent_dim), requires_grad=False).to(device),
                scale=torch.ones((body_embedding.shape[1], self.args.latent_dim), requires_grad=False).to(device)).rsample()
            # z_sample = z_sample.to(self.device)

            body_pred, obj_pred= self._decode(body_template_embedding, obj_template_embedding, obj_pose_template_embedding, z_sample, action_embedding)

        if set_eval:
            self.train()
        return body_pred, body_gt, obj_pred,  z_dist