import os
from os.path import join, abspath, dirname, isfile, isdir
import sys
from torch import nn
import torch
import random
from argparse import ArgumentParser, Namespace
sys.path.append(dirname(abspath(__file__)))
from pointnet import SimplePointnet
sys.path.append(join(dirname(abspath(__file__)), "../../../.."))
from dataset_statistics.info import N_CATEGORY, N_ACTION
# following https://arxiv.org/abs/1904.03419

class ContextLSTM(nn.Module):
    """
    Two-node (human, object) interaction-graph LSTM for autoregressive
    motion prediction, following https://arxiv.org/abs/1904.03419.

    input: 7dof object pose, object shape points, body joints
           (plus one-hot action / object-category encodings)
    output: updated body joints, object poses for ``pred_len`` frames
    """
    def __init__(self, args):
        """
        args: Namespace or dict with fields
            embedding_dim (int): hidden size shared by the LSTMs and MLPs
            residual (bool): if truthy, predict per-step residuals on top
                of the previous pose (the residual base is detached)
            num_layers (int): number of stacked LSTM layers
            smpl_dim (int): flattened human-joint dimension (e.g. 21*3)
            encode_action (bool): if truthy, append the action embedding
                to both RNN inputs
        """
        super(ContextLSTM, self).__init__()

        # In the paper's terminology we have two nodes in this graph:
        # one human node and one object node.
        if isinstance(args, dict):
            args = Namespace(**args)
        num_channels_shape = 64
        num_channels_category = 128
        num_channels_action = 128
        num_channels = args.embedding_dim
        self.use_residual = args.residual
        self.num_layers = args.num_layers
        self.num_channels = num_channels
        self.human_dim = args.smpl_dim
        self.obj_dim = 7  # 7-dof object pose, rotation first

        self.encode_action = args.encode_action

        # Per-frame pose embeddings, one MLP per node type.
        self.body_embedding = nn.Sequential(
            nn.Linear(args.smpl_dim, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels)
        )
        self.obj_embedding = nn.Sequential(
            nn.Linear(self.obj_dim, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels)
        )

        # shape_embedder:
        # input: (B, 2, N_points, 3)
        # output: (B, 2, num_channels_shape)
        self.shape_embedder = SimplePointnet(c_dim=num_channels_shape)

        # One-hot object category -> category feature.
        self.category_embedder = nn.Sequential(
            nn.Linear(N_CATEGORY, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels_category)
        )

        # One-hot action -> action feature.
        self.action_embedder = nn.Sequential(
            nn.Linear(N_ACTION, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels_action)
        )

        # Context RNN for the object node (pose + shape + category
        # [+ action] features per step).
        LSTM_input_size = self.num_channels + num_channels_shape + num_channels_category
        if self.encode_action:
            LSTM_input_size += num_channels_action
        self.context_lstm_obj = nn.LSTM(input_size=LSTM_input_size, hidden_size=num_channels, num_layers=args.num_layers)

        # Context RNN for the human node (pose [+ action] features per step).
        LSTM_input_size = self.num_channels
        if self.encode_action:
            LSTM_input_size += num_channels_action
        self.context_lstm_human = nn.LSTM(input_size=LSTM_input_size, hidden_size=num_channels, num_layers=args.num_layers)

        # Adjacency MLP: soft interaction score between the two nodes.
        # input: (*, num_channels*2) -- concatenated last-layer hidden states
        # output: a score in [0, 1]
        self.adj_mlp = nn.Sequential(
            nn.Linear(num_channels * 2, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, 1),
            nn.Sigmoid()
        )

        # Object read-out head.
        # input: hidden state of the object node
        # output: object pose (residual); obj_dim*2 matches the size-2
        # entity axis the result is viewed into.
        self.objfinal = nn.Sequential(
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, self.obj_dim * 2)
        )

        # Human read-out head.
        # input: hidden state of the human node
        # output: human joints (residual); human_dim*2 matches the size-2
        # entity axis the result is viewed into.
        self.humanfinal = nn.Sequential(
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, num_channels),
            nn.ReLU(),
            nn.Linear(num_channels, self.human_dim * 2)
        )

    def forward(self, obj_human_gt, obj_points, action_encoding, category_encoding, pred_len=10):
        """
        obj_human_gt: (T_past, B, 2, smpl_dim + 7) observed poses
        obj_points: (B, 2, N_point, 3) object point clouds
        action_encoding: (B, N_ACTION) one-hot action
        category_encoding: (B, 2, N_CATEGORY) one-hot object categories
        returns: (pred_len, B, 2, smpl_dim + 7) predicted poses

        NOTE(fix): the previous version forwarded only (obj_human_gt,
        obj_points, pred_len), so pred_len was consumed as action_encoding
        and forward_autoregressive always raised a TypeError. The two
        encodings are now explicit parameters and passed through.
        """
        return self.forward_autoregressive(obj_human_gt, obj_points, action_encoding, category_encoding, pred_len)

    def initHidden(self, batch_size, device):
        """Return zero-initialised (h_0, c_0) states for a stacked LSTM."""
        return (torch.zeros(self.num_layers, batch_size, self.num_channels).to(device),
                torch.zeros(self.num_layers, batch_size, self.num_channels).to(device))

    def autoregressive_(self, init_pose, shape_feat, action_feat, category_feat, pred_len, h_0_h, c_0_h, h_0_o, c_0_o):
        """
        Roll both LSTMs forward for pred_len - 1 additional steps, feeding
        each prediction back in as the next input.

        init_pose: (1, B, 2, smpl_dim + 7) first predicted frame
        shape_feat: (1, B, 2, N_feature_shape)
        action_feat: (1, B, N_feature_action)
        category_feat: (1, B, 2, N_feature_category)
        h_0_*/c_0_*: warmed-up LSTM states (human `h`, object `o` nodes)
        returns: (pred_len, B, 2, smpl_dim + 7), including init_pose
        """
        B = init_pose.shape[1]

        # Time-invariant part of the object RNN input, hoisted out of the
        # loop (shape/category features do not change across steps).
        static_obj_feats = [shape_feat.sum(dim=-2), category_feat.sum(dim=-2)]
        if self.encode_action:
            static_obj_feats.append(action_feat)

        pose_preds = [init_pose]  # each entry: (1, B, 2, smpl_dim + 7)
        h_t_h, c_t_h, h_t_o, c_t_o = h_0_h, c_0_h, h_0_o, c_0_o
        for i in range(pred_len - 1):
            # Embed the previous prediction; sum over the size-2 entity axis.
            human_feat = self.body_embedding(pose_preds[-1][..., :self.human_dim]).sum(dim=-2)  # (1, B, N_feature)
            obj_feat = self.obj_embedding(pose_preds[-1][..., -self.obj_dim:]).sum(dim=-2)  # (1, B, N_feature)

            # Soft adjacency: mix the node features by a learned score.
            mix_coef = self.adj_mlp(torch.cat([h_t_h[-1, None], h_t_o[-1, None]], dim=-1))  # (1, B, 1)
            human_mix_feature = mix_coef * human_feat + (1 - mix_coef) * obj_feat
            obj_mix_feature = (1 - mix_coef) * human_feat + mix_coef * obj_feat

            rnn_input_obj = torch.cat([obj_mix_feature] + static_obj_feats, dim=-1)
            if self.encode_action:
                rnn_input_human = torch.cat([human_mix_feature, action_feat], dim=-1)
            else:
                rnn_input_human = human_mix_feature

            obj_node_feat, (h_t_o, c_t_o) = self.context_lstm_obj(rnn_input_obj, (h_t_o, c_t_o))
            human_node_feat, (h_t_h, c_t_h) = self.context_lstm_human(rnn_input_human, (h_t_h, c_t_h))

            # Read out the next pose; the residual base is detached so
            # gradients do not flow through the autoregressive recursion.
            human_pose = self.humanfinal(human_node_feat).view(1, B, 2, -1).contiguous()
            obj_pose = self.objfinal(obj_node_feat).view(1, B, 2, -1).contiguous()
            pose_pred = torch.cat([human_pose, obj_pose], dim=-1)
            if self.use_residual:
                pose_pred = pose_pred + pose_preds[-1].detach().clone()
            pose_preds.append(pose_pred)

        return torch.cat(pose_preds, dim=0)  # (pred_len, B, 2, smpl_dim + 7)

    def autoregressive_encoder(self, pose_gt, shape_feat, action_feat, category_feat):
        """
        Warm up the LSTM states on the observed sequence and produce the
        first predicted frame. Assumes T_past >= 1.

        pose_gt: (T_past, B, 2, smpl_dim + 7)
        shape_feat: (1, B, 2, shape_dim)
        action_feat: (1, B, N_feature_action)
        category_feat: (1, B, 2, N_feature_category)
        returns: (h_t_o, c_t_o), (h_t_h, c_t_h), pose_pred where pose_pred
                 is (1, B, 2, smpl_dim + 7)
        """
        T_past, B = pose_gt.shape[:2]
        h_t_h, c_t_h = self.initHidden(B, pose_gt.device)  # (N_layer, B, N_feature) each
        h_t_o, c_t_o = self.initHidden(B, pose_gt.device)  # (N_layer, B, N_feature) each

        # Time-invariant part of the object RNN input, hoisted out of the loop.
        static_obj_feats = [shape_feat.sum(dim=-2), category_feat.sum(dim=-2)]
        if self.encode_action:
            static_obj_feats.append(action_feat)

        for i in range(T_past):
            # Embed the observed frame; sum over the size-2 entity axis.
            human_feat = self.body_embedding(pose_gt[i][None, ..., :self.human_dim]).sum(dim=-2)  # (1, B, N_feature)
            obj_feat = self.obj_embedding(pose_gt[i][None, ..., -self.obj_dim:]).sum(dim=-2)  # (1, B, N_feature)

            # Soft adjacency: mix the node features by a learned score.
            mix_coef = self.adj_mlp(torch.cat([h_t_h[-1, None], h_t_o[-1, None]], dim=-1))  # (1, B, 1)
            human_mix_feature = mix_coef * human_feat + (1 - mix_coef) * obj_feat  # (1, B, N_feature)
            obj_mix_feature = (1 - mix_coef) * human_feat + mix_coef * obj_feat  # (1, B, N_feature)

            rnn_input_obj = torch.cat([obj_mix_feature] + static_obj_feats, dim=-1)
            if self.encode_action:
                rnn_input_human = torch.cat([human_mix_feature, action_feat], dim=-1)
            else:
                rnn_input_human = human_mix_feature

            obj_node_feat, (h_t_o, c_t_o) = self.context_lstm_obj(rnn_input_obj, (h_t_o, c_t_o))
            human_node_feat, (h_t_h, c_t_h) = self.context_lstm_human(rnn_input_human, (h_t_h, c_t_h))

        # First predicted frame from the final node features.
        human_pose = self.humanfinal(human_node_feat).view(1, B, 2, -1).contiguous()
        obj_pose = self.objfinal(obj_node_feat).view(1, B, 2, -1).contiguous()
        pose_pred = torch.cat([human_pose, obj_pose], dim=-1)
        if self.use_residual:
            pose_pred = pose_pred + pose_gt[-1].detach()

        return (h_t_o, c_t_o), (h_t_h, c_t_h), pose_pred

    def forward_autoregressive(self, obj_human_gt, obj_points, action_encoding, category_encoding, pred_len=25):
        """
        Encode the observed window, then predict pred_len future frames.

        obj_human_gt: (T_past, B, 2, smpl_dim + 7)
        obj_points: (B, 2, N_point, 3)
        action_encoding: (B, N_action)
        category_encoding: (B, 2, N_category)
        returns: (pred_len, B, 2, smpl_dim + 7)
        """
        B = obj_human_gt.shape[1]
        obj_points = obj_points.view(B, 2, -1, 3)
        shape_feat = self.shape_embedder(obj_points).view(1, B, 2, -1)  # (1, B, 2, N_feature_shape)
        action_feat = self.action_embedder(action_encoding).view(1, B, -1)  # (1, B, N_feature_action)
        category_feat = self.category_embedder(category_encoding).view(1, B, 2, -1)  # (1, B, 2, N_feature_category)
        (h_t_o, c_t_o), (h_t_h, c_t_h), pose_init = self.autoregressive_encoder(obj_human_gt, shape_feat, action_feat, category_feat)
        pose_preds = self.autoregressive_(pose_init, shape_feat, action_feat, category_feat, pred_len, h_t_h, c_t_h, h_t_o, c_t_o)

        return pose_preds

if __name__ == '__main__':
    # Smoke test for ContextLSTM with correctly-shaped random inputs.
    # Fixes vs. the previous version: the args dict was missing
    # `encode_action` (AttributeError in __init__), the tensors did not
    # match the documented (T, B, 2, ...) layout, the action/category
    # encodings were never supplied, and 'cuda' crashed on CPU-only hosts.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    T_past, B = 10, 16
    smpl_dim = 21 * 3

    # obj_human_gt: (T_past, B, 2, smpl_dim + 7)
    pose_rand = torch.randn(T_past, B, 2, smpl_dim + 7).to(device)
    # obj_points: (B, 2, N_point, 3)
    points_rand = torch.randn(B, 2, 12, 3).to(device)
    # One-hot-like encodings (random stand-ins for the smoke test).
    action_rand = torch.randn(B, N_ACTION).to(device)
    category_rand = torch.randn(B, 2, N_CATEGORY).to(device)

    args = dict(
        embedding_dim=128,
        residual=1,
        num_layers=4,
        smpl_dim=smpl_dim,
        encode_action=1,
    )

    model = ContextLSTM(args).to(device)
    preds = model.forward_autoregressive(pose_rand, points_rand, action_rand, category_rand, pred_len=25)
    print(preds.size())
    # expected: (pred_len, B, 2, smpl_dim + 7)