from torch import nn
import torch
import random


class ObjHumanLSTM(nn.Module):
    """LSTM sequence model for joint human (SMPL) + object pose prediction.

    Each frame contains 2 human/object pairs; each pair is described by a
    SMPL pose vector of ``args.smpl_dim`` values plus a 7-d object pose
    (4-d quaternion + 3-d translation).  Three MLPs embed the human pose,
    the object pose and the flattened object point clouds; the concatenated
    per-frame features drive an LSTM whose hidden state is mapped back to
    the two ``smpl_dim + 7`` pose vectors.

    Required ``args`` attributes: ``embedding_dim``, ``residual``,
    ``num_layers``, ``smpl_dim``, ``num_points``.
    Optional: ``teacher_forcing_ratio`` (defaults to the original
    hard-coded 0.8).
    """

    def __init__(self, args):
        super(ObjHumanLSTM, self).__init__()

        num_channels_shape = 64          # per-shape feature size
        num_channels = args.embedding_dim
        self.use_residual = args.residual
        self.num_layers = args.num_layers
        self.num_channels = num_channels
        # One entity = SMPL pose + 7-d object pose (quat + translation).
        self.output_dim = args.smpl_dim + 7
        # Probability of feeding ground truth (rather than the model's own
        # prediction) at each step during training.
        self.teacher_forcing_ratio = getattr(args, 'teacher_forcing_ratio', 0.8)

        def _mlp(in_dim, out_dim):
            # 3-layer MLP shared shape for all embedders (layer order and
            # therefore state_dict keys are identical to the original code).
            return nn.Sequential(
                nn.Linear(in_dim, num_channels),
                nn.ReLU(),
                nn.Linear(num_channels, num_channels),
                nn.ReLU(),
                nn.Linear(num_channels, out_dim),
            )

        self.body_embedding = _mlp(args.smpl_dim, num_channels)
        self.obj_embedding = _mlp(7, num_channels)
        self.shape_embedder = _mlp(args.num_points * 3, num_channels_shape)

        # Per frame: 2 entities x (body + obj) embeddings + 2 shape codes.
        # batch_first is False: sequences are (T, B, feature).
        self.lstm = nn.LSTM(
            input_size=self.num_channels * 4 + num_channels_shape * 2,
            hidden_size=num_channels,
            num_layers=args.num_layers,
        )
        # Two pose vectors per frame (the two human/object pairs).
        self.finalLinear = nn.Linear(num_channels, self.output_dim * 2)

    def _embed_poses(self, poses):
        """Embed poses of shape (T, B, 2, smpl_dim + 7) to (T, B, 4*C).

        The first ``smpl_dim`` channels go through ``body_embedding`` and
        the trailing 7 through ``obj_embedding``; the two embeddings are
        concatenated per entity and the 2 entities flattened together.
        """
        body_feat = self.body_embedding(poses[..., :self.output_dim - 7])
        obj_feat = self.obj_embedding(poses[..., -7:])
        feat = torch.cat([body_feat, obj_feat], dim=-1)  # (T, B, 2, 2*C)
        return feat.reshape(*poses.shape[:2], -1)        # (T, B, 4*C)

    def forward(self, obj_human_gt, obj_points):
        """Run one training pass over a full sequence.

        Args:
            obj_human_gt: (T, B, 2, smpl_dim + 7) ground-truth poses.
            obj_points:   (B, 2, N_points, 3) object point clouds.

        Returns:
            (T, B, 2, smpl_dim + 7) predicted poses.  With probability
            ``teacher_forcing_ratio`` the ground truth is fed at every
            step; otherwise the model rolls out on its own predictions
            after the first frame.
        """
        T, B = obj_human_gt.shape[:2]
        # (1, B, 2*num_channels_shape): one code per object shape.
        shape_feat = self.shape_embedder(obj_points.reshape(B, 2, -1)).reshape(1, B, -1)

        obj_human_feats = self._embed_poses(obj_human_gt)        # (T, B, 4*C)
        rnn_input = torch.cat([obj_human_feats, shape_feat.repeat(T, 1, 1)], dim=-1)

        h_t, c_t = self.initHidden(B, obj_human_gt.device)
        use_teacher_forcing = random.random() < self.teacher_forcing_ratio
        if use_teacher_forcing:
            # Feed the ground truth of frame i to predict frame i.
            pose_preds = []
            for i in range(T):
                out, (h_t, c_t) = self.lstm(rnn_input[i].unsqueeze(0), (h_t, c_t))
                pose_pred = self.finalLinear(out).view(1, B, 2, self.output_dim)
                if self.use_residual:
                    pose_pred = pose_pred + obj_human_gt[i].unsqueeze(0)
                pose_preds.append(pose_pred)
            pose_preds = torch.cat(pose_preds, dim=0)
        else:
            # Bootstrap from the first ground-truth frame, then roll out
            # autoregressively for the remaining T - 1 frames.
            out, (h_t, c_t) = self.lstm(rnn_input[0].unsqueeze(0), (h_t, c_t))
            pose_pred_init = self.finalLinear(out).view(1, B, 2, self.output_dim)
            if self.use_residual:
                pose_pred_init = pose_pred_init + obj_human_gt[0].unsqueeze(0)
            pose_preds = self.autoregressive_(h_t, c_t, pose_pred_init, shape_feat, T)

        return pose_preds

    def initHidden(self, batch_size, device):
        """Return zero-initialised (h, c), each (num_layers, B, hidden)."""
        shape = (self.num_layers, batch_size, self.num_channels)
        return (torch.zeros(shape, device=device), torch.zeros(shape, device=device))

    def autoregressive_(self, h_0, c_0, init_pose, shape_feat, pred_len):
        """Roll the LSTM out on its own predictions.

        Args:
            h_0, c_0:   LSTM hidden/cell state after the warm-up frames.
            init_pose:  (1, B, 2, smpl_dim + 7) first predicted frame.
            shape_feat: (1, B, 2*num_channels_shape) shape codes.
            pred_len:   total number of output frames (including init_pose).

        Returns:
            (pred_len, B, 2, smpl_dim + 7) predictions.
        """
        B = init_pose.shape[1]
        pose_preds = [init_pose]
        h_t, c_t = h_0, c_0
        for _ in range(pred_len - 1):
            feat = self._embed_poses(pose_preds[-1])  # (1, B, 4*C)
            rnn_input_i = torch.cat([feat, shape_feat], dim=-1)
            out, (h_t, c_t) = self.lstm(rnn_input_i, (h_t, c_t))
            pose_pred = self.finalLinear(out).view(1, B, 2, self.output_dim)
            if self.use_residual:
                # Residual w.r.t. the previous prediction; detached so
                # gradients do not accumulate through the whole rollout.
                pose_pred = pose_pred + pose_preds[-1].detach().clone()
            pose_preds.append(pose_pred)
        return torch.cat(pose_preds, dim=0)  # (pred_len, B, 2, smpl_dim + 7)

    def forward_autoregressive(self, obj_human_gt, obj_points, pred_len = 25):
        """Warm up on observed frames, then predict ``pred_len`` frames.

        Args:
            obj_human_gt: (T_past, B, 2, smpl_dim + 7) observed poses.
            obj_points:   (B, 2, N_points, 3) point clouds; a pre-flattened
                          (B, 2, N_points*3) tensor is also accepted.
            pred_len:     number of frames to generate.

        Returns:
            (pred_len, B, 2, smpl_dim + 7) predicted future poses.
        """
        T_past, B = obj_human_gt.shape[:2]
        # Flatten the point clouds exactly as forward() does.  (The original
        # code fed the raw (B, 2, N, 3) tensor straight into the MLP, which
        # cannot match the Linear(num_points*3, ...) input size.)
        shape_feat = self.shape_embedder(obj_points.reshape(B, 2, -1)).reshape(1, B, -1)
        obj_human_feats_past = self._embed_poses(obj_human_gt)   # (T_past, B, 4*C)
        rnn_input = torch.cat([obj_human_feats_past, shape_feat.repeat(T_past, 1, 1)], dim=-1)

        h_t, c_t = self.initHidden(B, obj_human_gt.device)
        for i in range(T_past):
            out, (h_t, c_t) = self.lstm(rnn_input[i].unsqueeze(0), (h_t, c_t))
        # Only the final warm-up output seeds the rollout.
        pose_pred = self.finalLinear(out).view(1, B, 2, self.output_dim)
        if self.use_residual:
            pose_pred = pose_pred + obj_human_gt[-1]
        return self.autoregressive_(h_t, c_t, pose_pred, shape_feat, pred_len)

if __name__ == '__main__':
    # Smoke test: push random, correctly-shaped inputs through the model.
    # (The original referenced an undefined `ObjLSTM`, passed no `args`,
    # hard-coded CUDA, and used tensor shapes forward() cannot accept.)
    from types import SimpleNamespace

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    args = SimpleNamespace(
        embedding_dim=64,
        residual=True,
        num_layers=2,
        smpl_dim=66,
        num_points=512,
    )
    model = ObjHumanLSTM(args).to(device)

    T, B = 20, 16
    # (T, B, 2, smpl_dim + 7) pose sequence and (B, 2, N, 3) point clouds.
    obj_human = torch.randn(T, B, 2, args.smpl_dim + 7, device=device)
    points = torch.randn(B, 2, args.num_points, 3, device=device)
    print(model(obj_human, points).size())