import logging
import itertools
import torch
import torch.nn as nn
from torch.nn.functional import softmax, relu
import torch.nn.functional as F
from torch.nn import Parameter
from crowd_nav.policy.helpers import mlp, GAT
import numpy as np


class Temporal_Attention(nn.Module):
    """Multi-head self-attention block used after the spatial GAT layers.

    Expects a (N, C, T, V) tensor (batch, channels, time, nodes), internally
    permutes it to (N, V, T, C), projects to queries/keys/values, and applies
    scaled dot-product attention with heads of width ``self.d`` stacked along
    the batch dimension.

    NOTE(review): with a (N, C, T, V) input, the score tensor comes out as
    (heads*N, T, V, V) — i.e. attention runs across graph nodes per time
    step, not across time as the class name suggests; the commented-out mask
    code below assumes a (..., T, T) score shape instead.  Confirm the
    intended input layout.

    Args:
        outfea (int): channel dimension; should be divisible by ``self.d``
            for the head split to be exact.
    """
    def __init__(self, outfea):
        super(Temporal_Attention, self).__init__()
        # Query / key / value projections over the channel dimension.
        self.qff = nn.Linear(outfea, outfea)
        self.kff = nn.Linear(outfea, outfea)
        self.vff = nn.Linear(outfea, outfea)

        # Position-wise feed-forward applied to the attention output
        # (GELU is the hand-rolled module defined later in this file).
        self.ff = nn.Sequential(
            nn.Linear(outfea, outfea),
            GELU(),
            nn.Linear(outfea, outfea),
        )

        self.ln = nn.LayerNorm(outfea)

        # Per-head feature width; the number of heads is outfea // self.d.
        self.d = 4
        # self.k = k
        # self.device = device

    def forward(self, x, Mask=False):
        # NOTE(review): ``Mask`` is currently ignored — the masking branch is
        # commented out below, so attention is always unmasked.
        x = x.permute(0,3,2,1)  # (N, C, T, V) -> (N, V, T, C)
        query = self.qff(x)
        key = self.kff(x)
        value = self.vff(x)

        # Split channels into heads of width self.d, stack heads along the
        # batch axis, and permute so matmul contracts the feature dimension.
        query = torch.cat(torch.split(query, self.d, -1), 0).permute(0,2,1,3)
        key = torch.cat(torch.split(key, self.d, -1), 0).permute(0,2,3,1)
        value = torch.cat(torch.split(value, self.d, -1), 0).permute(0,2,1,3)

        # Scaled dot-product attention logits.
        a = torch.matmul(query, key)
        a /= (self.d ** 0.5)

        # if Mask == True:
        #     batch_size = x.shape[0]
        #     num_steps = x.shape[1]
        #     num_vertexs = x.shape[2]
        #     mask = torch.ones(num_steps, num_steps).to(self.device) # [T,T]
        #     mask = torch.tril(mask) # [T,T], entries above the diagonal zeroed
        #     mask = torch.unsqueeze(torch.unsqueeze(mask, dim=0), dim=0) # [1,1,T,T]
        #     mask = mask.repeat(self.k * batch_size, num_vertexs, 1, 1) # [k*B,N,T,T]
        #     mask = mask.to(torch.bool)
        #     zero_vec = (-2 ** 15 + 1)*torch.ones_like(a).to(self.device) # [k*B,N,T,T] filled with a huge negative value
        #     a = torch.where(mask, a, zero_vec)

        a = torch.softmax(a, -1)

        # Attention-weighted sum, then undo the head stacking and restore the
        # (N, V, T, C) layout.
        value = torch.matmul(a, value)
        value = torch.cat(torch.split(value, x.shape[0], 0), -1).permute(0,2,1,3)
        # Residual connection + feed-forward, LayerNorm, back to (N, C, T, V).
        value = self.ff(value) + x

        return self.ln(value).permute(0,3,2,1)


class GELU(nn.Module):
    """Gaussian Error Linear Unit, tanh approximation.

    Computes ``0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))``,
    i.e. the same formula as ``F.gelu(x, approximate='tanh')``.
    """

    def __init__(self):
        super(GELU, self).__init__()

    def forward(self, x):
        # torch.tanh instead of the deprecated F.tanh (the nn.functional
        # alias was removed in modern PyTorch); the formula is unchanged.
        return 0.5 * x * (1 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3))))


class ConvTemporalGraphical(nn.Module):
    # Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py
    r"""Basic graph-convolution building block.

    A 2-D convolution that slides only along the temporal axis, followed by
    an einsum with the adjacency tensor that mixes node features according to
    the graph topology (no spatial convolution as such).

    Args:
        in_channels (int): channels of the input sequence.
        out_channels (int): channels produced by the convolution.
        kernel_size (int): size of the graph convolving kernel.
        t_kernel_size (int): temporal kernel size. Default: 1
        t_stride (int, optional): temporal stride. Default: 1
        t_padding (int, optional): temporal zero-padding on both sides. Default: 0
        t_dilation (int, optional): temporal dilation. Default: 1
        bias (bool, optional): add a learnable bias. Default: ``True``
    Shape:
        - Input[0]: graph sequence in :math:`(N, in_channels, T_{in}, V)` format
        - Input[1]: adjacency tensor whose second dimension must equal
          ``kernel_size`` (consumed by the einsum as :math:`(N, T, V, W)`)
        - Output[0]: graph sequence in :math:`(N, out_channels, T_{out}, V)` format
        - Output[1]: the adjacency tensor, passed through unchanged
        where
            :math:`N` is a batch size,
            :math:`T_{in}/T_{out}` is a length of input/output sequence,
            :math:`V`/:math:`W` is the number of graph nodes.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 t_kernel_size=1,
                 t_stride=1,
                 t_padding=0,
                 t_dilation=1,
                 bias=True):
        super(ConvTemporalGraphical, self).__init__()
        self.kernel_size = kernel_size
        # Kernel/stride/padding/dilation are all (time, 1): the convolution
        # only ever moves along the temporal axis, never across nodes.
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=(t_kernel_size, 1),
            stride=(t_stride, 1),
            padding=(t_padding, 0),
            dilation=(t_dilation, 1),
            bias=bias)

    def forward(self, x, A):
        """Convolve ``x`` in time, then diffuse features over the graph."""
        assert A.size(1) == self.kernel_size
        features = self.conv(x)
        # Contract the node axis against the adjacency:
        # (n,c,t,v) x (n,t,v,w) -> (n,c,t,w)
        mixed = torch.einsum('nctv,ntvw->nctw', (features, A))
        return mixed.contiguous(), A


class st_gcn(nn.Module):
    r"""Applies a spatial-temporal graph convolution over an input graph sequence.

    In the current revision the original ST-GCN spatial/temporal convolutions
    are replaced by two stacked graph-attention layers (spatial, over nodes)
    followed by a ``Temporal_Attention`` block, with a residual connection
    around the whole thing.

    Args:
        in_channels (int): Number of channels in the input sequence data
        out_channels (int): Number of channels produced by the convolution
        kernel_size (tuple): Size of the temporal convolving kernel and graph convolving kernel
        stride (int, optional): Stride of the temporal convolution. Default: 1
        dropout (int, optional): Dropout rate of the final output. Default: 0
        residual (bool, optional): If ``True``, applies a residual mechanism. Default: ``True``
    Shape:
        - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
        - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
        - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
        - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
        where
            :math:`N` is a batch size,
            :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
            :math:`T_{in}/T_{out}` is a length of input/output sequence,
            :math:`V` is the number of graph nodes.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,  # (temporal_kernel_size, sequence_length)
                 use_mdn=False,
                 stride=1,
                 dropout=0,
                 residual=True):
        super(st_gcn, self).__init__()

        assert len(kernel_size) == 2
        assert kernel_size[0] % 2 == 1
        # Temporal "same" padding; only used by the commented-out tcn branch
        # below, kept for reference.
        padding = ((kernel_size[0] - 1) // 2, 0)
        self.use_mdn = use_mdn

        # NOTE(review): self.gcn is constructed but its call in forward() is
        # commented out, so these parameters are dead weight in checkpoints;
        # removed only with a checkpoint migration.
        self.gcn = ConvTemporalGraphical(in_channels, out_channels,
                                         kernel_size[1]) # kernel_size (3, 8)

        # Two stacked spatial graph-attention layers.
        self.gat0 = GraphAttentionLayer_history(out_channels, out_channels)
        self.gat1 = GraphAttentionLayer_history(out_channels, out_channels)

        # self.tcn = nn.Sequential(
        #     nn.BatchNorm2d(out_channels),
        #     nn.PReLU(),
        #     nn.Conv2d(
        #         out_channels,
        #         out_channels,
        #         (kernel_size[0], 1),  # second kernel dim is 1: convolve only over time; the last axis is the number of pedestrians (graph nodes)
        #         (stride, 1),
        #         padding,
        #     ),
        #     nn.BatchNorm2d(out_channels),
        #     nn.Dropout(dropout, inplace=True),
        # )
        # Single temporal-attention block (kept in a ModuleList for easy stacking).
        self.t_Attention_n = nn.ModuleList([Temporal_Attention(out_channels) for i in range(1)])

        if not residual:
            self.residual = lambda x: 0

        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x

        else:
            # Channel/stride mismatch: project the residual with a 1x1 conv.
            self.residual = nn.Sequential(
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=(stride, 1)),
                nn.BatchNorm2d(out_channels),
            )

        self.prelu = nn.PReLU()

    def forward(self, x, A, adj):
        """Spatial GAT + temporal attention with a residual connection.

        Args:
            x: graph sequence, (N, C, T, V) — assumed from the permutes below.
            A: adjacency tensor; currently passed through unchanged (the gcn
               branch that consumed it is commented out).
            adj: binary attention mask for the GAT layers, (N, T, V, V).
        Returns:
            (out, A): sequence with the same (N, C, T, V) layout, and the
            untouched adjacency.
        """

        res = self.residual(x)

        # x, A = self.gcn(x, A)

        # (N, C, T, V) -> (N, T, V, C): attention runs over the node axis.
        x = x.permute(0,2,3,1)
        H1, self.attention_weights = self.gat0(x, adj)
        H2, _ = self.gat1(H1, adj)
        # Dense skip connection over both GAT layers.
        x = H1 + H2 + x

        # Back to (N, C, T, V) for the temporal-attention block.
        x = x.permute(0,3,1,2)
        # x = self.tcn(x) + res
        for i in range(1):
            x = self.t_Attention_n[i](x)
        x = x + res

        if not self.use_mdn:
            x = self.prelu(x)

        return x, A



class Social_Stgcnn(nn.Module):  # 引入TXP-CNN
    def __init__(self, config, robot_state_dim, human_state_dim, n_stgcnn=1, n_txpcnn=5, input_feat=32, output_feat=32, # TODO 5,
                 seq_len=4, pred_seq_len=4, kernel_size=3):
        super(Social_Stgcnn, self).__init__()
        self.name = 'Social_Stgcnn'
        self.n_stgcnn = n_stgcnn
        self.n_txpcnn = n_txpcnn

        self.st_gcns = nn.ModuleList()
        self.st_gcns.append(st_gcn(input_feat, output_feat, (kernel_size, seq_len)))
        for j in range(1, self.n_stgcnn):
            self.st_gcns.append(st_gcn(output_feat, output_feat, (kernel_size, seq_len)))

        self.tpcnns = nn.ModuleList()
        self.tpcnns.append(nn.Conv2d(seq_len, pred_seq_len, 3, padding=1))
        for j in range(1, self.n_txpcnn):
            self.tpcnns.append(nn.Conv2d(pred_seq_len, pred_seq_len, 3, padding=1))
        self.tpcnn_ouput = nn.Conv2d(pred_seq_len, pred_seq_len, 3, padding=1)

        self.prelus = nn.ModuleList()
        for j in range(self.n_txpcnn):
            self.prelus.append(nn.PReLU())

        # from RGL
        self.multiagent_training = config.gcn.multiagent_training
        num_layer = config.gcn.num_layer
        X_dim = config.gcn.X_dim
        wr_dims = config.gcn.wr_dims
        wh_dims = config.gcn.wh_dims
        final_state_dim = config.gcn.final_state_dim
        similarity_function = config.gcn.similarity_function
        layerwise_graph = config.gcn.layerwise_graph
        skip_connection = config.gcn.skip_connection

        # design choice

        # 'gaussian', 'embedded_gaussian', 'cosine', 'cosine_softmax', 'concatenation'
        self.similarity_function = similarity_function
        self.robot_state_dim = robot_state_dim
        self.human_state_dim = human_state_dim
        self.num_layer = num_layer
        self.X_dim = X_dim
        self.layerwise_graph = layerwise_graph
        self.skip_connection = skip_connection
        logging.info('Similarity_func: {}'.format(self.similarity_function))
        logging.info('Layerwise_graph: {}'.format(self.layerwise_graph))
        logging.info('Skip_connection: {}'.format(self.skip_connection))
        logging.info('Number of layers: {}'.format(self.num_layer))

        self.w_r = mlp(robot_state_dim, wr_dims, last_relu=True)
        self.w_h = mlp(human_state_dim, wh_dims, last_relu=True)

        self.w_a = mlp(2 * self.X_dim, [2 * self.X_dim, 1], last_relu=True)

        # for visualization
        self.attention_weights = None

        # from tree
        # self.gat0 = GraphAttentionLayer_history(self.X_dim, self.X_dim)
        # self.gat1 = GraphAttentionLayer_history(self.X_dim, self.X_dim)


    def forward(self, state):
        robot_state, human_states = state          # [1 or 100,8,1,9] [1 or 100,8,5,5]

        # compute feature matrix X
        robot_state_embedings = self.w_r(robot_state) # FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)
        human_state_embedings = self.w_h(human_states)
        X = torch.cat([robot_state_embedings, human_state_embedings], dim=2)

        # compute matrix A
        if not self.layerwise_graph:
            normalized_A = self.compute_similarity_matrix(X)
            # value_X = self.w_v(X)
            # self.attention_weights = normalized_A[0, 0, :].data.cpu().numpy()

        # stgcn no good
        v = X.permute(0,3,1,2)   # The size of v should be [ 1，2(x,y)，8(obs length)，3(human_num+1)]     [1,32,8,6]
        a = normalized_A   # [8(obs length)，3(human_num)，3(human_num)]      [8,6,6]
        adj = self.compute_adjectory_matrix(state)

        for k in range(self.n_stgcnn):
            v, a = self.st_gcns[k](v, a, adj)  # 只对graph sequence做卷积，邻接矩阵不使用，直接最后输出

        v_robot = v.view(v.shape[0], v.shape[2], v.shape[3], v.shape[1])

        v = v.view(v.shape[0], v.shape[2], v.shape[1], v.shape[3])
        v = self.prelus[0](self.tpcnns[0](v)) # [1,predicted_length,5,6]

        for k in range(1, self.n_txpcnn - 1):  #维度不变
            v = self.prelus[k](self.tpcnns[k](v)) + v

        v = self.tpcnn_ouput(v)
        v_human = v.view(v.shape[0], v.shape[1], v.shape[3], v.shape[2])  # [1,5（预测轨迹的五元组表示）,12(预测轨迹长度）,6]

        return v_robot, v_human   # [1,sequence_length,num,feature]


        # GAT nice!
        # adj = self.compute_adjectory_matrix(state)
        # if robot_state.shape[0]==1:
        #     H1, self.attention_weights = self.gat0(X, adj)
        # else:
        #     H1, _ = self.gat0(X, adj)
        # H2, _ = self.gat1(H1, adj)
        # if self.skip_connection:
        #     output = H1 + H2 + X
        # else:
        #     output = H2
        # return output

    def compute_adjectory_matrix(self, state): # [1,6,32]--->[1,8,6,32] # [1 or 100,8,1,9] [1 or 100,8,5,5]
        robot_state = state[0]
        human_state = state[1]
        robot_num = robot_state.size()[2]
        human_num = human_state.size()[2]
        Num = robot_num + human_num
        adj = torch.ones((Num, Num))
        for i in range(robot_num, robot_num+human_num):
            adj[i][0] = 0
        adj = adj.repeat(robot_state.size()[0], robot_state.size()[1], 1, 1)
        return adj

    def compute_similarity_matrix(self, X):
        indices = [pair for pair in itertools.product(list(range(X.size(2))), repeat=2)]
        selected_features = torch.index_select(X, dim=2, index=torch.LongTensor(indices).reshape(-1).to(torch.device("cuda:0" if torch.cuda.is_available() else "cpu")))
        pairwise_features = selected_features.reshape((X.size(0),X.size(1), X.size(2) * X.size(2), X.size(3) * 2))
        A = self.w_a(pairwise_features).reshape(X.size(0),X.size(1), X.size(2), X.size(2))
        return A



class GraphAttentionLayer_history(nn.Module):
    """
    Simple GAT layer, similar to https://arxiv.org/abs/1710.10903

    Variant that operates on 4-D inputs (batch, time, n_agents, features);
    attention is computed per time step across the agent dimension.
    """

    def __init__(self, in_features, out_features, concat=True):
        super(GraphAttentionLayer_history, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.concat = concat

        # Scores each concatenated feature pair with a small MLP.
        self.w_a = mlp(2 * self.in_features, [2 * self.in_features, 1], last_relu=True)
        # NOTE(review): negative_slope=-0.2 (not the GAT paper's 0.2) makes
        # the activation non-monotonic; it is used consistently across this
        # file, so it is left untouched — confirm it is intentional.
        self.leakyrelu = nn.LeakyReLU(negative_slope=-0.2)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def forward(self, input, adj):

        # shape of input is (batch_size, time, graph_size, feature_dims)
        # shape of adj is (batch_size, time, graph_size, graph_size)
        assert len(input.shape) == 4
        assert len(adj.shape) == 4
        A = self.compute_similarity_matrix(input)
        e = self.leakyrelu(A)
        # Masked softmax: edges with adj == 0 get a huge negative logit so
        # they receive ~zero attention.
        zero_vec = -9e15 * torch.ones_like(e)
        attention = torch.where(adj.to(self.device) > 0, e.to(self.device), zero_vec.to(self.device))
        attention = nn.functional.softmax(attention, dim=3)
        next_H = torch.matmul(attention, input)
        # Second return value: first sample / first timestep attention matrix
        # as a detached numpy array, kept for visualization.
        return next_H, attention[0, 0, :].data.cpu().numpy()

    def compute_similarity_matrix(self, X):
        # Score every ordered agent pair (i, j) with w_a([x_i, x_j]) and
        # reshape to a (batch, time, n, n) similarity matrix.
        indices = [pair for pair in itertools.product(list(range(X.size(2))), repeat=2)]
        selected_features = torch.index_select(X, dim=2, index=torch.LongTensor(indices).reshape(-1).to(self.device))
        pairwise_features = selected_features.reshape((X.size(0),X.size(1), X.size(2) * X.size(2), X.size(3) * 2))
        A = self.w_a(pairwise_features).reshape(X.size(0),X.size(1), X.size(2), X.size(2))
        return A



class RGL(nn.Module):
    def __init__(self, config, robot_state_dim, human_state_dim):
        """ The current code might not be compatible with models trained with previous version
        """
        super().__init__()
        self.multiagent_training = config.gcn.multiagent_training
        num_layer = config.gcn.num_layer
        X_dim = config.gcn.X_dim
        wr_dims = config.gcn.wr_dims
        wh_dims = config.gcn.wh_dims
        final_state_dim = config.gcn.final_state_dim
        similarity_function = config.gcn.similarity_function
        layerwise_graph = config.gcn.layerwise_graph
        skip_connection = config.gcn.skip_connection

        # design choice

        # 'gaussian', 'embedded_gaussian', 'cosine', 'cosine_softmax', 'concatenation'
        self.similarity_function = similarity_function
        self.robot_state_dim = robot_state_dim
        self.human_state_dim = human_state_dim
        self.num_layer = num_layer
        self.X_dim = X_dim
        self.layerwise_graph = layerwise_graph
        self.skip_connection = skip_connection

        logging.info('Similarity_func: {}'.format(self.similarity_function))
        logging.info('Layerwise_graph: {}'.format(self.layerwise_graph))
        logging.info('Skip_connection: {}'.format(self.skip_connection))
        logging.info('Number of layers: {}'.format(self.num_layer))

        self.w_r = mlp(robot_state_dim, wr_dims, last_relu=True)
        self.w_h = mlp(human_state_dim, wh_dims, last_relu=True)

        if self.similarity_function == 'embedded_gaussian':
            self.w_a = Parameter(torch.randn(self.X_dim, self.X_dim))
            nn.init.orthogonal_(self.w_a.data)
        elif self.similarity_function == 'concatenation':
            self.w_a = mlp(2 * X_dim, [2 * X_dim, 1], last_relu=True)

        self.w_v = mlp(X_dim, [X_dim], last_relu=True)

        embedding_dim = self.X_dim
        self.Ws = torch.nn.ParameterList()
        for i in range(self.num_layer):
            if i == 0:
                self.Ws.append(Parameter(torch.randn(self.X_dim, embedding_dim)))
            elif i == self.num_layer - 1:
                self.Ws.append(Parameter(torch.randn(embedding_dim, final_state_dim)))
            else:
                self.Ws.append(Parameter(torch.randn(embedding_dim, embedding_dim)))

        # for visualization
        self.attention_weights = None

    def compute_similarity_matrix(self, X):
        if self.similarity_function == 'embedded_gaussian':
            A = torch.matmul(torch.matmul(X, self.w_a), X.permute(0, 2, 1))
            normalized_A = softmax(A, dim=2)
        elif self.similarity_function == 'gaussian':
            A = torch.matmul(X, X.permute(0, 2, 1))
            normalized_A = softmax(A, dim=2)
        elif self.similarity_function == 'cosine':
            A = torch.matmul(X, X.permute(0, 2, 1))
            magnitudes = torch.norm(A, dim=2, keepdim=True)
            norm_matrix = torch.matmul(magnitudes, magnitudes.permute(0, 2, 1))
            normalized_A = torch.div(A, norm_matrix)
        elif self.similarity_function == 'cosine_softmax':
            A = torch.matmul(X, X.permute(0, 2, 1))
            magnitudes = torch.norm(A, dim=2, keepdim=True)
            norm_matrix = torch.matmul(magnitudes, magnitudes.permute(0, 2, 1))
            normalized_A = softmax(torch.div(A, norm_matrix), dim=2)
        elif self.similarity_function == 'concatenation':
            indices = [pair for pair in itertools.product(list(range(X.size(1))), repeat=2)]
            selected_features = torch.index_select(X, dim=1, index=torch.LongTensor(indices).reshape(-1))
            pairwise_features = selected_features.reshape((-1, X.size(1) * X.size(1), X.size(2) * 2))
            A = self.w_a(pairwise_features).reshape(-1, X.size(1), X.size(1))
            normalized_A = softmax(A, dim=2)
        elif self.similarity_function == 'squared':
            A = torch.matmul(X, X.permute(0, 2, 1))
            squared_A = A * A
            normalized_A = squared_A / torch.sum(squared_A, dim=2, keepdim=True)
        elif self.similarity_function == 'equal_attention':
            normalized_A = (torch.ones(X.size(1), X.size(1)) / X.size(1)).expand(X.size(0), X.size(1), X.size(1))
        elif self.similarity_function == 'diagonal':
            normalized_A = (torch.eye(X.size(1), X.size(1))).expand(X.size(0), X.size(1), X.size(1))
        else:
            raise NotImplementedError

        return normalized_A

    def forward(self, state):
        """
        Embed current state tensor pair (robot_state, human_states) into a latent space
        Each tensor is of shape (batch_size, # of agent, features)
        :param state:
        :return:
        """
        robot_state, human_states = state

        # compute feature matrix X
        robot_state_embedings = self.w_r(robot_state)
        human_state_embedings = self.w_h(human_states)
        X = torch.cat([robot_state_embedings, human_state_embedings], dim=1)

        # compute matrix A
        if not self.layerwise_graph:
            normalized_A = self.compute_similarity_matrix(X)
            value_X = self.w_v(X)
            self.attention_weights = normalized_A[0, 0, :].data.cpu().numpy()

        next_H = H = value_X
        for i in range(self.num_layer):
            if self.layerwise_graph:
                A = self.compute_similarity_matrix(H)
                next_H = relu(torch.matmul(torch.matmul(A, H), self.Ws[i]))
            else:
                next_H = relu(torch.matmul(torch.matmul(normalized_A, H), self.Ws[i]))

            if self.skip_connection:
                next_H += H
            H = next_H

        return next_H

class GAT_RL(nn.Module):
    """Two-layer graph-attention encoder over the joint robot/human state."""

    def __init__(self, config, robot_state_dim, human_state_dim):
        """Build the embeddings and GAT layers from ``config.gcn``.

        The current code might not be compatible with models trained with a
        previous version.
        """
        super().__init__()
        self.name = 'GAT_RL'

        gcn_cfg = config.gcn
        self.multiagent_training = gcn_cfg.multiagent_training
        final_state_dim = gcn_cfg.final_state_dim  # read for config parity; unused here

        # design choice:
        # 'gaussian', 'embedded_gaussian', 'cosine', 'cosine_softmax', 'concatenation'
        self.similarity_function = gcn_cfg.similarity_function
        self.robot_state_dim = robot_state_dim
        self.human_state_dim = human_state_dim
        self.num_layer = gcn_cfg.num_layer
        self.X_dim = gcn_cfg.X_dim
        self.layerwise_graph = gcn_cfg.layerwise_graph
        self.skip_connection = gcn_cfg.skip_connection

        # Two stacked spatial attention layers over the embedded agents.
        self.gat0 = GraphAttentionLayer(self.X_dim, self.X_dim)
        self.gat1 = GraphAttentionLayer(self.X_dim, self.X_dim)

        logging.info('Similarity_func: {}'.format(self.similarity_function))
        logging.info('Layerwise_graph: {}'.format(self.layerwise_graph))
        logging.info('Skip_connection: {}'.format(self.skip_connection))
        logging.info('Number of layers: {}'.format(self.num_layer))

        # Per-agent state embeddings.
        self.w_r = mlp(robot_state_dim, gcn_cfg.wr_dims, last_relu=True)
        self.w_h = mlp(human_state_dim, gcn_cfg.wh_dims, last_relu=True)
        # First-layer attention, kept for visualization.
        self.attention_weights = None

    def compute_adjectory_matrix(self, state):
        """Binary attention mask (batch, N, N): all-ones except that human
        rows do not attend to the robot (column 0)."""
        robot_state, human_state = state[0], state[1]
        n_robot = robot_state.size()[1]
        n_human = human_state.size()[1]
        total = n_robot + n_human
        adj = torch.ones((total, total))
        # Zero the robot column for every human row in one slice assignment.
        adj[n_robot:, 0] = 0
        return adj.repeat(robot_state.size()[0], 1, 1)

    def forward(self, state):
        """
        Embed current state tensor pair (robot_state, human_states) into a latent space
        Each tensor is of shape (batch_size, # of agent, features)
        :param state:
        :return:
        """
        robot_state, human_states = state
        adj = self.compute_adjectory_matrix(state)

        # Joint feature matrix: robot first, then humans, along the agent dim.
        X = torch.cat([self.w_r(robot_state), self.w_h(human_states)], dim=1)

        H1, attn = self.gat0(X, adj)
        if robot_state.shape[0] == 1:
            # Single-sample passes (action selection) record the robot's
            # attention over the other agents for visualization.
            self.attention_weights = attn
        H2, _ = self.gat1(H1, adj)

        return H1 + H2 + X if self.skip_connection else H2

class GraphAttentionLayer(nn.Module):
    """
    Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
    """

    def __init__(self, in_features, out_features, concat=True):
        super(GraphAttentionLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.concat = concat

        self.w_a = mlp(2 * self.in_features, [2 * self.in_features, 1], last_relu=True)
        self.leakyrelu = nn.LeakyReLU(negative_slope=-0.2)

    def forward(self, input, adj):

        # shape of input is batch_size, graph_size,feature_dims
        # shape of adj is batch_size, graph_size, graph_size
        assert len(input.shape) == 3
        assert len(adj.shape) == 3
        A = self.compute_similarity_matrix(input)
        e = self.leakyrelu(A)
        zero_vec = -9e15 * torch.ones_like(e)
        attention = torch.where(adj.to('cuda') > 0, e.to('cuda'), zero_vec.to('cuda'))
        attention = nn.functional.softmax(attention, dim=2)
        next_H = torch.matmul(attention, input)
        return next_H, attention[0, 0, :].data.cpu().numpy()

    def compute_similarity_matrix(self, X):
        indices = [pair for pair in itertools.product(list(range(X.size(1))), repeat=2)]
        selected_features = torch.index_select(X, dim=1, index=torch.LongTensor(indices).reshape(-1).to('cuda'))
        pairwise_features = selected_features.reshape((-1, X.size(1) * X.size(1), X.size(2) * 2))
        A = self.w_a(pairwise_features).reshape(-1, X.size(1), X.size(1))
        return A



class GAT_RL2(nn.Module):
    def __init__(self, config, robot_state_dim, human_state_dim, device):
        """Two-layer GAT encoder built from the shared ``GAT`` helper module.

        Unlike ``GAT_RL`` this variant takes an explicit ``device`` used for
        the adjacency mask.  The current code might not be compatible with
        models trained with a previous version.
        """
        super().__init__()
        self.multiagent_training = config.gcn.multiagent_training
        num_layer = config.gcn.num_layer
        X_dim = config.gcn.X_dim
        wr_dims = config.gcn.wr_dims
        wh_dims = config.gcn.wh_dims
        final_state_dim = config.gcn.final_state_dim
        similarity_function = config.gcn.similarity_function
        layerwise_graph = config.gcn.layerwise_graph
        skip_connection = config.gcn.skip_connection

        # design choice

        # 'gaussian', 'embedded_gaussian', 'cosine', 'cosine_softmax', 'concatenation'
        self.similarity_function = similarity_function
        self.robot_state_dim = robot_state_dim
        self.human_state_dim = human_state_dim
        self.num_layer = num_layer
        self.X_dim = X_dim
        self.layerwise_graph = layerwise_graph
        self.skip_connection = skip_connection
        self.nheads = 1
        self.device = device

        logging.info('Similarity_func: {}'.format(self.similarity_function))
        logging.info('Layerwise_graph: {}'.format(self.layerwise_graph))
        logging.info('Skip_connection: {}'.format(self.skip_connection))
        logging.info('Number of layers: {}'.format(self.num_layer))

        # Per-agent state embeddings.
        self.w_r = mlp(robot_state_dim, wr_dims, last_relu=True)
        self.w_h = mlp(human_state_dim, wh_dims, last_relu=True)

        # NOTE(review): this loop only builds layers for i == 0 and
        # i == num_layer - 1; with num_layer > 2 the intermediate layers are
        # silently skipped, and with num_layer == 1 both conditions hit the
        # same iteration — confirm this is intended.
        embedding_dim = self.X_dim
        for i in range(self.num_layer):
            if i == 0:
                self.gat0 = GAT(in_feats=self.X_dim, hid_feats=embedding_dim, out_feats=embedding_dim, dropout=0.0,
                                alpha=-0.2, nheads=self.nheads)
                self.add_module('GAT0', self.gat0)
            if i == self.num_layer - 1:
                self.gat1 = GAT(in_feats=embedding_dim, hid_feats=embedding_dim, out_feats=final_state_dim, dropout=0.0,
                                alpha=-0.2, nheads=self.nheads)
                self.add_module('GAT1', self.gat1)

        # for visualization
        self.A = None

    def compute_adjectory_matrix(self, state):
        # Binary attention mask (batch, N, N): all-ones except that human
        # rows (indices >= robot_num) do not attend to the robot (column 0).
        robot_state = state[0]
        human_state = state[1]
        robot_num = robot_state.size()[1]
        human_num = human_state.size()[1]
        Num = robot_num + human_num
        adj = torch.ones((Num, Num))
        for i in range(robot_num, robot_num+human_num):
            adj[i][0] = 0
        adj = adj.repeat(robot_state.size()[0], 1, 1)
        adj = adj.to(self.device)
        return adj

    def forward(self, state):
        """
        Embed current state tensor pair (robot_state, human_states) into a latent space
        Each tensor is of shape (batch_size, # of agent, features)
        :param state:
        :return:
        """
        robot_state, human_states = state
        # robot_state.
        adj = self.compute_adjectory_matrix(state)
        assert robot_state.shape[0] == human_states.shape[0]
        # Feature matrix X: robot first, then humans, along the agent dim.
        robot_state_embedings = self.w_r(robot_state)
        human_state_embedings = self.w_h(human_states)
        X = torch.cat([robot_state_embedings, human_state_embedings], dim=1)
        next_H = self.gat0(X, adj) + X
        # NOTE(review): the second residual adds the original embedding X,
        # not the first layer's output — confirm the dense skip is intended.
        next_H = self.gat1(next_H, adj) + X
        return next_H



