import torch
import torch.nn as nn
from .helpers import SinusoidalPosEmb

def mlp(input_dim, mlp_dims, last_relu=False):
    """Build a fully connected network of Xavier-initialized Linear layers.

    Args:
        input_dim: Size of the input feature vector.
        mlp_dims: Output sizes of each successive Linear layer.
        last_relu: If True, also append a ReLU after the final Linear.

    Returns:
        nn.Sequential of Linear layers with ReLU activations between them.
    """
    layers = []
    dims = [input_dim] + mlp_dims
    for i in range(len(dims) - 1):
        linear = nn.Linear(dims[i], dims[i + 1])
        # Xavier init keeps activation variance roughly stable across layers;
        # biases start at zero.
        nn.init.xavier_uniform_(linear.weight)
        nn.init.constant_(linear.bias, 0.0)
        layers.append(linear)
        # ReLU between hidden layers; skip it after the last Linear unless
        # the caller explicitly asks for it.
        if i != len(dims) - 2 or last_relu:
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)

class CrossAttention(nn.Module):
    """Multi-head cross-attention whose queries are conditioned on a time embedding.

    The time embedding is concatenated onto every query token and projected
    back to ``state_dim`` by a small MLP before attending over ``key``.
    """

    def __init__(self, state_dim, t_dim, num_heads):
        super(CrossAttention, self).__init__()
        self.state_dim = state_dim
        self.t_dim = t_dim
        self.num_heads = num_heads
        # Fuses [query, time] -> state_dim query vectors.
        self.time_encoding = nn.Sequential(
            nn.Linear(state_dim + t_dim, state_dim),
            nn.Mish(),
            nn.Linear(state_dim, state_dim),
        )
        self.atten = nn.MultiheadAttention(state_dim, num_heads, batch_first=True)

    def forward(self, query, key, time_encoding):
        """Attend time-conditioned ``query`` over ``key`` (used as both key and value).

        NOTE(review): assumes query is (batch, seq, state_dim) and
        time_encoding is (batch, 1, t_dim) — broadcast along the sequence axis.
        """
        seq_len = query.size(1)
        t = time_encoding.expand(-1, seq_len, -1)
        fused = torch.cat((query, t), dim=-1)
        conditioned_query = self.time_encoding(fused)
        out, _weights = self.atten(conditioned_query, key, key)
        return out

class DiffAttention(nn.Module):
    """Diffusion denoising network: encodes per-agent inputs, conditions them
    on a sinusoidal timestep embedding via cross-attention over an external
    state, and maps the result to actions in [-1, 1] (Tanh output).

    Args:
        state_dim: Feature width used by the cross-attention block.
        action_dim: Size of the predicted action vector per agent.
        graph_dims: Hidden sizes of the per-agent encoder MLP.
        hidden_dim: Width of the post-attention MLP head.
        t_dim: Dimensionality of the sinusoidal timestep embedding.
        activation: 'mish' for nn.Mish, anything else for nn.ReLU.
        robot_state_dim: Per-agent input feature size (previously hard-coded
            to 2; kept as the default for backward compatibility).
    """

    def __init__(
        self,
        state_dim,
        action_dim,
        graph_dims,
        hidden_dim=256,
        t_dim=16,
        activation='mish',
        robot_state_dim=2,
    ):
        super(DiffAttention, self).__init__()
        _act = nn.Mish if activation == 'mish' else nn.ReLU
        # Sinusoidal embedding of the diffusion timestep, refined by a small MLP.
        self.time_mlp = nn.Sequential(
            SinusoidalPosEmb(t_dim),
            nn.Linear(t_dim, t_dim * 2),
            _act(),
            nn.Linear(t_dim * 2, t_dim)
        )
        # Per-agent encoder: robot_state_dim -> graph_dims[-1].
        # NOTE(review): graph_dims[-1] must equal state_dim for the
        # cross-attention query to type-check — confirm against callers.
        self.encoder = mlp(robot_state_dim, graph_dims, last_relu=True)
        self.crossatten = CrossAttention(state_dim=state_dim, t_dim=t_dim, num_heads=8)
        self.mid_layer = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            _act(),
            nn.Linear(hidden_dim, hidden_dim),
            _act(),
            nn.Linear(hidden_dim, action_dim)
        )
        # Squash actions into [-1, 1].
        self.final_layer = nn.Tanh()

    def forward(self, x, time, state):
        """Predict actions for noisy input ``x`` at diffusion step ``time``.

        Args:
            x: Per-agent inputs, [batch, drone_num, robot_state_dim].
            time: Diffusion timesteps, [batch].
            state: Conditioning state attended over as key/value.

        Returns:
            Tensor of shape [batch, drone_num, action_dim] in [-1, 1].
        """
        x = self.encoder(x)  # [batch, drone_num, robot_state_dim] -> [batch, drone_num, graph_dims[-1]]
        t = self.time_mlp(time).unsqueeze(1)  # [batch, 1, t_dim]
        x = self.crossatten(x, state, t)
        x = self.mid_layer(x)
        return self.final_layer(x)