import torch.nn as nn
import torch

from .base_laysers import Encoder, Q_Net, GraphAttention, SchedulerModel


class GACPN_R1(nn.Module):
    """Single-round graph-attention communication policy network.

    One scheduler produces a communication adjacency matrix; one
    graph-attention layer exchanges messages over it before per-agent
    Q-value estimation.
    """

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions):
        """Build encoder, scheduler, communication, and Q-value modules.

        Args:
            n_agent: number of agents (accepted for interface parity with
                the other GACPN variants; not used in this class body).
            num_inputs: per-agent input size fed to the encoder.
            hidden_dim: width of the hidden representations.
            num_actions: size of the discrete action space.
        """
        super(GACPN_R1, self).__init__()

        self.encoder = Encoder(num_inputs, hidden_dim)
        # Multi-head GAT whose averaged output feeds the scheduler.
        self.gat_encoder = GraphAttention(hidden_dim, hidden_dim, dropout=0, negative_slope=0, num_heads=4,
                                          average=True)
        self.scheduler_1 = SchedulerModel(hidden_dim)
        # Communication layer: 4 heads of hidden_dim // 4 each.
        self.graphAtt_1 = GraphAttention(hidden_dim, hidden_dim // 4, dropout=0, negative_slope=0.2, num_heads=4,
                                         bias=True, normalize=True)
        # Q head consumes [messages, own features] concatenated.
        self.q_net = Q_Net(hidden_dim * 2, num_actions)

    def forward(self, x, mask):
        """Return per-agent Q-values and the scheduled adjacency matrix."""
        features = self.encoder(x)

        # Masked GAT pass producing the scheduler's input.
        scheduler_input = self.gat_encoder(features, mask)
        adjacency = self.scheduler_1(scheduler_input, mask)

        # Exchange messages over the scheduled communication graph.
        messages = self.graphAtt_1(features, adjacency)

        joint = torch.cat([messages, features], dim=-1)
        return self.q_net(joint), adjacency


class GACPN_R2(nn.Module):
    """Two-round graph-attention communication policy network.

    Runs two scheduler/graph-attention rounds: the first round's messages
    are re-routed through a second scheduled graph before Q-value
    estimation.
    """

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions):
        """Build encoder, two scheduler/GAT rounds, and the Q-value head.

        Args:
            n_agent: number of agents (accepted for interface parity with
                the other GACPN variants; not used in this class body).
            num_inputs: per-agent input size fed to the encoder.
            hidden_dim: width of the hidden representations.
            num_actions: size of the discrete action space.
        """
        super(GACPN_R2, self).__init__()

        self.encoder = Encoder(num_inputs, hidden_dim)
        # Multi-head GAT whose averaged output feeds both schedulers.
        self.gat_encoder = GraphAttention(hidden_dim, hidden_dim, dropout=0, negative_slope=0, num_heads=4,
                                          average=True)
        self.scheduler_1 = SchedulerModel(hidden_dim)
        self.scheduler_2 = SchedulerModel(hidden_dim)
        # Round 1: 4 heads of hidden_dim // 4 each (concatenated).
        self.graphAtt_1 = GraphAttention(hidden_dim, hidden_dim // 4, dropout=0, negative_slope=0.2, num_heads=4,
                                         bias=True, normalize=True)
        # Round 2: head outputs averaged back to hidden_dim.
        self.graphAtt_2 = GraphAttention(hidden_dim, hidden_dim, dropout=0, negative_slope=0.2, num_heads=4,
                                         bias=True, normalize=True, average=True)
        # Q head consumes [messages, own features] concatenated.
        self.q_net = Q_Net(hidden_dim * 2, num_actions)

    def forward(self, x, mask):
        """Return per-agent Q-values and the second-round adjacency matrix."""
        features = self.encoder(x)

        # Shared masked GAT pass feeding both schedulers.
        scheduler_input = self.gat_encoder(features, mask)

        # Round 1: schedule a graph and exchange messages over it.
        adjacency_1 = self.scheduler_1(scheduler_input, mask)
        messages = self.graphAtt_1(features, adjacency_1)

        # Round 2: re-route the round-1 messages through a new graph.
        adjacency_2 = self.scheduler_2(scheduler_input, mask)
        messages = self.graphAtt_2(messages, adjacency_2)

        joint = torch.cat([messages, features], dim=-1)
        return self.q_net(joint), adjacency_2


class GACPN_R3(nn.Module):
    """Multi-round graph-attention communication policy network.

    Generalizes GACPN_R1/GACPN_R2 to a configurable number of
    communication rounds: ``comm_times - 1`` intermediate scheduler/GAT
    pairs followed by one final scheduler and an averaging GAT head.
    """

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions, comm_times=2):
        """Build encoder, per-round scheduler/GAT stacks, and the Q head.

        Args:
            n_agent: number of agents (accepted for interface parity with
                the other GACPN variants; not used in this class body).
            num_inputs: per-agent flattened input size fed to the encoder.
            hidden_dim: width of the hidden representations.
            num_actions: size of the discrete action space.
            comm_times: total number of communication rounds (>= 1).
                Defaults to 2, matching the previously hard-coded value.

        Raises:
            ValueError: if ``comm_times`` is less than 1.
        """
        super(GACPN_R3, self).__init__()

        if comm_times < 1:
            raise ValueError("comm_times must be >= 1, got %r" % (comm_times,))

        self.encoder = Encoder(num_inputs, hidden_dim)

        # Multi-head GAT whose averaged output feeds every scheduler.
        self.gat_encoder = GraphAttention(hidden_dim, hidden_dim, dropout=0, negative_slope=0, num_heads=4,
                                          average=True)

        self.comm_times = comm_times
        self.scheduler_layers = nn.ModuleList([SchedulerModel(hidden_dim) for _ in range(self.comm_times - 1)])
        self.scheduler_last = SchedulerModel(hidden_dim)

        # Intermediate rounds: 4 heads of hidden_dim // 4 (concatenated),
        # so each round maps hidden_dim -> hidden_dim and can be chained.
        self.graphAtt_layers = nn.ModuleList([GraphAttention(hidden_dim, hidden_dim // 4, dropout=0, negative_slope=0.2,
                                                             num_heads=4, bias=True, normalize=True) for _ in
                                              range(self.comm_times - 1)])

        # Final round: head outputs averaged back to hidden_dim.
        self.graphAtt_last = GraphAttention(hidden_dim, hidden_dim, dropout=0, negative_slope=0.2, num_heads=4,
                                            bias=True, normalize=True, average=True)
        # Q head consumes [messages, own features] concatenated.
        self.q_net = Q_Net(hidden_dim * 2, num_actions)

    def forward(self, x, mask):
        """Return per-agent Q-values and the final-round adjacency matrix.

        Args:
            x: observations of shape (batch, n_agent, num_dims, dim_length);
                the trailing two dims are flattened before encoding.
            mask: communication mask forwarded to the GAT encoder and
                every scheduler.
        """
        batch_size, n_agent, num_dims, dim_length = x.shape
        # Collapse the two trailing observation dims into one feature axis.
        x1 = torch.reshape(x, (batch_size, n_agent, num_dims * dim_length))

        encoded = self.encoder(x1)
        gat_encoded = self.gat_encoder(encoded, mask)

        # NOTE(review): the first intermediate round consumes the
        # GAT-encoded features, whereas GACPN_R1/R2 feed the raw encoder
        # output into their first communication layer — confirm this
        # asymmetry is intentional. (Behavior preserved here.)
        temp = gat_encoded
        for graph, scheduler in zip(self.graphAtt_layers, self.scheduler_layers):
            adj = scheduler(gat_encoded, mask)
            temp = graph(temp, adj)

        # Final round with the averaging head.
        adj = self.scheduler_last(gat_encoded, mask)
        comm = self.graphAtt_last(temp, adj)

        q = self.q_net(torch.cat([comm, encoded], dim=-1))

        return q, adj
