import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
from action_utils import select_action, translate_action
from gnn_layers import GraphAttention, GraphAttentionV2, GeneralAttention, GraphAttentionV2SharedW
import logging

class MAGICTNNLRR(nn.Module):
    """
    The communication protocol of Multi-Agent Graph AttentIon Communication (MAGIC)

    This variant additionally reports low-rank statistics of the attention
    matrices: per-head/all-head nuclear norms (get_matrix_nuclear) and a
    tensor nuclear norm computed via FFT (get_TNN), returned alongside the
    usual policy/value outputs from forward().
    """

    def __init__(self, args):
        super(MAGICTNNLRR, self).__init__()
        """
        Initialization method for the MAGIC communication protocol (2 rounds of communication)

        Arguments:
            args (Namespace): Parse arguments
        """
        # NOTE: the string above is placed after super().__init__(), so it is a
        # plain (no-op) string statement rather than the method docstring.

        self.args = args
        self.nagents = args.nagents
        self.hid_size = args.hid_size

        # Fixed hyper-parameters for the attention layers.
        dropout = 0
        negative_slope = 0.2

        # Select the attention implementation for the two communication rounds
        # (sub-processors). Exactly one of the four flags is expected to be set.
        # sub_processor1: hid_size -> gat_hid_size per head (heads concatenated);
        # sub_processor2: concatenated heads -> hid_size (heads averaged).
        if args.use_gat_v2:
            self.sub_processor1 = GraphAttentionV2(args.hid_size, args.gat_hid_size, dropout=dropout,
                                                   negative_slope=negative_slope, num_heads=args.gat_num_heads,
                                                   self_loop_type=args.self_loop_type1, average=False,
                                                   normalize=args.first_gat_normalize)
            self.sub_processor2 = GraphAttentionV2(args.gat_hid_size * args.gat_num_heads, args.hid_size,
                                                   dropout=dropout,
                                                   negative_slope=negative_slope, num_heads=args.gat_num_heads_out,
                                                   self_loop_type=args.self_loop_type2, average=True,
                                                   normalize=args.second_gat_normalize)
        elif args.use_gat:
            self.sub_processor1 = GraphAttention(args.hid_size, args.gat_hid_size, dropout=dropout,
                                                 negative_slope=negative_slope, num_heads=args.gat_num_heads,
                                                 self_loop_type=args.self_loop_type1, average=False,
                                                 normalize=args.first_gat_normalize)
            self.sub_processor2 = GraphAttention(args.gat_hid_size * args.gat_num_heads, args.hid_size, dropout=dropout,
                                                 negative_slope=negative_slope, num_heads=args.gat_num_heads_out,
                                                 self_loop_type=args.self_loop_type2, average=True,
                                                 normalize=args.second_gat_normalize)
        elif args.use_general_attention:
            self.sub_processor1 = GeneralAttention(args.hid_size, args.gat_hid_size, dropout=dropout,
                                                   self_loop_type=args.self_loop_type1, average=False,
                                                   normalize=args.first_gat_normalize)
            self.sub_processor2 = GeneralAttention(args.gat_hid_size * args.gat_num_heads, args.hid_size,
                                                   dropout=dropout,
                                                   self_loop_type=args.self_loop_type2, average=True,
                                                   normalize=args.second_gat_normalize)
        elif args.use_gat_v2_SharedW:
            self.sub_processor1 = GraphAttentionV2SharedW(args.hid_size, args.gat_hid_size, dropout=dropout,
                                                   negative_slope=negative_slope, num_heads=args.gat_num_heads,
                                                   self_loop_type=args.self_loop_type1, average=False,
                                                   normalize=args.first_gat_normalize)
            self.sub_processor2 = GraphAttentionV2SharedW(args.gat_hid_size * args.gat_num_heads, args.hid_size,
                                                   dropout=dropout,
                                                   negative_slope=negative_slope, num_heads=args.gat_num_heads_out,
                                                   self_loop_type=args.self_loop_type2, average=True,
                                                   normalize=args.second_gat_normalize)
        # NOTE(review): if none of the four flags is set, sub_processor1/2 are
        # never created and forward() will fail with AttributeError.

        # initialize the gat encoder for the Scheduler
        # (one of the two encoder flags is mandatory — otherwise we raise below)
        if args.use_gat_encoder:
            self.gat_encoder = GraphAttention(args.hid_size, args.gat_encoder_out_size, dropout=dropout,
                                              negative_slope=negative_slope, num_heads=args.ge_num_heads,
                                              self_loop_type=1, average=True, normalize=args.gat_encoder_normalize)
        elif args.use_gat_v2_SharedW_encoder:
            self.gat_encoder = GraphAttentionV2SharedW(args.hid_size, args.gat_encoder_out_size, dropout=dropout,
                                              negative_slope=negative_slope, num_heads=args.ge_num_heads,
                                              self_loop_type=1, average=True, normalize=args.gat_encoder_normalize)
        else:
            raise NotImplementedError


        # Linear encoder mapping raw observations to the hidden size.
        self.obs_encoder = nn.Linear(args.obs_size, args.hid_size)

        # NOTE(review): init_hidden() returns the (h, c) tuple but the result is
        # discarded here; callers must obtain initial states via init_hidden().
        self.init_hidden(args.batch_size)
        self.lstm_cell = nn.LSTMCell(args.hid_size, args.hid_size)

        # initialize mlp layers for the sub-schedulers
        if not args.first_graph_complete:
            if args.use_gat_encoder or args.use_gat_v2_SharedW_encoder:
                # Split MLP: part1 produces an intermediate embedding (also
                # returned by sub_scheduler for the TNN statistic), part2 maps
                # it to the 2-way keep/drop logits.
                self.sub_scheduler_mlp1_part1 = nn.Sequential(
                    nn.Linear(args.gat_encoder_out_size * 2, args.gat_encoder_out_size // 2),
                    nn.ReLU(),
                    nn.Linear(args.gat_encoder_out_size // 2, args.gat_encoder_out_size // 2),
                    nn.ReLU())
                self.sub_scheduler_mlp1_part2 = nn.Sequential(
                    nn.Linear(args.gat_encoder_out_size // 2, 2))
            else:
                # NOTE(review): this single-MLP variant is incompatible with the
                # two-MLP sub_scheduler() signature below — see forward(). It
                # also appears unreachable because __init__ raises unless one of
                # the encoder flags is set.
                self.sub_scheduler_mlp1 = nn.Sequential(
                    nn.Linear(self.hid_size * 2, self.hid_size // 2),
                    nn.ReLU(),
                    nn.Linear(self.hid_size // 2, self.hid_size // 8),
                    nn.ReLU(),
                    nn.Linear(self.hid_size // 8, 2))

        if args.learn_second_graph and not args.second_graph_complete:
            # NOTE(review): mlp2 outputs 5 logits (vs. 2 for mlp1) and is a
            # single Sequential, which does not match the two-MLP
            # sub_scheduler() signature — confirm this branch is exercised.
            if args.use_gat_encoder:
                self.sub_scheduler_mlp2 = nn.Sequential(
                    nn.Linear(args.gat_encoder_out_size * 2, args.gat_encoder_out_size // 2),
                    nn.ReLU(),
                    nn.Linear(args.gat_encoder_out_size // 2, args.gat_encoder_out_size // 2),
                    nn.ReLU(),
                    nn.Linear(args.gat_encoder_out_size // 2, 5))
            else:
                self.sub_scheduler_mlp2 = nn.Sequential(
                    nn.Linear(self.hid_size * 2, self.hid_size // 2),
                    nn.ReLU(),
                    nn.Linear(self.hid_size // 2, self.hid_size // 8),
                    nn.ReLU(),
                    nn.Linear(self.hid_size // 8, 5))

        # Optional linear transforms applied to messages before/after
        # the communication rounds.
        if args.message_encoder:
            self.message_encoder = nn.Linear(args.hid_size, args.hid_size)
        if args.message_decoder:
            self.message_decoder = nn.Linear(args.hid_size, args.hid_size)

        # initialize weights as 0
        if args.comm_init == 'zeros':
            if args.message_encoder:
                self.message_encoder.weight.data.zero_()
            if args.message_decoder:
                self.message_decoder.weight.data.zero_()
            # NOTE(review): on the encoder path only mlp1_part1/mlp1_part2
            # exist, so self.sub_scheduler_mlp1 would raise AttributeError
            # here — confirm comm_init == 'zeros' is used with this model.
            if not args.first_graph_complete:
                self.sub_scheduler_mlp1.apply(self.init_linear)
            if args.learn_second_graph and not args.second_graph_complete:
                self.sub_scheduler_mlp2.apply(self.init_linear)

        # initialize the action head (in practice, one action head is used)
        self.action_heads = nn.ModuleList([nn.Linear(2 * args.hid_size, o)
                                           for o in args.naction_heads])
        # initialize the value head
        self.value_head = nn.Linear(2 * self.hid_size, 1)

    def forward(self, x, info={}):
        """
        Forward function of MAGIC (two rounds of communication)

        Arguments:
            x (list): a list for the input of the communication protocol [observations, (previous hidden states, previous cell states)]
            observations (tensor): the observations for all agents [1 (batch_size) * n * obs_size]
            previous hidden/cell states (tensor): the hidden/cell states from the previous time steps [n * hid_size]

        Returns:
            action_out (list): a list of tensors of size [1 (batch_size) * n * num_actions] that represent output policy distributions
            value_head (tensor): estimated values [n * 1]
            next hidden/cell states (tensor): next hidden/cell states [n * hid_size]
            attention matrices (list): [sub-scheduler-1 embedding, round-2 attention], each unsqueezed to add a batch dim
            nuclear norms (list): rank statistics for the two rounds (see get_matrix_nuclear / get_TNN)

        Note: `info` uses a mutable default argument; it is only read here, but
        callers should still prefer passing an explicit dict.
        """

        # n: number of agents

        obs, extras = x

        # encoded_obs: [1 (batch_size) * n * hid_size]
        # obs = obs.view(-1, obs.size()[-1])  # disabled — not a dimensionality issue; possibly left off because the (Mac) dev machine had too little memory
        encoded_obs = self.obs_encoder(obs)
        hidden_state, cell_state = extras

        batch_size = encoded_obs.size()[0]
        n = self.nagents

        num_agents_alive, agent_mask = self.get_agent_mask(batch_size, info)

        # if self.args.comm_mask_zero == True, block the communiction (can also comment out the protocol to make training faster)
        if self.args.comm_mask_zero:
            agent_mask *= torch.zeros(n, 1)

        # Recurrent update; squeeze() drops the leading batch dim of 1.
        hidden_state, cell_state = self.lstm_cell(encoded_obs.squeeze(), (hidden_state, cell_state))

        # comm: [n * hid_size]
        comm = hidden_state
        if self.args.message_encoder:
            comm = self.message_encoder(comm)

        # mask communcation from dead agents (only effective in Traffic Junction)
        comm = comm * agent_mask
        # Keep a pre-communication copy for the second sub-scheduler.
        comm_ori = comm.clone()

        # sub-scheduler 1
        # if args.first_graph_complete == True, sub-scheduler 1 will be disabled
        if not self.args.first_graph_complete:
            if self.args.use_gat_encoder:
                adj_complete = self.get_complete_graph(agent_mask)
                encoded_state1, _ = self.gat_encoder(comm, adj_complete)  # Why can an all-ones matrix be used? Attention is actually computed inside; the complete graph just keeps the form consistent with the scheduler that follows
                adj1, sub_scheduler_attention1 = self.sub_scheduler(self.sub_scheduler_mlp1_part1,self.sub_scheduler_mlp1_part2, encoded_state1, agent_mask,
                                                     self.args.directed)
            elif self.args.use_gat_v2_SharedW_encoder:
                adj_complete = self.get_complete_graph(agent_mask)
                encoded_state1, _ = self.gat_encoder(comm, adj_complete)  # Why can an all-ones matrix be used? Attention is actually computed inside; the complete graph just keeps the form consistent with the scheduler that follows
                # NOTE(review): sub_scheduler takes (mlp_part1, mlp_part2,
                # hidden_state, agent_mask, directed); this call passes only one
                # MLP, so the remaining arguments mis-bind and self.sub_scheduler_mlp1
                # does not exist on this path — would raise at runtime; confirm.
                adj1, sub_scheduler_attention1 = self.sub_scheduler(self.sub_scheduler_mlp1, encoded_state1, agent_mask,
                                                     self.args.directed)
            else:
                # NOTE(review): unreachable given __init__ requires an encoder
                # flag; also mis-binds sub_scheduler args and assigns its
                # 2-tuple return to a single name — confirm before relying on it.
                adj1 = self.sub_scheduler(self.sub_scheduler_mlp1, comm, agent_mask, self.args.directed)
        else:
            adj1 = self.get_complete_graph(agent_mask)
        # NOTE(review): sub_scheduler_attention1 is only bound on the
        # use_gat_encoder path above, yet it is used unconditionally below
        # (TNN statistic and the return value) — NameError risk on other paths.

        # sub-processor 1
        sub_processor1_output = self.sub_processor1(comm, adj1)
        comm = F.elu(sub_processor1_output[0])
        attention1 = sub_processor1_output[1]
        # sub_scheduler_attention1 = sub_scheduler_attention  # use this instead of attention1 to rank
        # Rank statistics: nuclear norm of the round-1 attention plus the
        # tensor nuclear norm of the scheduler's pairwise embedding.
        attention1_nuclear = self.get_matrix_nuclear(attention1.detach())
        # attention1_nuclear += self.get_matrix_nuclear(sub_scheduler_attention1.detach())
        attention1_nuclear += self.get_TNN(sub_scheduler_attention1.detach())

        # sub-scheduler 2
        if self.args.learn_second_graph and not self.args.second_graph_complete:
            if self.args.use_gat_encoder:
                if self.args.first_graph_complete:
                    adj_complete = self.get_complete_graph(agent_mask)
                    # NOTE(review): gat_encoder returns an (output, attention)
                    # tuple (see round 1); assigning it whole to encoded_state2
                    # would break sub_scheduler below — confirm this branch.
                    encoded_state2 = self.gat_encoder(comm_ori, adj_complete)
                else:
                    encoded_state2 = encoded_state1
                # NOTE(review): single-MLP call against the two-MLP
                # sub_scheduler signature — arguments mis-bind; confirm.
                adj2 = self.sub_scheduler(self.sub_scheduler_mlp2, encoded_state2, agent_mask, self.args.directed)
            else:
                adj2 = self.sub_scheduler(self.sub_scheduler_mlp2, comm_ori, agent_mask, self.args.directed)
        elif not self.args.learn_second_graph and not self.args.second_graph_complete:
            # Reuse the first learned graph for the second round.
            adj2 = adj1
        else:
            adj2 = self.get_complete_graph(agent_mask)

        # sub-processor 2
        comm, attention2 = self.sub_processor2(comm, adj2)
        attention2_nuclear = self.get_matrix_nuclear(attention2.detach())

        # attention adjacent matrix visulization
        # aa = attention1.detach()[:, :, 0]
        # bb = attention2.detach()[:, :, 0]
        # _, indices_attention1 = torch.sort(aa)
        # _, indices_attention2 = torch.sort(bb)
        # from matplotlib import pyplot as plt
        # x = np.arange(aa.shape[0])
        # for i in x:
        #     # if i ==3:
        #         y = bb[i]
        #         plt.plot(x,y)
        # plt.show()

        # mask communication to dead agents (only effective in Traffic Junction)
        comm = comm * agent_mask

        if self.args.message_decoder:
            comm = self.message_decoder(comm)

        # Value and policy heads both consume [hidden_state ; comm].
        value_head = self.value_head(torch.cat((hidden_state, comm), dim=-1))
        h = hidden_state.view(batch_size, n, self.hid_size)
        c = comm.view(batch_size, n, self.hid_size)

        action_out = [F.log_softmax(action_head(torch.cat((h, c), dim=-1)), dim=-1) for action_head in
                      self.action_heads]


        return action_out, value_head, (hidden_state.clone(), cell_state.clone()), \
               [sub_scheduler_attention1.unsqueeze(0),attention2.unsqueeze(0)],\
               [attention1_nuclear, attention2_nuclear]

    def get_matrix_nuclear(self, matrix):
        """
        Compute nuclear-norm (sum of singular values) rank statistics of an
        attention tensor.

        Arguments:
            matrix (tensor): detached attention weights, shaped [n, n, num_heads]
                (heads last; permuted to heads-first internally)

        Returns:
            list: [[mean per-head nuclear norm], [nuclear norm of the flattened
            all-heads matrix], [mean per-head nuclear norm after sorting each
            row descending]]; entries stay 0 (int) if the SVD loop failed.
        """
        # matrix = matrix.detach()
        every_head_rank = 0
        all_head_rank = 0
        sorted_every_head_rank = 0
        # Reorder to [num_heads, n, n] so each head is an n-by-n matrix.
        atten_allHeads = matrix.permute(2, 0, 1)
        # Row-wise descending sort — its nuclear norm measures rank structure
        # independent of which agents receive the attention mass.
        sorted_atten_allHeads,_ = torch.sort(atten_allHeads,descending=True,dim=-1)
        num_heads = atten_allHeads.shape[0]
        # nuclear norm for each head
        # NOTE(review): with compute_uv=False, torch.svd still returns a
        # 3-tuple (U and V are zero tensors), so the unpacking below is valid.
        try:
            for atten_Head in atten_allHeads:
                _, sigma, _ = torch.svd(atten_Head, some=True, compute_uv=False)
                nuclear = torch.sum(sigma)
                every_head_rank += nuclear
            for atten_Head in sorted_atten_allHeads:
                _, sigma, _ = torch.svd(atten_Head, some=True, compute_uv=False)
                nuclear = torch.sum(sigma)
                sorted_every_head_rank += nuclear
            every_head_rank /= num_heads
            sorted_every_head_rank /= num_heads
        except Exception as e:
            # SVD can fail to converge on degenerate matrices; log and fall
            # through with whatever accumulated so far (possibly the int 0).
            print(e.args)
            print(str(e))
            print(repr(e))
        # nuclear norm for all heads
        atten_allHeads = atten_allHeads.view(num_heads, -1)
        _, sigma, _ = torch.svd(atten_allHeads, some=True, compute_uv=False)
        nuclear = torch.sum(sigma)
        all_head_rank += nuclear
        # nuclear norm for sorted matrix

        return [[every_head_rank.item() if every_head_rank!=0 else every_head_rank], \
               [all_head_rank.item()], \
               [sorted_every_head_rank.item() if sorted_every_head_rank != 0 else sorted_every_head_rank]]

    def get_TNN(self,three_way_tensor):
        """
        Tensor nuclear norm (TNN) of a 3-way tensor: FFT along the third
        mode, stack the frontal slices into one block-diagonal matrix, and
        take its nuclear norm (normalized by the number of slices).

        Arguments:
            three_way_tensor (tensor): detached 3-way tensor, e.g. the
                sub-scheduler's pairwise embedding [n, n, d]

        Returns:
            list: [[normalized nuclear norm (float)]], matching the nested
            format of get_matrix_nuclear so results can be concatenated.
        """
        # three_way_tensor = three_way_tensor.softmax(1)
        # fft_times > 1 zero-pads the transform along the third mode.
        third_dimension_after_FFT = three_way_tensor.size()[-1] * self.args.fft_times
        # NOTE(review): torch.fft.fftn documents s and dim as tuples; scalar
        # ints are passed here — confirm this works on the targeted torch version.
        tensor_A = torch.fft.fftn(three_way_tensor,s=third_dimension_after_FFT,dim=2)
        # if three_way_tensor.size()[-1]==3:
        #     A_ = torch.block_diag(tensor_A[:, :, 0], tensor_A[:, :, 1], tensor_A[:, :, 2])
        # elif three_way_tensor.size()[-1]==5:
        #     A_ = torch.block_diag(tensor_A[:, :, 0], tensor_A[:, :, 1], tensor_A[:, :, 2], tensor_A[:, :, 3], tensor_A[:, :, 4])
        # else:
        #     raise NotImplementedError
        # Assemble the block-diagonal matrix of all FFT frontal slices.
        A_ = tensor_A[:, :, 0]
        for i in range(1, third_dimension_after_FFT):
            A_ = torch.block_diag(A_, tensor_A[:, :, i])
        _, sigma, _ = torch.svd(A_, some=True)  # atten[N,N,num_heads],
        nuclear = torch.sum(sigma)
        return [[nuclear.item()/third_dimension_after_FFT]]


    def get_agent_mask(self, batch_size, info):
        """
        Function to generate agent mask to mask out inactive agents (only effective in Traffic Junction)

        Returns:
            num_agents_alive (int): number of active agents
            agent_mask (tensor): [n, 1]
        """

        n = self.nagents

        if 'alive_mask' in info:
            agent_mask = torch.from_numpy(info['alive_mask'])
            num_agents_alive = agent_mask.sum()
        else:
            # No liveness info: treat every agent as alive.
            agent_mask = torch.ones(n)
            num_agents_alive = n

        agent_mask = agent_mask.view(n, 1).clone()

        return num_agents_alive, agent_mask

    def init_linear(self, m):
        """
        Function to initialize the parameters in nn.Linear as o

        Used with Module.apply() when args.comm_init == 'zeros'.
        """
        if type(m) == nn.Linear:
            m.weight.data.fill_(0.)
            m.bias.data.fill_(0.)

    def init_hidden(self, batch_size):
        """
        Function to initialize the hidden states and cell states

        Returns:
            tuple: (h, c), each of shape [batch_size * nagents, hid_size],
            zero-filled with requires_grad=True. Note the call in __init__
            discards this return value; callers must use it explicitly.
        """
        return tuple((torch.zeros(batch_size * self.nagents, self.hid_size, requires_grad=True),
                      torch.zeros(batch_size * self.nagents, self.hid_size, requires_grad=True)))

    def sub_scheduler(self, sub_scheduler_mlp1, sub_scheduler_mlp2, hidden_state, agent_mask, directed=True):
        """
        Function to perform a sub-scheduler

        Arguments:
            sub_scheduler_mlp1 (nn.Sequential): first MLP stage mapping each
                pairwise feature [2*hid_size] to an intermediate embedding
            sub_scheduler_mlp2 (nn.Sequential): second MLP stage mapping that
                embedding to the 2-way keep/drop logits
            hidden_state (tensor): the encoded messages input to the sub-scheduler [n * hid_size]
            agent_mask (tensor): [n * 1]
            directed (bool): decide if generate directed graphs (only the
                directed case is implemented; undirected raises)

        Return:
            adj (tensor): a adjacency matrix which is the communication graph [n * n]
            encoded_hard_attn_input (tensor): the intermediate pairwise
                embedding [n * n * emb], used by the caller for the TNN statistic
        """

        # hidden_state: [n * hid_size]
        n = self.args.nagents
        hid_size = hidden_state.size(-1)
        # hard_attn_input: [n * n * (2*hid_size)]
        # Every ordered pair (i, j) gets the concatenation [h_i ; h_j].
        hard_attn_input = torch.cat([hidden_state.repeat(1, n).view(n * n, -1), hidden_state.repeat(n, 1)], dim=1).view(
            n, -1, 2 * hid_size)
        # hard_attn_output: [n * n * 2]
        encoded_hard_attn_input = sub_scheduler_mlp1(hard_attn_input)
        # soft_attn_output = encoded_hard_attn_input.softmax(-1)
        if directed:
            # Straight-through discretization via the local gumbel_softmax
            # (note: that method currently omits the Gumbel noise — see TODO there).
            hard_attn_output= self.gumbel_softmax(sub_scheduler_mlp2(encoded_hard_attn_input), hard=True)
        else:
            # Undirected graphs are not supported in this variant; the code
            # after raise is unreachable (kept from the original MAGIC).
            raise NotImplementedError
            hard_attn_output = F.gumbel_softmax(
                0.5 * sub_scheduler_mlp(hard_attn_input) + 0.5 * sub_scheduler_mlp(hard_attn_input.permute(1, 0, 2)),
                hard=True)
        # hard_attn_output: [n * n * 1]
        # Keep channel 1 of the one-hot output as the "edge present" indicator.
        hard_attn_output = torch.narrow(hard_attn_output, 2, 1, 1)
        # agent_mask and agent_mask_transpose: [n * n]
        agent_mask = agent_mask.expand(n, n)
        agent_mask_transpose = agent_mask.transpose(0, 1)
        # adj: [n * n]
        # Zero out edges touching dead agents in either direction.
        adj = hard_attn_output.squeeze() * agent_mask * agent_mask_transpose

        # half_for_hard = torch.narrow(encoded_hard_attn_input, 2, 1, 1)
        # soft_for_hard = torch.narrow(soft_attn_output, 2, 0, 1)
        # soft_for_hard = F.softmax(soft_for_hard, dim=1)
        return adj, encoded_hard_attn_input

        '''  前期想在sub_scheduler中约束gumbel层前的核范数，后期再看吧，这块先不动了，做做对比实验
        # aaa = hidden_state.repeat(1, n).view(n * n, -1)
        # bbb = hidden_state.repeat(n, 1)
        # ccc = torch.cat([hidden_state.repeat(1, n).view(n * n, -1), hidden_state.repeat(n, 1)], dim=1)
        attn_input = torch.cat([hidden_state.repeat(1, n).view(n * n, -1), hidden_state.repeat(n, 1)], dim=1).view(
            n, -1, 2 * hid_size)
        # hard_attn_output: [n * n * 2]
        attn_mid = sub_scheduler_mlp(attn_input)
        soft_attn_output = torch.narrow(attn_mid, 2, 1, 1).squeeze(-1)
        soft_attn_output = F.softmax(soft_attn_output, dim=1)
        if directed:
            hard_attn_output = F.gumbel_softmax(attn_mid, hard=True)
        else:
            # 这里先不改了
            hard_attn_output = F.gumbel_softmax(
                0.5 * sub_scheduler_mlp(hard_attn_input) + 0.5 * sub_scheduler_mlp(hard_attn_input.permute(1, 0, 2)),
                hard=True)
        # hard_attn_output: [n * n * 1]
        hard_attn_output = torch.narrow(hard_attn_output, 2, 1, 1)
        # agent_mask and agent_mask_transpose: [n * n]
        agent_mask = agent_mask.expand(n, n)
        agent_mask_transpose = agent_mask.transpose(0, 1)
        # adj: [n * n]
        adj = hard_attn_output.squeeze() * agent_mask * agent_mask_transpose

        return adj, soft_attn_output
        '''

    def get_complete_graph(self, agent_mask):
        """
        Function to generate a complete graph, and mask it with agent_mask

        Returns:
            adj (tensor): [n * n] all-ones adjacency with rows/columns of
            dead agents zeroed out
        """
        n = self.args.nagents
        adj = torch.ones(n, n)
        agent_mask = agent_mask.expand(n, n)
        agent_mask_transpose = agent_mask.transpose(0, 1)
        adj = adj * agent_mask * agent_mask_transpose

        return adj

    def gumbel_softmax(self,logits, tau=1, hard=False, eps=1e-10, dim=-1):
        """
        Local copy of F.gumbel_softmax with the Gumbel noise deliberately
        disabled (see TODO below): the sampled noise is computed but then
        overwritten, so this reduces to a deterministic (straight-through)
        softmax/argmax over logits / tau.
        """
        import warnings
        if eps != 1e-10:
            warnings.warn("`eps` parameter is deprecated and has no effect.")

        gumbels = -torch.empty_like(logits,
                                    memory_format=torch.legacy_contiguous_format).exponential_().log()  # ~Gumbel(0,1)
        # TODO gumbels = (logits + gumbels) / tau  # ~Gumbel(logits,tau)
        # NOTE(review): the line below discards the noise sampled above —
        # presumably intentional for these experiments, but confirm.
        gumbels = logits / tau  # ~Gumbel(logits,tau)
        y_soft = gumbels.softmax(dim)

        if hard:
            # Straight through.
            # Forward pass uses the one-hot argmax; backward pass flows
            # through y_soft thanks to the detach() trick.
            index = y_soft.max(dim, keepdim=True)[1]
            y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
            ret = y_hard - y_soft.detach() + y_soft
        else:
            # Reparametrization trick.
            ret = y_soft

        return ret