import torch
from torch import nn
import math
from .base_laysers import Encoder as ObsEncoder
from .base_laysers import Q_Net
from .DGNS import AttModel


class MultiObsEncoder(nn.Module):
    """Encode per-agent observations made of three parts (self info, resource,
    agent info) into a single hidden vector per agent.

    Each part gets its own linear embedding; the three embeddings form a
    length-3 sequence that is mixed by a small transformer encoder, then the
    three encoded vectors are concatenated and projected back to hidden_dim.
    """

    def __init__(self, hidden_dim, input_dim):
        super(MultiObsEncoder, self).__init__()

        # One embedding per observation component.
        self.self_info_embedding = nn.Linear(input_dim, hidden_dim)
        self.resource_embedding = nn.Linear(input_dim, hidden_dim)
        self.agent_embedding = nn.Linear(input_dim, hidden_dim)

        self.encoder1 = Encoder(hidden_dim, feedforward_dim=256, num_heads=2, drop_prob=0.1, num_layers=1)

        # Fuses the three encoded component vectors into one per-agent vector.
        self.linear = nn.Linear(hidden_dim * 3, hidden_dim)

    def forward(self, x, mask=None):
        """x: (batch, num_agents, 3, input_dim).
        mask: optional (batch, 3, 3) bool attention mask shared by all agents;
        when None (every current caller), an all-ones mask is built, which
        matches the previous behavior of always overwriting the argument.
        Returns (batch, num_agents, hidden_dim).
        """
        batch_size, num_agents, _, _ = x.shape

        # Embed every agent's components in one shot instead of a per-agent
        # Python loop — the encoder is shared, so agents can be folded into
        # the batch dimension and encoded in a single call.
        info = self.self_info_embedding(x[:, :, 0, :])
        resource = self.resource_embedding(x[:, :, 1, :])
        agent = self.agent_embedding(x[:, :, 2, :])
        emb = torch.stack((info, resource, agent), dim=2)  # (B, A, 3, H)

        flat = emb.reshape(batch_size * num_agents, 3, emb.size(-1))
        if mask is None:
            mask = torch.ones((batch_size * num_agents, 3, 3), dtype=torch.bool, device=x.device)
        else:
            # Replicate the shared per-batch mask for every agent.
            mask = mask.repeat_interleave(num_agents, dim=0)
        encoded = self.encoder1(flat, src_mask=mask)  # (B*A, 3, H)

        # Concatenate the three encoded parts and project to hidden_dim.
        fused = encoded.reshape(batch_size, num_agents, -1)  # (B, A, 3*H)
        return self.linear(fused)


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(q @ k^T / sqrt(d_k)) @ v.

    Fixes over the previous version:
    - the mask was unsqueezed BEFORE the `if mask is not None` check, so
      calling without a mask crashed;
    - masked_fill(-9e15) was applied AFTER softmax, corrupting the
      distribution instead of zeroing blocked positions — the fill must
      happen before softmax;
    - the head-dim repeat count was hardcoded to 2; it now follows q's
      actual number of heads.
    """

    def __init__(self):
        super().__init__()
        self.softmax = nn.Softmax(dim=-1)  # normalize over the key axis

    def forward(self, q, k, v, mask=None, e=1e-12):
        """q, k, v: [B, num_head, seq_len, head_dim].
        mask: optional bool/0-1 tensor, [B, seq_len, seq_len] or already
        [B, num_head, seq_len, seq_len]; positions where mask == 0 are blocked.
        (`e` is kept for interface compatibility; it is unused.)
        Returns (output [B, num_head, seq_len, head_dim], attention weights).
        """
        d_k = q.size(-1)
        attn_score = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)  # [B, H, S, S]
        if mask is not None:
            if mask.dim() == 3:
                # Broadcast the per-batch mask across all heads.
                mask = mask.unsqueeze(1).expand(-1, q.size(1), -1, -1)
            # Large negative before softmax -> ~0 probability where mask == 0.
            attn_score = attn_score.masked_fill(mask == 0, -9e15)
        attn_score = self.softmax(attn_score)
        output = torch.matmul(attn_score, v)  # [B, H, S, head_dim]
        return output, attn_score


class MultiHeadAttention(nn.Module):
    """Multi-head attention: project q/k/v, split into heads, attend, merge.

    Fixes: the assert message was `print(...)` (which evaluates to None, so a
    failure reported "AssertionError: None"); head_dim used float division.
    """

    def __init__(self, d_model, num_heads):
        super().__init__()
        assert d_model % num_heads == 0, \
            "d_model={0} must be divisible by num_heads={1}".format(d_model, num_heads)
        self.num_head = num_heads
        self.d_model = d_model
        self.head_dim = d_model // num_heads  # integer head size
        self.WQ = nn.Linear(d_model, d_model)  # [batch_size, seq_len, embed_dim]
        self.WK = nn.Linear(d_model, d_model)
        self.WV = nn.Linear(d_model, d_model)
        self.linear = nn.Linear(d_model, d_model)
        self.self_attention = ScaledDotProductAttention()

    def forward(self, query, key, value, mask=None, return_attention=None):
        """query/key/value: [B, seq_len, d_model]; mask is forwarded to the
        attention core. Returns [B, seq_len, d_model], plus the attention
        weights when return_attention is truthy-not-None.
        """
        batch_size, seq_len, d_model = query.shape
        # Split each projection into heads:
        # [B, S, d_model] -> [B, S, H, head_dim] -> [B, H, S, head_dim]
        Query = self.WQ(query).view(batch_size, seq_len, self.num_head, -1).permute(0, 2, 1, 3)
        Key = self.WK(key).view(batch_size, seq_len, self.num_head, -1).permute(0, 2, 1, 3)
        Value = self.WV(value).view(batch_size, seq_len, self.num_head, -1).permute(0, 2, 1, 3)
        output, attention_score = self.self_attention(Query, Key, Value, mask=mask)
        # Merge heads back: [B, H, S, head_dim] -> [B, S, d_model]
        output = output.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_len, d_model)
        o = self.linear(output)
        if return_attention is not None:
            return o, attention_score
        else:
            return o


class PositionWiseFeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear.

    Applied independently at every sequence position; expands d_model to
    ffn_dim and projects back.
    """

    def __init__(self, d_model, ffn_dim, drop_prob=0.1):
        super().__init__()
        self.linear1 = nn.Linear(d_model, ffn_dim)
        self.Dropout = nn.Dropout(drop_prob)
        self.ReLU = nn.ReLU()
        self.linear2 = nn.Linear(ffn_dim, d_model)

    def forward(self, x):
        hidden = self.Dropout(self.ReLU(self.linear1(x)))
        return self.linear2(hidden)


# One transformer encoder layer (post-norm style).
class EncoderLayer(nn.Module):
    """Self-attention sub-layer followed by a position-wise FFN, each wrapped
    in dropout + residual connection + LayerNorm.

    Fix: drop_prob is now forwarded to PositionWiseFeedForward — previously
    the FFN silently fell back to its 0.1 default regardless of the value
    passed here (identical behavior for all current call sites, which use 0.1).
    """

    def __init__(self, d_model, feedforward_dim, num_heads, drop_prob):
        super().__init__()
        self.multi_attention = MultiHeadAttention(d_model, num_heads)
        self.norm1 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(drop_prob)
        self.ffn = PositionWiseFeedForward(d_model, feedforward_dim, drop_prob)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(drop_prob)

    def forward(self, x, src_mask):
        # Attention sub-layer with residual connection.
        residual = x
        x = self.multi_attention(x, x, x, src_mask)
        x = self.norm1(self.dropout1(x) + residual)

        # Feed-forward sub-layer with residual connection.
        residual = x
        x = self.norm2(self.dropout2(self.ffn(x)) + residual)
        return x


# A stack of N encoder layers.
class Encoder(nn.Module):
    """Apply `num_layers` EncoderLayer modules in sequence, threading the
    same src_mask through every layer."""

    def __init__(self, hidden_dim, feedforward_dim, num_heads, num_layers, drop_prob):
        super().__init__()
        stacked = [
            EncoderLayer(d_model=hidden_dim, feedforward_dim=feedforward_dim,
                         num_heads=num_heads, drop_prob=drop_prob)
            for _ in range(num_layers)
        ]
        self.layers = nn.ModuleList(stacked)

    def forward(self, x, src_mask):
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, src_mask)
        return out


class TranDGN(nn.Module):
    """Full model: multi-part observation encoder, two stacked transformer
    encoders over the agent graph, and a Q-head fed by the concatenation of
    the raw encoding and the final encoder output (a skip connection)."""

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions, detached_obs_dim):
        super(TranDGN, self).__init__()
        self.encoder1 = Encoder(hidden_dim, feedforward_dim=512, num_heads=2, drop_prob=0.1, num_layers=2)
        self.encoder2 = Encoder(hidden_dim, feedforward_dim=512, num_heads=2, drop_prob=0.1, num_layers=2)
        self.q_net = Q_Net(hidden_dim * 2, num_actions)
        self.multi = MultiObsEncoder(hidden_dim=hidden_dim, input_dim=detached_obs_dim)

    def forward(self, x, mask):
        encoded = self.multi(x)
        # NOTE(review): the mask is inverted here (True exactly where the
        # caller's mask is 0) — confirm this matches the masked_fill
        # convention used inside the attention module.
        adjacency = mask == 0
        refined = self.encoder2(self.encoder1(encoded, adjacency), adjacency)
        q = self.q_net(torch.cat([encoded, refined], dim=-1))
        return q, mask


class TranDGNWithoutMulti(nn.Module):
    """Ablation of TranDGN: the three observation parts are flattened into one
    vector and encoded by a plain ObsEncoder instead of the MultiObsEncoder;
    a single transformer encoder stack follows."""

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions, detached_obs_dim):
        super(TranDGNWithoutMulti, self).__init__()
        # Input is the concatenation of the 3 observation parts.
        self.obs_encoder = ObsEncoder(detached_obs_dim * 3, hidden_dim)
        self.encoder1 = Encoder(hidden_dim, feedforward_dim=256, num_heads=2, drop_prob=0.1, num_layers=1)
        self.q_net = Q_Net(hidden_dim * 2, num_actions)

    def forward(self, x, mask):
        batch_size, n_agent, num_parts, part_dim = x.shape
        # Collapse the (parts, part_dim) axes into one observation vector.
        flat_obs = x.reshape(batch_size, n_agent, num_parts * part_dim)
        encoded = self.obs_encoder(flat_obs)
        adjacency = mask == 0
        refined = self.encoder1(encoded, adjacency)
        q = self.q_net(torch.cat([encoded, refined], dim=-1))
        return q, mask


class TranDGNWithoutProcessor(nn.Module):
    """Ablation of TranDGN: the transformer encoder stacks are replaced by
    two DGN-style AttModel graph-attention layers; the multi-part observation
    encoder and the skip connection into the Q-head are kept."""

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions, detached_obs_dim):
        super(TranDGNWithoutProcessor, self).__init__()
        self.encoder1 = AttModel(n_agent, hidden_dim, hidden_dim, hidden_dim)
        self.encoder2 = AttModel(n_agent, hidden_dim, hidden_dim, hidden_dim)
        self.q_net = Q_Net(hidden_dim * 2, num_actions)
        self.multi = MultiObsEncoder(hidden_dim=hidden_dim, input_dim=detached_obs_dim)

    def forward(self, x, mask):
        encoded = self.multi(x)
        # NOTE(review): mask inverted (True where the caller's mask is 0) —
        # verify against AttModel's masking convention.
        adjacency = mask == 0
        refined = self.encoder2(self.encoder1(encoded, adjacency), adjacency)
        q = self.q_net(torch.cat([encoded, refined], dim=-1))
        return q, mask
    

class TranDGNWithoutTran(nn.Module):
    """Ablation of TranDGN: the transformer encoder stacks are replaced by a
    two-layer linear bottleneck, so the agent-relation mask has no effect on
    the computation (it is still accepted and returned for interface parity)."""

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions, detached_obs_dim):
        super(TranDGNWithoutTran, self).__init__()
        self.encoder1 = nn.Linear(hidden_dim, hidden_dim * 2)
        self.encoder2 = nn.Linear(hidden_dim * 2, hidden_dim)
        self.q_net = Q_Net(hidden_dim * 2, num_actions)
        self.multi = MultiObsEncoder(hidden_dim=hidden_dim, input_dim=detached_obs_dim)

    def forward(self, x, mask):
        encoded = self.multi(x)
        # mask is deliberately unused: the linear encoders below have no
        # notion of agent-to-agent attention.
        refined = self.encoder2(self.encoder1(encoded))
        q = self.q_net(torch.cat([encoded, refined], dim=-1))
        return q, mask