import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import MLP, GNN

class Encoder(nn.Module):
    """Embed per-node inputs into feature vectors.

    Produces (a) a learned embedding of the concatenated [position, area]
    input via an MLP and (b) a sinusoidal (RoPE-style) positional encoding
    of the raw coordinates.
    """

    def __init__(self,
                 space_size = 3,
                 state_embedding_dim = 128
                 ):
        """
        Args:
            space_size: number of spatial coordinates per node.
            state_embedding_dim: output feature width of the node encoder.
        """
        super(Encoder, self).__init__()

        self.state_embedding_dim = state_embedding_dim

        # +1 input channel for the scalar node area appended to the position.
        self.fv1 = MLP(input_size = space_size + 1, output_size=state_embedding_dim, act = "SiLU", n_hidden=1, layer_norm=False)

    def RotaryEmbedding(self, x, device, base=10000, width=64):
        """Sinusoidal positional encoding of coordinates.

        Args:
            x: float tensor of shape [B, N, space_size] (node coordinates).
            device: device on which to build the frequency table.
            base: frequency base (default 10000, the classic Transformer value).
            width: number of output channels (width//2 cosines + width//2 sines).

        Returns:
            Tensor of shape [B, N, width].

        NOTE: the einsum contracts (sums over) the coordinate axis ``i``,
        so this encodes ``sum_i x[..., i] * theta[d]`` rather than each
        coordinate separately.
        """
        # theta_d = base^(-2d/width), one frequency per channel pair.
        theta = 1. / (base ** (torch.arange(0, width, 2, device=device).float() / width))

        # [B, N, width//2]; coordinate axis is summed out.
        idx_theta = torch.einsum('bsi,d->bsd', x, theta)

        return torch.cat([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)

    def forward(self, node_pos, areas):
        """Return (node embedding [B, N, D], positional encoding [B, N, 64])."""
        # 1. positional encoding of the raw coordinates
        pos_enc = self.RotaryEmbedding(node_pos, node_pos.device)

        # 2. learned embedding of [position, area]; dim = [B, N, space_size + 1]
        en_in = torch.cat((node_pos, areas), dim = -1)
        v_node = self.fv1(en_in)

        return v_node, pos_enc

class AttentionBlock(nn.Module):
    """Token-slot attention.

    Pools N node features into a small set of learned token slots,
    runs multi-head self-attention among the slots, then scatters the
    mixed tokens back to the nodes using the same pooling weights.
    """

    def __init__(self,
                n_token = 64,
                token_dim = 128,
                n_heads = 4,
                ):
        """
        Args:
            n_token: number of learned token slots.
            token_dim: channel width of tokens (must match incoming V).
            n_heads: attention heads for the token self-attention.
        """
        super(AttentionBlock, self).__init__()

        # Learned query slots, pre-scaled by 1/sqrt(d) at initialization.
        self.Q = nn.Parameter(1/(token_dim**0.5) * torch.randn(n_token, token_dim),requires_grad=True)

        self.n_heads = n_heads
        self.attention = nn.MultiheadAttention(embed_dim=token_dim, num_heads=n_heads, batch_first=True)

    def forward(self, V):
        # 1. slot weights: softmax over nodes of Q @ V^T
        scores = torch.einsum('mc, bnc -> bmn', self.Q, V)
        scores = F.softmax(scores, dim=-1)

        # 2. weighted pooling of node features into the token slots
        tokens = torch.einsum('bmn, bnc -> bmc', scores, V)

        # 3. self-attention among the token slots
        mixed, _ = self.attention(tokens, tokens, tokens)

        # 4. scatter the mixed tokens back to the nodes (same weights)
        node_out = torch.einsum('bmn, bmc -> bnc', scores, mixed)

        return node_out

class mixer_block(nn.Module):
    """One mixer stage: residual GNN message passing followed by a
    pre-LayerNorm token-attention block with residual connections.

    NOTE(review): ln1/ln2/linear/MHA are sized with att_embedding_dim while
    V carries state_embedding_dim features — this only runs when the two
    are equal; confirm against the caller's configuration.
    """

    def __init__(self,
                state_embedding_dim = 128,
                att_embedding_dim = 256,
                n_head = 4,
                n_token = 64
                ):
        super(mixer_block, self).__init__()

        # Width of the positional encoding produced by Encoder.RotaryEmbedding.
        pos_emb_dim = 64
        self.gnn = GNN(node_size = state_embedding_dim + pos_emb_dim, edge_size=state_embedding_dim, output_size = state_embedding_dim, layer_norm=True)

        #######################
        self.ln1 = nn.LayerNorm(att_embedding_dim)
        self.ln2 = nn.LayerNorm(att_embedding_dim)

        self.linear = nn.Linear(att_embedding_dim, att_embedding_dim)

        self.MHA = AttentionBlock(
            n_token = n_token,
            token_dim = att_embedding_dim,
            n_heads = n_head)

    def forward(self, V, E, edges, pos_enc):
        """Update node features V and edge features E; returns (V', E')."""
        # 1. residual message passing on [V ; pos_enc]
        dv, de = self.gnn(torch.cat([V, pos_enc], dim=-1), E, edges)
        V = V + dv
        E = E + de

        # 2. pre-norm attention + residual, then pre-norm linear + residual
        attended = V + self.MHA(self.ln1(V))
        out = attended + self.linear(self.ln2(attended))

        return out, E

class Mixer(nn.Module):
    """Stack of N mixer_blocks over node features V and edge features E.

    Edge features are derived from relative node positions (displacement
    vector plus its Euclidean length) and embedded once with an MLP.
    """

    def __init__(self,
                N= 3,
                space_size = 2,
                state_embedding_dim = 128,
                att_embedding_dim = 256,
                n_head = 4,
                n_token = 64
                ):
        super(Mixer, self).__init__()

        # Edge encoder: input is [displacement (space_size) ; distance (1)].
        self.fe = MLP(input_size = space_size + 1, output_size=state_embedding_dim, act = "SiLU", n_hidden=1, layer_norm=False)

        self.block_num = N
        self.mixer_blocks = nn.ModuleList(
            mixer_block(
                state_embedding_dim = state_embedding_dim,
                att_embedding_dim = att_embedding_dim,
                n_head = n_head,
                n_token = n_token)
            for _ in range(N)
        )

    def forward(self, V, edges, node_pos, pos_enc):
        """Run the mixer stack; edges is [B, M, 2] sender/receiver indices."""
        # 1. build edge features from relative positions
        ########################
        edges = edges.long()

        # Gather sender/receiver coordinates for every edge.
        dim = node_pos.shape[-1]
        sender_idx = edges[..., 0].unsqueeze(-1).repeat(1, 1, dim)
        receiver_idx = edges[..., 1].unsqueeze(-1).repeat(1, 1, dim)
        senders = torch.gather(node_pos, -2, sender_idx)
        receivers = torch.gather(node_pos, -2, receiver_idx)

        distance = receivers - senders
        norm = torch.sqrt((distance ** 2).sum(-1, keepdims=True))
        E = self.fe(torch.cat([distance, norm], dim=-1))

        # 2. run every mixer block in sequence
        ########################
        for block in self.mixer_blocks:
            V, E = block(V, E, edges, pos_enc)

        return V

class Decoder(nn.Module):
    """Map per-node embeddings to the final per-node state prediction."""

    def __init__(self,
                 state_embedding_dim = 128,
                 state_size = 1
                 ):
        super(Decoder, self).__init__()

        # Two-layer MLP head; kept as an indexed Sequential so checkpoint
        # keys (final_mlp_node.0 / .2) remain stable.
        layers = [
            nn.Linear(state_embedding_dim, state_embedding_dim),
            nn.PReLU(),
            nn.Linear(state_embedding_dim, state_size),
        ]
        self.final_mlp_node = nn.Sequential(*layers)

    def forward(self, V):
        """V: [B, N, state_embedding_dim] -> [B, N, state_size]."""
        return self.final_mlp_node(V)

class time_stepping(nn.Module):
    """Encoder -> Mixer -> Decoder pipeline for one prediction step."""

    def __init__(self,
                N_block =3,
                state_size = 1,
                space_size = 3,
                state_embedding_dim = 128,
                att_embedding_dim = 256,
                n_head = 4,
                n_token = 64,
                ):
        """
        Args:
            N_block: number of stacked mixer blocks.
            state_size: channels of the predicted per-node state.
            space_size: spatial coordinates per node.
            state_embedding_dim / att_embedding_dim: feature widths.
            n_head / n_token: attention configuration per mixer block.
        """
        super(time_stepping, self).__init__()

        self.encoder = Encoder(
            space_size = space_size,
            state_embedding_dim = state_embedding_dim,
            )

        self.mixer = Mixer(
            N= N_block,
            space_size = space_size,
            state_embedding_dim = state_embedding_dim,
            att_embedding_dim = att_embedding_dim,
            n_head = n_head,
            n_token = n_token
        )

        self.decoder = Decoder(
            state_embedding_dim = state_embedding_dim,
            state_size = state_size
            )

    def forward(self, node_pos, areas, edges):
        """Encode nodes, mix over the graph, decode the next state."""
        V, pos_enc = self.encoder(node_pos, areas)
        V = self.mixer(V, edges, node_pos, pos_enc)
        return self.decoder(V)

class Model(nn.Module):
    """Top-level wrapper exposing a single time-stepping forward pass."""

    def __init__(self,
                N_block = 3,
                state_size = 1,
                space_size = 3,
                state_embedding_dim = 128,
                att_embedding_dim = 256,
                n_head = 4,
                n_token = 64,
                ):
        super(Model, self).__init__()

        # All configuration is forwarded verbatim to the stepping module.
        self.time_stepping = time_stepping(
            N_block = N_block,
            state_size = state_size,
            space_size = space_size,
            state_embedding_dim = state_embedding_dim,
            att_embedding_dim = att_embedding_dim,
            n_head = n_head,
            n_token = n_token
        )

    def forward(self, node_pos, areas, edges):
        """Predict the next per-node state from positions, areas and edges."""
        return self.time_stepping(node_pos, areas, edges)