"""


Version: 0.1
Author: lk
Date: 2022-03-08 18:41
"""
import numpy as np
import torch
import torch.nn as nn
import dgl
import torch.nn.functional as F
from dgl.nn.pytorch.conv import GINConv
from dgl.nn.pytorch.glob import AvgPooling, MaxPooling, SumPooling


class SElayer(nn.Module):
    """Squeeze-and-excitation gate.

    Summarises the input by its mean over dim 0, pushes that summary
    through a bottleneck MLP ending in a sigmoid, and rescales every
    row of the input by the resulting per-channel weights in (0, 1).
    """

    def __init__(self, in_channels, se_channels):
        super().__init__()
        # Squeeze to se_channels, expand back to in_channels, gate via sigmoid.
        self.encoder_decoder = nn.Sequential(
            nn.Linear(in_channels, se_channels),
            nn.ELU(),
            nn.Linear(se_channels, in_channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Global summary over the first axis, then broadcast the gate over x.
        gate = self.encoder_decoder(x.mean(dim=0))
        return gate * x


# NOTE(review): this class is a byte-for-byte duplicate of the SElayer
# defined above; Python keeps only this second definition. One of the two
# copies should be deleted.
class SElayer(nn.Module):
    """Squeeze-and-excitation gate: rescales each feature channel of x by a
    learned weight in (0, 1) computed from the mean of x over dim 0."""

    def __init__(self, in_channels, se_channels):
        super().__init__()
        # Bottleneck MLP: squeeze to se_channels, expand back, sigmoid gate.
        self.encoder_decoder = nn.Sequential(
            nn.Linear(in_channels, se_channels),
            nn.ELU(),
            nn.Linear(se_channels, in_channels),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Mean over the first axis acts as the "squeeze" summary.
        x_global = torch.mean(x, dim=0)
        s = self.encoder_decoder(x_global)
        # Broadcast the per-channel gate over every row of x.
        return s * x


class MLP(nn.Module):
    """Multi-layer perceptron with a norm layer between hidden layers.

    With ``num_layers == 1`` this is just a single Linear projection.
    Otherwise it stacks Linear -> norm -> ReLU blocks and finishes with an
    un-normalised Linear map to ``output_dim``. The norm is either a
    BatchNorm1d or, when ``use_selayer`` is true, an SElayer bottlenecked
    at sqrt(hidden_dim).
    """

    def __init__(self, num_layers, input_dim, hidden_dim, output_dim, use_selayer):
        super().__init__()
        self.num_layer = num_layers
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        # True -> degenerate single-Linear model, no hidden stack.
        self.linear_or_not = True
        if num_layers < 1:
            raise ValueError("Number of layers should be positive!")
        elif num_layers == 1:
            self.linear = nn.Linear(input_dim, output_dim)
        else:
            self.linear_or_not = False
            self.linears = nn.ModuleList()
            self.batch_norms = nn.ModuleList()

            # Layer widths: input -> hidden x (num_layers - 1) -> output.
            widths = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
            for fan_in, fan_out in zip(widths[:-1], widths[1:]):
                self.linears.append(nn.Linear(fan_in, fan_out))

            # One norm per hidden activation; the output layer is left raw.
            for _ in range(num_layers - 1):
                self.batch_norms.append(
                    SElayer(hidden_dim, int(np.sqrt(hidden_dim)))
                    if use_selayer
                    else nn.BatchNorm1d(hidden_dim)
                )

    def forward(self, x):
        # Fast path: a 1-layer MLP is a plain linear map.
        if self.linear_or_not:
            return self.linear(x)
        h = x
        for linear, norm in zip(self.linears[:-1], self.batch_norms):
            h = F.relu(norm(linear(h)))
        # Final projection, deliberately without norm/activation.
        return self.linears[-1](h)

class ApplyNodeFunc(nn.Module):
    """Node-update function: runs an MLP, then a norm layer, then ReLU.

    ``mlp`` is any module exposing an ``output_dim`` attribute; the norm is
    a BatchNorm1d over that width, or an SElayer (bottlenecked at
    sqrt(output_dim)) when ``use_selayer`` is true.
    """

    def __init__(self, mlp, use_selayer):
        super().__init__()
        self.mlp = mlp
        self.use_selayer = use_selayer
        if use_selayer:
            self.bn = SElayer(mlp.output_dim, int(np.sqrt(mlp.output_dim)))
        else:
            self.bn = nn.BatchNorm1d(mlp.output_dim)

    def forward(self, x):
        # MLP -> norm -> ReLU, fused into a single expression.
        return F.relu(self.bn(self.mlp(x)))


if __name__ == '__main__':
    # Smoke test: push 5 random 12-dim feature rows through a 6-layer MLP
    # (with SE norms) wrapped in ApplyNodeFunc (with BatchNorm).
    features = torch.randn(5, 12)
    mlp = MLP(6, 12, 128, 5, True)
    node_func = ApplyNodeFunc(mlp, False)
    print(node_func(features))
