from torch import nn


class MLP(nn.Module):
    """Multi-layer perceptron with optional LayerNorm, Dropout, and LeakyReLU.

    Hidden-layer widths come from ``layers``; a final Linear maps the last
    hidden width (or ``input_dim`` when ``layers`` is empty) to
    ``output_dim``. Per hidden layer the order is:
    Linear -> [LayerNorm] -> [Dropout(p=0.5)] -> [LeakyReLU].
    When ``relu`` is True a LeakyReLU is also appended after the final Linear.
    """

    def __init__(self, input_dim, output_dim, relu=True, bias=True,
                 dropout=False, norm=False, layers=None):
        """
        Args:
            input_dim: number of input features.
            output_dim: number of output features.
            relu: append ``LeakyReLU(inplace=True)`` after every Linear,
                including the output layer.
            bias: use a bias term in every Linear.
            dropout: insert ``Dropout(p=0.5)`` after each hidden Linear.
            norm: insert ``LayerNorm`` after each hidden Linear.
            layers: iterable of hidden-layer widths; ``None`` (the default)
                means no hidden layers, i.e. a single Linear.
        """
        super().__init__()
        # Copy defensively: the previous implementation used a mutable
        # default (`layers=[]`) and `pop(0)`-ed the caller's list in place,
        # emptying it as a side effect of construction.
        hidden = [] if layers is None else list(layers)

        mod = []
        incoming = input_dim
        for outgoing in hidden:
            mod.append(nn.Linear(incoming, outgoing, bias=bias))
            incoming = outgoing
            if norm:
                mod.append(nn.LayerNorm(outgoing))
            if dropout:
                mod.append(nn.Dropout(p=0.5))
            if relu:
                mod.append(nn.LeakyReLU(inplace=True))

        mod.append(nn.Linear(incoming, output_dim, bias=bias))
        if relu:
            mod.append(nn.LeakyReLU(inplace=True))
        self.mod = nn.Sequential(*mod)

    def forward(self, x):
        """Run ``x`` through the stacked layers and return the result."""
        return self.mod(x)