import torch
from torch import nn


def weights_init(m):
    """Initialize a module's parameters DCGAN-style.

    Linear layers get weights ~ N(0, 0.02) and zero bias; BatchNorm layers
    get weights ~ N(1, 0.02) and zero bias. Intended for use with
    ``model.apply(weights_init)``. Other module types are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        # nn.init functions run under no_grad, avoiding deprecated .data access.
        nn.init.normal_(m.weight, 0.0, 0.02)
        nn.init.zeros_(m.bias)
    elif classname.find('BatchNorm') != -1:
        # BatchNorm scale is centered at 1 so layers start near identity.
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.zeros_(m.bias)


# Generator network
class MLP_Generator(nn.Module):
    """MLP generator: a stack of Linear -> BatchNorm1d -> ReLU hidden blocks
    followed by a plain Linear output layer.

    No final activation is applied; downstream code (or the loss) is
    expected to add one if needed (e.g. a sigmoid).
    """

    def __init__(self, input_dim, output_dim, layers=None):
        """
        Args:
            input_dim: size of the input noise vector.
            output_dim: size of the generated output vector.
            layers: sequence of hidden-layer widths. ``None`` or empty means
                a single Linear map from ``input_dim`` to ``output_dim``.
        """
        super().__init__()
        # `layers=None` avoids the mutable-default-argument pitfall; the
        # original `layers=[]` behavior is preserved.
        blocks = []
        in_dim = input_dim
        for hidden_dim in (layers or []):
            blocks.append(self.get_generator_block(in_dim, hidden_dim))
            in_dim = hidden_dim
        # Final projection: linear only, no normalization or activation.
        blocks.append(nn.Linear(in_dim, output_dim))
        self.main = nn.Sequential(*blocks)
        # Weight init intentionally left to the caller, e.g.
        # `self.apply(weights_init)` if desired.

    def forward(self, noise):
        """Map a batch of noise vectors to generated samples."""
        return self.main(noise)

    def get_generator_block(self, input_dim, output_dim):
        """One hidden block: Linear -> BatchNorm1d -> ReLU."""
        return nn.Sequential(
            nn.Linear(input_dim, output_dim),
            nn.BatchNorm1d(output_dim),
            nn.ReLU(inplace=True),
        )


# Discriminator network
class MLP_Discriminator(nn.Module):
    """MLP discriminator: a stack of Linear -> LeakyReLU(0.2) hidden blocks
    followed by a plain Linear output layer.

    No final activation is applied; the loss function is expected to
    incorporate one (e.g. BCEWithLogitsLoss applies the sigmoid).
    """

    def __init__(self, input_dim, output_dim=1, layers=None):
        """
        Args:
            input_dim: size of the input feature vector.
            output_dim: size of the output (1 for a real/fake score).
            layers: sequence of hidden-layer widths. ``None`` or empty means
                a single Linear map from ``input_dim`` to ``output_dim``.
        """
        super().__init__()
        # `layers=None` avoids the mutable-default-argument pitfall; the
        # original `layers=[]` behavior is preserved.
        blocks = []
        in_dim = input_dim
        for hidden_dim in (layers or []):
            blocks.append(self.get_discriminator_block(in_dim, hidden_dim))
            in_dim = hidden_dim
        # Final linear layer without an activation; the loss adds one.
        blocks.append(nn.Linear(in_dim, output_dim))
        self.main = nn.Sequential(*blocks)
        # Weight init intentionally left to the caller, e.g.
        # `self.apply(weights_init)` if desired.

    def forward(self, x):
        """Score a batch of input vectors (raw logits, no sigmoid)."""
        return self.main(x)

    def get_discriminator_block(self, input_dim, output_dim):
        """One hidden block: Linear -> LeakyReLU(0.2)."""
        return nn.Sequential(
            nn.Linear(input_dim, output_dim),
            nn.LeakyReLU(0.2, inplace=True),
        )
