import torch
import torch.nn as nn
import torch.nn.init as init


def _weights_init(m):
    classname = m.__class__.__name__
    if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
        init.kaiming_normal_(m.weight)
    elif classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


class Encoder(nn.Module):
    """Embedding network: maps input features to latent representations.

    Args (forward):
      input: (bsz, seq_len, z_dim) tensor.
      delta_pre: time-gap information (unused on the plain-GRU path).
      sigmoid: if True, squash the output into (0, 1).

    Returns:
      H: (bsz, seq_len, hidden_dim) latent codes.
    """
    def __init__(self, opt):
        super(Encoder, self).__init__()
        self.gru_type = opt.gru_type
        self.rnn = nn.GRU(
            input_size=opt.z_dim,
            hidden_size=opt.hidden_dim,
            num_layers=opt.num_layer,
            batch_first=True,
        )
        self.fc = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.sigmoid = nn.Sigmoid()
        self.apply(_weights_init)

    def forward(self, input, delta_pre, sigmoid=True):
        # Per-step GRU outputs, not just the final hidden state.
        outputs, _ = self.rnn(input)
        H = self.fc(outputs)
        return self.sigmoid(H) if sigmoid else H


class Recovery(nn.Module):
    """Recovery network: decodes latent representations back to data space.

    Args (forward):
      input: (bsz, seq_len, hidden_dim) latent tensor.
      delta_pre: time-gap information (unused on the plain-GRU path).
      sigmoid: if True, squash the output into (0, 1).

    Returns:
      X_tilde: (bsz, seq_len, z_dim) recovered data.
    """
    def __init__(self, opt):
        super(Recovery, self).__init__()
        self.gru_type = opt.gru_type
        self.rnn = nn.GRU(
            input_size=opt.hidden_dim,
            hidden_size=opt.hidden_dim,
            num_layers=opt.num_layer,
            batch_first=True,
        )
        self.fc = nn.Linear(opt.hidden_dim, opt.z_dim)
        self.sigmoid = nn.Sigmoid()
        self.apply(_weights_init)

    def forward(self, input, delta_pre, sigmoid=True):
        # NOTE(review): for a GRU-I variant, beta must not be None — the
        # plain-GRU path here ignores delta_pre entirely.
        outputs, _ = self.rnn(input)
        X_tilde = self.fc(outputs)
        return self.sigmoid(X_tilde) if sigmoid else X_tilde


class Generator(nn.Module):
    """Generator network: maps random noise to synthetic latent codes.

    Args (forward):
      input: (bsz, seq_len, z_dim) noise tensor.
      delta_pre: time-gap information (unused on the plain-GRU path).
      sigmoid: if True, squash the output into (0, 1).

    Returns:
      E: (bsz, seq_len, hidden_dim) generated latent codes.
    """
    def __init__(self, opt):
        super(Generator, self).__init__()
        self.gru_type = opt.gru_type
        self.rnn = nn.GRU(
            input_size=opt.z_dim,
            hidden_size=opt.hidden_dim,
            num_layers=opt.num_layer,
            batch_first=True,
        )
        self.fc = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.sigmoid = nn.Sigmoid()
        self.apply(_weights_init)

    def forward(self, input, delta_pre, sigmoid=True):
        outputs, _ = self.rnn(input)
        E = self.fc(outputs)
        return self.sigmoid(E) if sigmoid else E


class Discriminator(nn.Module):
    """Discriminator network: scores each time step of a latent sequence.

    Args (forward):
      input: (bsz, seq_len, hidden_dim) latent tensor.
      delta_pre: time-gap information (unused on the plain-GRU path).
      sigmoid: defaults to False — leave it off when training with
        cross-entropy / BCE-with-logits losses, which apply the sigmoid
        internally.

    Returns:
      Y_hat: (bsz, seq_len, 1) per-step scores (logits unless sigmoid=True).
    """
    def __init__(self, opt):
        super(Discriminator, self).__init__()
        self.gru_type = opt.gru_type

        self.rnn = nn.GRU(
            input_size=opt.hidden_dim,
            hidden_size=opt.hidden_dim,
            num_layers=opt.num_layer,
            batch_first=True,
        )
        self.fc = nn.Linear(opt.hidden_dim, 1)
        self.sigmoid = nn.Sigmoid()
        self.apply(_weights_init)

    def forward(self, input, delta_pre, sigmoid=False):
        # Scores every step of output (bsz, seq_len, hidden_dim), rather
        # than only the final hidden state.
        outputs, _ = self.rnn(input)
        Y_hat = self.fc(outputs)
        return self.sigmoid(Y_hat) if sigmoid else Y_hat


class Imputation(nn.Module):
    """Learnable imputation: wraps initial data values as a trainable parameter.

    Optimizing ``self.z`` directly fills in (imputes) the data values.

    Args:
      opt: options namespace (unused; kept for signature parity with the
           other networks in this file).
      x: array-like initial values; copied into a float32 parameter.
    """
    # Fix: the original defined ``forward`` twice (the second, identical
    # definition silently shadowed the first); the duplicate and the dead
    # commented-out alternative __init__ have been removed.
    def __init__(self, opt, x):
        super(Imputation, self).__init__()
        self.z = torch.nn.Parameter(torch.tensor(x, dtype=torch.float32))

    def forward(self):
        # Takes no input: the "output" is the parameter itself.
        return self.z

