"""
model.py: Network Modules
(1) Encoder
(2) Recovery
(3) Generator
(4) Supervisor
(5) Discriminator
"""

import torch
import torch.nn as nn


def _weights_init(x):
    class_name = x.__class__.__name__

    if isinstance(x, nn.Linear):
        nn.init.xavier_uniform_(x.weight)
        if x.bias is not None:
            nn.init.constant_(x.bias, 0)
    elif class_name.find('LayerNorm') != -1:
        x.weight.data.normal_(1.0, 0.02)
        x.bias.data.fill_(0)


class GeneralModuleLayer(nn.Module):
    def __init__(self, gru_type, feature_size, input_size, hidden_size, output_size, dropout_rate, is_fc, is_ln, is_res):
        """One recurrent building layer: (GRU | GRU-I) -> [LayerNorm] -> [fc + dropout] -> [residual] -> [LayerNorm].

        Args:
            gru_type: 'grui' enables the decay-gated GRU variant; anything else
                uses a plain GRU.
            feature_size: input width of the beta (decay) layer; only used for 'grui'.
            input_size: width of this layer's input.
            hidden_size: GRU hidden units (also the GRU output width).
            output_size: width of the fc output (this layer's output when the fc
                is enabled).
            dropout_rate: dropout probability applied after the fc.
            is_fc / is_ln / is_res: feature switches.  NOTE(review): these are
                compared against the string 'True' in forward, so they are
                expected to be strings (e.g. parsed CLI flags), not booleans.
        """
        super(GeneralModuleLayer, self).__init__()
        self.is_fc = is_fc
        self.is_ln = is_ln
        self.is_res = is_res

        self.gru_type = gru_type
        self.input_size = input_size
        self.output_size = output_size

        if self.gru_type == 'grui':
            # Produces the per-step decay factors for the hidden state.
            self.beta_layer = nn.Linear(feature_size, hidden_size)

        # With num_layers=1 an inter-layer dropout value is ignored by nn.GRU
        # and only triggers a UserWarning, so it is intentionally not passed;
        # dropout is applied explicitly after the fc below instead.
        self.rnn = nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=1, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.rnn_ln = nn.LayerNorm(hidden_size, eps=1e-6)  # normalizes the GRU output
        self.fc_ln = nn.LayerNorm(output_size, eps=1e-6)   # normalizes after fc + dropout
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x, delta_pre):
        """Run one layer.

        Args:
            x: [bsz, seq_len, input_size] input sequence.
            delta_pre: time-gap features used to compute the decay vector;
                only consumed when gru_type == 'grui'.

        Returns:
            [bsz, seq_len, hidden_size], or [bsz, seq_len, output_size] when
            the fc is enabled.
        """
        if self.gru_type == 'grui':
            rnn_forward = grui_forward
            beta = self.beta_layer(delta_pre)  # [bsz, seq_len, hidden_size]
            # exp(-max(beta, 0)) in (0, 1].  clamp is mathematically identical
            # to torch.maximum(beta, torch.tensor(0)) but avoids allocating a
            # CPU scalar per call, which would fail when beta lives on a GPU.
            beta = torch.exp(-torch.clamp(beta, min=0))
        else:
            rnn_forward = gru_forward
            beta = None

        y = rnn_forward(self.rnn, x, beta)  # input_size -> hidden_size
        if self.is_ln == 'True':
            y = self.rnn_ln(y)
        # No residual here: the GRU output width (hidden_size) generally
        # differs from the input width (input_size).

        if self.is_fc == 'True':
            y = self.fc(y)  # hidden_size -> output_size
            y = self.dropout(y)
        if self.is_res == 'True':
            # Residual connection is only possible when the shapes line up.
            if self.input_size == self.output_size:
                y = x + y
        if self.is_ln == 'True' and self.is_fc == 'True':
            y = self.fc_ln(y)  # normalize before feeding the next layer

        return y


def grui_forward(rnn: nn.Module, x: torch.Tensor, beta: torch.Tensor):
    """Step the GRU one timestep at a time, decaying the carried hidden state
    by the per-step factors in ``beta`` before each update (GRU-I style).

    Args:
        rnn: a batch-first nn.GRU.
        x: input sequence, [bsz, seq_len, input_size].
        beta: decay factors, [bsz, seq_len, hidden_size].

    Returns:
        Concatenated per-step outputs, [bsz, seq_len, hidden_size].
    """
    batch, steps, _ = x.shape
    # Initial hidden state: [num_layers, bsz, hidden_size], matching x's
    # dtype and device.
    hidden = x.new_zeros(rnn.num_layers, batch, rnn.hidden_size)
    outputs = []
    for t in range(steps):
        # Decay the previous hidden state, then advance a single step.
        decay = beta[:, t, :].unsqueeze(0)      # [1, bsz, hidden_size]
        hidden = hidden * decay
        step_out, hidden = rnn(x[:, t:t + 1, :], hidden)  # [bsz, 1, hidden_size]
        outputs.append(step_out)
    return torch.cat(outputs, dim=1)


def gru_forward(rnn: nn.Module, x: torch.Tensor, beta=None):
    """Plain full-sequence GRU pass.

    ``beta`` is accepted but unused so the signature matches grui_forward.
    """
    outputs, _final_hidden = rnn(x)
    return outputs


class GeneralModule(nn.Module):
    def __init__(self, num_layer, gru_type, feature_size, input_size, hidden_size, output_size, dropout_rate,
                 final_output_size, is_fc, is_ln, is_res):
        """Stack of GeneralModuleLayer blocks followed by a final fc (+ optional sigmoid).

        Args:
            num_layer: number of stacked layers.
            gru_type: 'grui' or plain GRU, forwarded to every layer.
            feature_size: width of the decay-feature input (delta_pre).
            input_size: width of this module's input (consumed by the first layer).
            hidden_size: GRU hidden units inside every layer.
            output_size: per-layer output width; also the input width from the
                second layer onward.
            dropout_rate: dropout probability inside each layer.
            final_output_size: width of the module's final fc output.
            is_fc / is_ln / is_res: per-layer feature switches (string flags).
        """
        super(GeneralModule, self).__init__()
        self.num_layers = num_layer
        # The first layer adapts input_size -> output_size; every subsequent
        # layer maps output_size -> output_size, so a single construction path
        # covers both the one-layer and the multi-layer case.
        self.layers = nn.ModuleList([GeneralModuleLayer(gru_type, feature_size,
                                                        input_size=input_size,
                                                        hidden_size=hidden_size,
                                                        output_size=output_size,
                                                        dropout_rate=dropout_rate,
                                                        is_fc=is_fc,
                                                        is_ln=is_ln,
                                                        is_res=is_res)])
        for _ in range(1, self.num_layers):
            self.layers.append(GeneralModuleLayer(gru_type, feature_size,
                                                  input_size=output_size,
                                                  hidden_size=hidden_size,
                                                  output_size=output_size,
                                                  dropout_rate=dropout_rate,
                                                  is_fc=is_fc,
                                                  is_ln=is_ln,
                                                  is_res=is_res))

        self.final_fc = nn.Linear(output_size, final_output_size)
        self.sigmoid = nn.Sigmoid()
        self.apply(_weights_init)

    def forward(self, inputs, delta_pre, sigmoid=True):
        """Run all layers, then the final fc; optionally squash with sigmoid.

        Args:
            inputs: [bsz, seq_len, input_size].
            delta_pre: decay features forwarded to every layer (only used by
                'grui' layers).
            sigmoid: apply the final sigmoid when True.

        Returns:
            [bsz, seq_len, final_output_size].
        """
        output = inputs
        for layer in self.layers:
            output = layer(output, delta_pre)
        output = self.final_fc(output)
        if sigmoid:
            output = self.sigmoid(output)
        return output


class Encoder(GeneralModule):
    """Maps z_dim features into the latent representation."""

    def __init__(self, opt):
        # Residuals inside a layer only apply when its input and output widths
        # match; with layer_size == 'hidden_size' they differ, so no residual.
        layer_width = opt.hidden_dim if opt.layer_size == 'hidden_size' else opt.z_dim
        super(Encoder, self).__init__(
            num_layer=opt.num_layer,
            gru_type=opt.gru_type,
            feature_size=opt.z_dim,
            input_size=opt.z_dim,
            hidden_size=opt.hidden_dim,
            output_size=layer_width,
            dropout_rate=opt.dropout,
            final_output_size=layer_width,
            is_fc=opt.is_fc,
            is_ln=opt.is_ln,
            is_res=opt.is_res,
        )


class Recovery(GeneralModule):
    """Maps the latent representation back to z_dim features."""

    def __init__(self, opt):
        layer_width = opt.hidden_dim if opt.layer_size == 'hidden_size' else opt.z_dim
        super(Recovery, self).__init__(
            num_layer=opt.num_layer,
            gru_type=opt.gru_type,
            feature_size=opt.z_dim,
            input_size=layer_width,
            hidden_size=opt.hidden_dim,
            output_size=layer_width,
            dropout_rate=opt.dropout,
            final_output_size=opt.z_dim,
            is_fc=opt.is_fc,
            is_ln=opt.is_ln,
            is_res=opt.is_res,
        )


class Generator(GeneralModule):
    """Maps random z_dim noise into the latent representation."""

    def __init__(self, opt):
        layer_width = opt.hidden_dim if opt.layer_size == 'hidden_size' else opt.z_dim
        super(Generator, self).__init__(
            num_layer=opt.num_layer,
            gru_type=opt.gru_type,
            feature_size=opt.z_dim,
            input_size=opt.z_dim,
            hidden_size=opt.hidden_dim,
            output_size=layer_width,
            dropout_rate=opt.dropout,
            final_output_size=layer_width,
            is_fc=opt.is_fc,
            is_ln=opt.is_ln,
            is_res=opt.is_res,
        )


class Supervisor(GeneralModule):
    """Predicts the next latent step from the current latent sequence."""

    def __init__(self, opt):
        layer_width = opt.hidden_dim if opt.layer_size == 'hidden_size' else opt.z_dim
        super(Supervisor, self).__init__(
            num_layer=opt.num_layer,
            gru_type=opt.gru_type,
            feature_size=opt.z_dim,
            input_size=layer_width,
            hidden_size=opt.hidden_dim,
            output_size=layer_width,
            dropout_rate=opt.dropout,
            final_output_size=layer_width,
            is_fc=opt.is_fc,
            is_ln=opt.is_ln,
            is_res=opt.is_res,
        )


class Discriminator(GeneralModule):
    """Scores latent sequences as real/fake with a single output unit."""

    def __init__(self, opt):
        layer_width = opt.hidden_dim if opt.layer_size == 'hidden_size' else opt.z_dim
        super(Discriminator, self).__init__(
            num_layer=opt.num_layer,
            gru_type=opt.gru_type,
            feature_size=opt.z_dim,
            input_size=layer_width,
            hidden_size=opt.hidden_dim,
            output_size=layer_width,
            dropout_rate=opt.dropout,
            final_output_size=1,
            is_fc=opt.is_fc,
            is_ln=opt.is_ln,
            is_res=opt.is_res,
        )

    def forward(self, inputs, delta_pre, sigmoid=False):
        # Unlike the other modules, the default is sigmoid=False so the
        # discriminator emits raw logits.
        return super(Discriminator, self).forward(inputs, delta_pre, sigmoid)
        # return self.final_fc(output)


class Imputation(nn.Module):
    """Holds the imputed values themselves as a learnable parameter.

    Args:
        opt: unused; kept so the constructor signature matches the other modules.
        x: initial guess for the data (array-like), copied into a float32 parameter.
    """

    def __init__(self, opt, x):
        super(Imputation, self).__init__()
        # The entire tensor is optimized directly by the training loop.
        self.z = torch.nn.Parameter(torch.tensor(x, dtype=torch.float32))

    def forward(self):
        """Return the current imputation estimate."""
        return self.z
