import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from parameters import get_lfads_params

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Fixed seed for reproducible runs.
random_seed = 42
# Deterministically seeded RNG.
# NOTE(review): generator1 is not referenced anywhere in this file chunk —
# presumably intended for dataset splitting or sampling; confirm or remove.
generator1 = torch.Generator().manual_seed(random_seed)


class LFADS(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, factor_size, controller_size):
        """
        :param input_size: 数据输入大小，即通道数或者神经元数。例：input_size=67
        :param hidden_size: 隐藏层大小。例：hidden_size=128
        :param output_size: 数据输出大小。例：output_size=67
        :param factor_size: 潜在因子大小。例：factor_size=128
        :param controller_size: 控制因子大小。例：controller_size=128
        """
        super(LFADS, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.factor_size = factor_size
        self.controller_size = controller_size

        # Encoder RNN
        self.encoder_rnn = nn.GRU(input_size, hidden_size)
        self.encoder_mean = nn.Linear(hidden_size, factor_size)
        self.encoder_logvar = nn.Linear(hidden_size, factor_size)

        # Decoder RNN
        self.decoder_rnn = nn.GRU(factor_size + controller_size, hidden_size)
        self.decoder_output = nn.Linear(hidden_size, output_size)

        # Controller RNN
        self.controller_rnn = nn.GRU(factor_size, controller_size)
        self.controller_output = nn.Linear(controller_size, factor_size)

    # 对潜在因子的分布进行采样的
    def reparameterize(self, mean, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mean + eps * std

    # 计算kl散度
    def kl_loss(self, mean, logvar):
        return -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())

    def forward(self, input_data):
        """
        :param input_data: 输入的神经信号数据：spike发放率，大小为[data_length,1,num_neurons]。例：[120,1,67],120为时间长，67为神经元数
        :return:dec_outputs: 输出的对应标签：速度，大小为[data_length,output_size]。例：[120,67],120为时间长，67为所设置的output_size。
        kl_loss: 模型中计算的kl散度损失，与模型损失一同作为总损失。
        """
        # Encoder RNN
        enc_outputs, enc_hidden = self.encoder_rnn(input_data)
        enc_mean = self.encoder_mean(enc_hidden)
        enc_logvar = self.encoder_logvar(enc_hidden)
        enc_z = self.reparameterize(enc_mean, enc_logvar)
        kl_loss = self.kl_loss(enc_mean, enc_logvar)

        # Controller RNN
        con_outputs, con_hidden = self.controller_rnn(enc_z)
        con_factor = self.controller_output(con_hidden)

        # Decoder RNN
        dec_hidden = con_hidden
        dec_outputs = []
        for i in range(input_data.size(0)):
            dec_input = torch.cat([enc_z, con_factor], dim=-1)
            dec_output, dec_hidden = self.decoder_rnn(dec_input, dec_hidden)
            dec_outputs.append(dec_output)
        dec_outputs = torch.stack(dec_outputs, dim=0)
        dec_outputs = self.decoder_output(dec_outputs)
        dec_outputs = dec_outputs.squeeze(axis=0)

        return dec_outputs, kl_loss

# Train LFADS
def train_lfads(train_dataloader, params):
    """
    Train an LFADS model and return the best-performing weights.

    :param train_dataloader: training set in torch DataLoader form; each
        sample is 3-D (1, data_length, num_neurons) and each label is 2-D
        (data_length, num_dimension).
    :param params: dict of hyperparameters: 'num_epochs', 'learning_rate',
        'input_size', 'hidden_size', 'output_size', 'factor_size',
        'controller_size'.
    :return: the state_dict of the LFADS model from the epoch with the lowest
        training loss, usable to extract new features.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Unpack hyperparameters.
    num_epochs = params['num_epochs']
    learning_rate = params['learning_rate']
    input_size = params['input_size']
    hidden_size = params['hidden_size']
    output_size = params['output_size']
    factor_size = params['factor_size']
    controller_size = params['controller_size']

    # Build the LFADS model.
    model = LFADS(input_size=input_size, hidden_size=hidden_size, output_size=output_size, factor_size=factor_size, controller_size=controller_size).to(device)
    # Reconstruction loss is MSE.
    criterion = nn.MSELoss()
    # Optimizer is Adam.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # aligner_list holds one weight snapshot per epoch;
    # loss_all_list holds the mean training loss per epoch.
    aligner_list = []
    loss_all_list = []

    # Training loop.
    for epoch in range(num_epochs):
        model.train()
        loss_all_trial = 0.0
        # Labels are unused: the objective is reconstruction of the inputs.
        for inputs, labels in train_dataloader:
            inputs = inputs[0].float().to(device)

            # Reset gradients.
            optimizer.zero_grad()
            # Model output and KL-divergence loss.
            outputs, kl_loss = model(inputs)
            # MSE reconstruction loss against the inputs (autoencoding).
            # NOTE(review): outputs keep an extra singleton dim from the
            # decoder stacking, so this relies on broadcasting — verify the
            # shapes actually match element-for-element.
            recon_loss = criterion(outputs, inputs)
            # Total loss = KL divergence + reconstruction MSE.
            loss = recon_loss + kl_loss
            loss.backward()
            optimizer.step()
            # Accumulate the loss.
            loss_all_trial += loss.item()

        epoch_loss = loss_all_trial / len(train_dataloader)
        print('Epoch %d loss: %.3f' % (epoch + 1, epoch_loss))

        # BUG FIX: state_dict() returns live references to the parameter
        # tensors, so appending it directly made every "snapshot" alias the
        # same storage — aligner_list[IDX] always equalled the FINAL epoch's
        # weights. Clone each tensor to freeze this epoch's snapshot.
        aligner_list.append({k: v.detach().clone() for k, v in model.state_dict().items()})
        loss_all_list.append(epoch_loss)

    # Index of the epoch with the lowest training loss.
    IDX = np.argmin(loss_all_list)
    print('The aligner has been well trained on the %dth epoch' % (IDX + 1))

    return aligner_list[IDX]


if __name__ == '__main__':
    # Dataset in DataLoader form: each sample is 3-D (1, data_length, num_neurons),
    # each label is 2-D (data_length, num_dimension).
    # NOTE(review): this is a placeholder string, not an actual DataLoader —
    # running this script as-is will fail inside train_lfads; wire up a real
    # DataLoader before use.
    train_dataloader = 'train_dataloader'

    # Fetch the hyperparameters LFADS needs.
    params = get_lfads_params()

    # Train and obtain the best-performing LFADS aligner weights (state_dict).
    aligner = train_lfads(train_dataloader, params)
