import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import r2_score
from parameters import get_lstm_params


class LSTMModel(nn.Module):
    """LSTM decoder that maps a neural-signal sequence to a behavioral output.

    The final time step's hidden state is projected through a linear layer,
    so one output vector is produced per input sequence.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        """
        :param input_size: Size of each input feature vector, i.e. number of
            channels / neurons. Example: input_size=128
        :param hidden_size: Size of the LSTM hidden state. Example: hidden_size=128
        :param num_layers: Number of stacked LSTM layers. Example: num_layers=2
        :param output_size: Size of the output, e.g. velocity or trajectory
            dimensions. Example: output_size=2
        """
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """
        :param x: Neural-signal input (spike firing rates) of shape
            [data_length, seq_len, num_neurons], e.g. [120, 1, 67] where 120 is
            the number of time points and 67 the number of neurons.
        :return: Predicted labels (e.g. velocity) of shape
            [data_length, output_size], e.g. [120, 2] for x/y velocity.
        """
        # Initialize the hidden and cell states with zeros (nn.LSTM's own
        # default). The previous torch.randn init made inference
        # non-deterministic: the same input produced different outputs.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.lstm(x, (h0, c0))
        # Keep only the last time step's hidden state for the readout.
        out = out[:, -1, :]
        out = self.fc(out)
        return out


# 训练过程
# Single training step
def train_model(inputs, labels, model, criterion, optimizer, loss_all):
    """Run one optimization step on a single batch.

    :param inputs: Training batch from a torch DataLoader.
    :param labels: Labels for the training batch, from the same DataLoader.
    :param model: Model being trained.
        Example: model = LSTMModel(input_size, hidden_size, num_layers, output_size).to(device)
    :param criterion: Loss function. Example: criterion = nn.MSELoss()
    :param optimizer: Optimizer.
        Example: optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    :param loss_all: Running loss total, reset to 0.0 at the start of each
        epoch. Example: loss_all = 0.0
    :return: (outputs, loss_all) — the model's predictions for this batch and
        the updated running loss as a plain float.
    """
    # Clear gradients before the backward pass.
    optimizer.zero_grad()
    # Forward pass.
    outputs = model(inputs)
    # Compute the (e.g. MSE) loss.
    loss = criterion(outputs, labels)
    # Backpropagate.
    loss.backward()
    # Update parameters.
    optimizer.step()
    # Accumulate the detached scalar value. Accumulating the loss *tensor*
    # (the previous `loss_all += loss`) keeps every batch's autograd graph
    # alive for the whole epoch and leaks memory.
    loss_all += loss.item()

    return outputs, loss_all


# 训练lstm
# Train the LSTM
def train_lstm(train_dataloader, test_dataloader, params):
    """Train and evaluate the LSTM decoder.

    :param train_dataloader: Training set as a torch DataLoader; each data
        item is 3-D (data_length, 1, num_neurons) and each label is 2-D
        (data_length, num_dimension).
    :param test_dataloader: Test set with the same layout as the training set.
    :param params: Dict of hyper-parameters (input_size, hidden_size,
        num_layers, output_size, learning_rate, num_epochs).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Unpack hyper-parameters.
    input_size = params['input_size']
    hidden_size = params['hidden_size']
    num_layers = params['num_layers']
    output_size = params['output_size']
    learning_rate = params['learning_rate']
    num_epochs = params['num_epochs']

    # Build the LSTM model.
    model = LSTMModel(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                      output_size=output_size).to(device)
    # MSE loss.
    criterion = nn.MSELoss()
    # Adam optimizer.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop.
    for epoch in range(num_epochs):
        model.train()
        loss_all = 0.0
        # Per-batch r2 scores on the training set.
        r2_scores = []
        for inputs, labels in train_dataloader:
            # NOTE(review): inputs[0]/labels[0] drops the DataLoader batch
            # dimension, so this assumes batch_size=1 — confirm against the
            # DataLoader construction.
            inputs = inputs[0].float().to(device)
            labels = labels[0].float().to(device)

            outputs, loss_all = train_model(inputs, labels, model, criterion, optimizer, loss_all)

            # Convert predictions and targets to numpy; use output_size
            # instead of a hard-coded 2 so other output dimensionalities work.
            y_pred = outputs.cpu().detach().numpy().reshape(-1, output_size)
            y_true = labels.cpu().numpy().reshape(-1, output_size)

            # Variance-weighted r2 over the output dimensions.
            r2 = r2_score(y_true, y_pred, multioutput='variance_weighted')
            r2_scores.append(r2)

        print('Epoch %d loss: %.3f, r2_score: %.3f' % (
            epoch + 1, loss_all / len(train_dataloader), np.mean(r2_scores)), end=',')

        # Evaluation.
        model.eval()
        # Per-batch r2 scores on the test set.
        test_r2 = []
        with torch.no_grad():
            for inputs, labels in test_dataloader:
                inputs = inputs[0].float().to(device)
                labels = labels[0].float().to(device)

                # Model predictions on the test batch.
                outputs = model(inputs)

                # Convert predictions and targets to numpy.
                y_pred = outputs.cpu().detach().numpy().reshape(-1, output_size)
                y_true = labels.cpu().numpy().reshape(-1, output_size)

                # Variance-weighted r2 over the output dimensions.
                r2 = r2_score(y_true, y_pred, multioutput='variance_weighted')
                test_r2.append(r2)

        print(f'Test r2_score: {np.mean(test_r2): .4f}')


if __name__ == '__main__':
    # NOTE(review): these are placeholder strings, NOT real DataLoaders —
    # train_lstm will fail when it tries to iterate them. Replace with actual
    # torch DataLoader objects where each data item is 3-D
    # (data_length, 1, num_neurons) and each label is 2-D
    # (data_length, num_dimension).
    train_dataloader = 'train_dataloader'
    test_dataloader = 'test_dataloader'

    # Hyper-parameter dict for the LSTM (from parameters.get_lstm_params).
    params = get_lstm_params()

    # Start training the LSTM model.
    train_lstm(train_dataloader, test_dataloader, params)
