import torch
import torch.nn as nn
import torch.nn.functional
from sklearn.preprocessing import MinMaxScaler
import numpy as np


def create_inout_sequences(input_data, tw):
    """Slide a window of length ``tw`` over ``input_data``.

    Returns a list of ``(sequence, label)`` pairs where ``sequence`` is a
    window of ``tw`` consecutive values and ``label`` is the single value
    immediately following that window (the next time step to predict).
    """
    pairs = []
    total = len(input_data)
    for start in range(total - tw):
        end = start + tw
        # tw input values, then the one value right after them as target
        window = input_data[start:end]
        target = input_data[end:end + 1]
        pairs.append((window, target))
    return pairs


class LSTM(nn.Module):
    """Single-layer LSTM followed by a linear head.

    Given a 1-D input sequence, returns the prediction for the final
    time step only. The (hidden, cell) state is kept on the instance as
    ``hidden_cell`` so callers can reset it between sequences.
    """

    def __init__(self, input_size=1, hidden_layer_size=100, output_size=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        self.lstm = nn.LSTM(input_size, hidden_layer_size)
        self.linear = nn.Linear(hidden_layer_size, output_size)
        # Initial (h_0, c_0) state: shape (num_layers=1, batch=1, hidden)
        h0 = torch.zeros(1, 1, hidden_layer_size)
        c0 = torch.zeros(1, 1, hidden_layer_size)
        self.hidden_cell = (h0, c0)

    def forward(self, input_seq):
        seq_len = len(input_seq)
        # nn.LSTM expects (seq_len, batch, features); batch is fixed at 1 here
        shaped = input_seq.view(seq_len, 1, -1)
        lstm_out, self.hidden_cell = self.lstm(shaped, self.hidden_cell)
        flat = lstm_out.view(seq_len, -1)
        predictions = self.linear(flat)
        # Only the last time step's prediction is of interest
        return predictions[-1]


def calculate_data(data):
    """Train an LSTM on the 'passengers' series and forecast 12 steps.

    ``data`` is assumed to be a mapping/DataFrame exposing a numeric
    'passengers' column — TODO confirm against the caller.

    Returns the 12 de-normalized predictions as a nested list of shape
    (12, 1), as produced by ``ndarray.tolist()``.
    """
    # Convert the passengers column to a float ndarray (the original
    # comment promised a float conversion but never performed it).
    all_data = np.array(data['passengers']).astype('float64')
    print(all_data, len(all_data))

    # Hold out the last 12 points as a test set; train on the rest.
    test_data_size = 12
    train_data = all_data[:-test_data_size]
    test_data = all_data[-test_data_size:]  # kept for reference/evaluation

    # Scale into [-1, 1]; fit on the training split only so test
    # information does not leak into the scaler.
    scaler = MinMaxScaler(feature_range=(-1, 1))
    train_data_normalized = scaler.fit_transform(train_data.reshape(-1, 1))
    # PyTorch trains on tensors; flatten back to a 1-D sequence.
    train_data_normalized = torch.FloatTensor(train_data_normalized).view(-1)

    train_window = 12  # input sequence length (time_step = 12)
    train_inout_seq = create_inout_sequences(train_data_normalized, train_window)

    model = LSTM()
    loss_function = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    epochs = 300
    count = 0
    model.train()  # make sure the model is in training mode
    for i in range(epochs):
        for seq, labels in train_inout_seq:
            count += 1
            print(count, i)
            # Gradients accumulate by default — clear them every step.
            optimizer.zero_grad()
            # Reset (h, c) so each training sequence starts from a clean state.
            model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                                 torch.zeros(1, 1, model.hidden_layer_size))
            y_pred = model(seq)
            # Forward pass gave y_pred; compute loss, back-propagate, update.
            single_loss = loss_function(y_pred, labels)
            single_loss.backward()
            optimizer.step()

    # Autoregressive forecast: seed with the last train_window normalized
    # values, then feed each new prediction back in as input.
    fut_pred = 12
    test_inputs = train_data_normalized[-train_window:].tolist()

    model.eval()  # switch to evaluation mode for inference
    for i in range(fut_pred):
        seq = torch.FloatTensor(test_inputs[-train_window:])
        with torch.no_grad():
            # BUG FIX: the original assigned ``model.hidden``, an attribute
            # forward() never reads — the state was silently carried over
            # between prediction steps. forward() reads ``hidden_cell``.
            model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                                 torch.zeros(1, 1, model.hidden_layer_size))
            test_inputs.append(model(seq).item())

    # Predictions are in normalized space; invert the scaling to recover
    # actual passenger counts.
    actual_predictions = scaler.inverse_transform(
        np.array(test_inputs[train_window:]).reshape(-1, 1))
    print('结果：', actual_predictions)
    return actual_predictions.tolist()
