import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split

torch.manual_seed(777)  # fixed seed so LSTM weight init is reproducible

# datasets
# Daily stock CSV (numeric columns only). The last column is used as the
# regression target -- presumably the close price; confirm against the file.
path = '../../../../../large_data/DL1/stock/data-02-stock_daily.csv'
data = np.loadtxt(path, delimiter=',')
scaler = MinMaxScaler(feature_range=(0, 1))
# NOTE(review): the scaler is fit on the FULL dataset before the train/test
# split below, so test-set statistics leak into training. Acceptable for a
# demo; fit on the training split only for an honest evaluation.
data = scaler.fit_transform(data)
data = data[::-1]  # reverse rows -- presumably the file is newest-first and we want oldest-first; TODO confirm

x = data           # inputs: every column of the scaled data
y = data[:, -1:]   # target: last column, kept 2-D as (N, 1)

nb_input = 5       # features per time step (CSV column count)
nb_output = 1      # predicted values per window
nb_neuron = 20     # LSTM hidden size
time_step = 7      # window length: 7 past days predict the next day
layers_num = 1     # number of stacked LSTM layers

# Sliding windows: x_data[i] holds days [i, i+time_step); y_data[i] is the
# target on day i+time_step (one-step-ahead prediction).
x_data = []
y_data = []
for i in range(0, len(y) - time_step):
    x_ = x[i: i+time_step]
    y_ = y[i+time_step]
    x_data.append(x_)
    y_data.append(y_)

# shuffle=False keeps chronological order: train on the earlier 70%,
# test on the most recent 30%.
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, shuffle=False)

x_train = torch.Tensor(x_train)
x_test = torch.Tensor(x_test)
y_train = torch.Tensor(y_train)
y_test = torch.Tensor(y_test)

class LSTM(nn.Module):
    """Single-output LSTM regressor.

    Maps a (batch, time_step, nb_input) sequence to a (batch, nb_output)
    prediction computed from the final hidden state of the last LSTM layer.
    """

    def __init__(self, nb_input, nb_output, nb_neuron, layers_num):
        super(LSTM, self).__init__()
        # batch_first=True -> inputs/outputs are (batch, seq, feature)
        self.lstm = nn.LSTM(input_size=nb_input, hidden_size=nb_neuron, num_layers=layers_num, batch_first=True)
        self.fc = nn.Linear(in_features=nb_neuron, out_features=nb_output)

    def forward(self, inputs):
        """Run the LSTM and regress from the last layer's final hidden state.

        Fix: the original used ``np.squeeze(states[0])``, which only works by
        duck-typing and collapses the batch dimension when batch_size == 1,
        yielding a wrongly-shaped output. Indexing ``h_n[-1]`` keeps shape
        (batch, nb_neuron) for any batch size and stays inside autograd.
        """
        # h_n has shape (num_layers, batch, nb_neuron); take the last layer.
        _, (h_n, _) = self.lstm(inputs, None)
        return self.fc(h_n[-1])  # (batch, nb_output)

if __name__ == '__main__':
    model = LSTM(nb_input, nb_output, nb_neuron, layers_num)

    # Mean-squared error on the scaled target; Adam for fast convergence
    # on this small full-batch problem.
    criterions = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    # Full-batch gradient descent: the whole training set in every step.
    model.train()
    for step in range(200):
        optimizer.zero_grad()
        h = model(x_train)
        loss = criterions(h, y_train)
        loss.backward()
        optimizer.step()
        print('Epoch:', step + 1, '\t\tloss:{:.5f}'.format(loss.item()))

    # Inference: eval mode + no_grad avoids building an autograd graph and
    # replaces the deprecated/unsafe `.data` access of the original.
    model.eval()
    with torch.no_grad():
        test_predic = model(x_test)

    # Predicted vs. actual (both still in scaled units).
    plt.plot(test_predic.numpy())
    plt.plot(y_test.numpy())
    plt.show()
