import torch as pt
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import sys
import matplotlib
# matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt


def sep(label='', cnt=32):
    """Print a section separator: `cnt` dashes, the label, `cnt` dashes."""
    dashes = '-' * cnt
    print(f'{dashes}{label}{dashes}')


# Fix both RNGs so repeated runs are reproducible.
np.random.seed(1)
pt.manual_seed(1)

# Hyperparameters: LSTM hidden size, training iterations, learning rate.
N_NEURONS = 20
ITERS = 400
ALPHA = 0.01

sep('cpu or gpu')
# Prefer the first CUDA device when available, otherwise fall back to CPU.
if pt.cuda.is_available():
    device = 'cuda:0'
else:
    device = 'cpu'
print(device)
print(device, file=sys.stderr)
device = pt.device(device)

sep('load data')
# Daily stock data (assumes OHLCV-style columns with the close price last —
# TODO confirm against the file); header=1 skips the first file line and uses
# the second as column names.
path = '../../../../../large_data/DL1/stock/data-02-stock_daily.csv'
data = pd.read_csv(path, header=1)
print(data.shape)
# Reverse the row order — presumably the CSV is newest-first and the
# sliding-window construction below needs oldest-first; verify with the file.
data = data[::-1]

sep('tidy data')
# Standardize every column to zero mean / unit variance.
# NOTE(review): the scaler is fit on the FULL dataset before the train/test
# split below, which leaks test-set statistics into training. Consider
# fitting on the training portion only and transforming the test portion.
scaler = StandardScaler()
data = scaler.fit_transform(data)

# Build supervised pairs: each sample is a window of `n_steps` consecutive
# rows; its target is the last column of the row immediately after the window.
M, N = data.shape
n_steps = 7
x = np.float32([data[i:i + n_steps] for i in range(M - n_steps)])
y = np.float32([data[i + n_steps, -1:] for i in range(M - n_steps)])

# Chronological 70/30 split — shuffle=False keeps the time ordering intact.
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, shuffle=False)
for split_name, arr in (('x_train', x_train), ('y_train', y_train),
                        ('x_test', x_test), ('y_test', y_test)):
    print(split_name, np.shape(arr))

# Move everything onto the chosen device as float32 tensors.
x_train = pt.Tensor(x_train).to(device)
y_train = pt.Tensor(y_train).to(device)
x_test = pt.Tensor(x_test).to(device)
y_test = pt.Tensor(y_test).to(device)

sep('model')


class MyManyToOneLstm(pt.nn.Module):
    """Many-to-one LSTM regressor: a sequence in, a single scalar out.

    The LSTM reads a (batch, n_steps, n_inputs) tensor; only the output at
    the final time step is passed through a linear head of size 1.
    """

    def __init__(self, n_inputs, n_neurons, n_layers, **kwargs):
        super().__init__(**kwargs)
        # batch_first=True -> tensors are laid out (batch, seq, feature).
        self.lstm = pt.nn.LSTM(n_inputs, n_neurons, n_layers, batch_first=True)
        self.fc = pt.nn.Linear(n_neurons, 1)

    def forward(self, x):
        outputs, _ = self.lstm(x)   # (batch, n_steps, n_neurons)
        last_step = outputs[:, -1]  # (batch, n_neurons)
        return self.fc(last_step)   # (batch, 1)


# Single-layer LSTM over all N input features, full-batch Adam training.
model = MyManyToOneLstm(N, N_NEURONS, 1).to(device)
optim = pt.optim.Adam(model.parameters(), lr=ALPHA)
criterion = pt.nn.MSELoss()

sep('train')
# Log progress roughly 20 times over the whole run.
GROUP = int(np.ceil(ITERS / 20))
cost_his = np.zeros(ITERS)
for step in range(ITERS):
    # One full-batch gradient step on the training set.
    model.train()
    optim.zero_grad()
    h_train = model(x_train)
    loss = criterion(h_train, y_train)
    loss.backward()
    optim.step()
    model.eval()
    # .item() already detaches and moves the scalar to host memory.
    loss = loss.item()
    cost_his[step] = loss
    if step % GROUP == 0 or step == ITERS - 1:
        print(f'#{step + 1}: cost = {loss}')

sep('test')
# Inference only: no_grad() skips autograd bookkeeping, so the forward pass
# does not build (and hold memory for) a computation graph.
with pt.no_grad():
    h_test = model(x_test)
h_test = h_test.cpu().numpy()
y_test = y_test.cpu().numpy()

sep('plot')
# Bring the last training-step predictions and targets back to host memory
# for plotting; h_train carries grad history, so detach before .numpy().
h_train = h_train.cpu().detach().numpy()
y_train = y_train.cpu().detach().numpy()

# Three side-by-side panels: training cost curve, train fit, test fit.
plt.figure(figsize=[14, 6])
panels = [
    ('cost in iterations', [(cost_his, 'cost', None)]),
    ('train', [(h_train, 'hypothesis', 0.5), (y_train, 'target', 0.5)]),
    ('test', [(h_test, 'hypothesis', 0.5), (y_test, 'target', 0.5)]),
]
for panel_idx, (title, series) in enumerate(panels, start=1):
    plt.subplot(1, 3, panel_idx)
    plt.title(title)
    for values, label, alpha in series:
        if alpha is None:
            plt.plot(values, label=label)
        else:
            plt.plot(values, label=label, alpha=alpha)
    plt.legend()

print('Check and close the plotting window to go on ...')
plt.show()
print('Over')
