import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from model import RNN, TrainSet
from utils import readData


n = 10            # length of the input window (days used to predict the next close)
LR = 0.0001       # learning rate
EPOCH = 100       # number of training epochs
train_end = -500  # reserve the last 500 rows as the held-out segment
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the dataset ('收盘' is the closing-price column of the CSV)
df, df_all, df_index = readData('收盘', file_path="data/gzmt.csv", n=n, train_end=train_end)

df_all = np.array(df_all.tolist())
plt.plot(df_index, df_all, label='real-data')  # full real closing-price series

# Z-score normalization of the training windows
df_numpy = np.array(df)
df_numpy_mean = np.mean(df_numpy)
df_numpy_std = np.std(df_numpy)
df_numpy = (df_numpy - df_numpy_mean) / df_numpy_std
df_tensor = torch.Tensor(df_numpy).to(device)
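
# Keep the normalization statistics so predictions can be mapped back to the
# original price scale at inference time. A minimal sketch: the .npz file name
# and location are an assumption, not part of the original pipeline.
np.savez('data/norm_stats.npz', mean=df_numpy_mean, std=df_numpy_std)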
trainset = TrainSet(df_tensor)
trainloader = DataLoader(trainset, batch_size=16, shuffle=True)
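
# Purely illustrative sanity check of the batch layout (safe to delete).
# TrainSet is assumed to yield (window, target) pairs, matching how the
# training loop below unpacks each batch.
sample_x, sample_y = next(iter(trainloader))
print('batch shapes:', sample_x.shape, sample_y.shape)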
rnn = RNN(n).to(device)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all RNN parameters
loss_func = nn.MSELoss()
for epoch in range(EPOCH):
    for tx, ty in trainloader:
        # add the leading dimension that RNN's forward expects
        output = rnn(torch.unsqueeze(tx, dim=0))
        loss = loss_func(torch.squeeze(output), ty)
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()        # back-propagate to compute gradients
        optimizer.step()       # apply the parameter update
    print(epoch, loss.item())  # loss of the last batch in this epoch

os.makedirs('model', exist_ok=True)  # no-op if the directory already exists
torch.save(rnn.state_dict(), 'model/last.bin')
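
# A minimal sketch of visualizing the fit on the training window. Assumptions
# (not guaranteed by the source): df_tensor rows are [x_1..x_n, y] windows, the
# model accepts the same (1, batch, n) input layout used in the training loop,
# and window i predicts the close at df_index[i + n].
rnn.eval()
with torch.no_grad():
    eval_out = rnn(torch.unsqueeze(df_tensor[:, :-1], dim=0))
preds = torch.squeeze(eval_out).cpu().numpy() * df_numpy_std + df_numpy_mean
plt.plot(df_index[n:n + len(preds)], preds, label='train-fit')
plt.legend()
plt.show()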
