
from model import LSTM
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import os
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader



# Hyper-parameters and data-split configuration.
LR = 0.0001        # Adam learning rate
EPOCH = 1000       # number of training epochs
TRAIN_END = -300   # presumably: last 300 rows held out for validation/test — confirm against data prep
DAYS_BEFORE = 30   # input window length, in days
DAYS_PRED = 7      # prediction horizon, in days



# Model, optimizer, loss and checkpoint-directory setup.
rnn = LSTM()

# Move the model to GPU when one is available.
if torch.cuda.is_available():
    rnn = rnn.cuda()

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all model parameters
loss_func = nn.MSELoss()

# Best validation loss seen so far. Start at +inf so the first validation
# epoch always saves a checkpoint — the original finite seed (1000) would
# silently prevent any save if early losses exceeded it.
best_loss = float('inf')

# exist_ok avoids the check-then-create race of `if not exists: mkdir`.
os.makedirs('weights', exist_ok=True)

# Train for EPOCH epochs; after each epoch, validate and checkpoint the model
# whenever the mean validation loss improves on the best seen so far.
# NOTE(review): `train_loader` / `val_loader` are not defined in this chunk —
# they must be created elsewhere in the file; verify they exist before running.
for step in range(EPOCH):
    # ---- training pass ----
    rnn.train()  # ensure any dropout layers are in training mode
    for tx, ty in train_loader:
        if torch.cuda.is_available():
            tx = tx.cuda()
            ty = ty.cuda()

        # Add a trailing feature dimension before feeding the LSTM
        # (assumes tx is (batch, seq) -> (batch, seq, 1) — TODO confirm).
        output = rnn(torch.unsqueeze(tx, dim=2))
        loss = loss_func(torch.squeeze(output), ty)
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()        # back propagation, compute gradients
        optimizer.step()

        print('epoch : %d  ' % step, 'train_loss : %.4f' % loss.cpu().item())

    # ---- validation pass ----
    rnn.eval()  # evaluation mode: disables dropout etc. for fair scoring
    val_loss_sum = 0.0
    val_batches = 0
    with torch.no_grad():
        for tx, ty in val_loader:
            if torch.cuda.is_available():
                tx = tx.cuda()
                ty = ty.cuda()

            output = rnn(torch.unsqueeze(tx, dim=2))
            loss = loss_func(torch.squeeze(output), ty)
            val_loss_sum += loss.cpu().item()
            val_batches += 1

            print('epoch : %d  ' % step, 'val_loss : %.4f' % loss.cpu().item())

    # BUG FIX: the original compared only the LAST validation batch's loss
    # against best_loss; average over the whole validation set instead.
    if val_batches > 0:
        mean_val_loss = val_loss_sum / val_batches
        if mean_val_loss < best_loss:
            best_loss = mean_val_loss
            # The original `.format(...)` had no '{}' placeholder, so the path
            # was always 'weights/rnn.pkl'; keep that fixed path so any code
            # that loads the checkpoint elsewhere still finds it.
            torch.save(rnn, 'weights/rnn.pkl')
            print('new model saved at epoch {} with val_loss {}'.format(step, best_loss))