import torch
from torch.utils.data import DataLoader
import torch.optim as optim
import sys
from tqdm import tqdm
from DataSetLoader import *
from model import *


# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Pre-processed (FFT) ECG datasets for the 2017 challenge data.
# NOTE(review): ECGDataSet comes from DataSetLoader via star-import; presumably
# yields (signal_tensor, label) pairs — confirm against DataSetLoader.py.
train_dataset = ECGDataSet("./DataSetProcess/MyFFTTraining2017/")
test_dataset = ECGDataSet("./DataSetProcess/MyFFTValidation2017/")

BATCH_SIZE = 32

train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
# NOTE(review): shuffling the validation loader is harmless but unnecessary.
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)

# LSTM classifier from model.py (star-import).
# NOTE(review): argument meanings assumed to be (input_size, hidden_size,
# num_layers, num_classes) = (5120, 512, 4, 4) — TODO confirm in model.py.
rnn_model = LSTMClassifier(5120, 512, 4, 4)
rnn_model.to(device)

optimizer = optim.Adam(rnn_model.parameters(), lr=1e-3, betas=(0.9, 0.99))
loss_function = nn.CrossEntropyLoss()

train_steps = len(train_loader)   # batches per epoch, for averaging the loss
val_num = len(test_dataset)       # total validation samples, for accuracy

best_acc = 0.0                    # best validation accuracy seen so far
epochs = 6000
save_path = './bestNet.pth'       # checkpoint path for the best model

# Train/validate loop. Each epoch: one pass over train_loader with Adam
# updates, then a no-grad pass over test_loader measuring top-1 accuracy.
# Fix: the original declared best_acc/save_path but never saved a checkpoint;
# the best-accuracy model is now written to save_path.
for epoch in range(epochs):
    rnn_model.train()
    running_loss = 0.0
    train_bar = tqdm(train_loader, file=sys.stdout)
    for step, data in enumerate(train_bar):
        ecg, labels = data
        optimizer.zero_grad()
        outputs = rnn_model(ecg.to(device))
        loss = loss_function(outputs, labels.to(device))
        loss.backward()
        optimizer.step()

        # Accumulate the scalar loss once and reuse it for the progress bar
        # (formatting the tensor directly relies on tensor.__format__).
        batch_loss = loss.item()
        running_loss += batch_loss
        train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1, epochs, batch_loss)

    # Validation: no gradients, count correct top-1 predictions.
    rnn_model.eval()
    acc = 0.0
    with torch.no_grad():
        val_bar = tqdm(test_loader, file=sys.stdout)
        for val_data in val_bar:
            val_ecg, val_labels = val_data
            outputs = rnn_model(val_ecg.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

    val_accurate = acc / val_num
    print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' % (epoch + 1, running_loss / train_steps, val_accurate))

    # Persist the best-performing weights (this was the purpose of
    # best_acc/save_path, previously dead variables).
    if val_accurate > best_acc:
        best_acc = val_accurate
        torch.save(rnn_model.state_dict(), save_path)

print('Finished Training')