import os
import time

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

from nets import network


# ---- Hyperparameters -------------------------------------------------------
batch_size = 100
fnn_size = np.array([10, 500, 200, 200, 200])  # layer widths for the forward net
learning_rate = 0.0001
epochs = 2000

# ---- Model, optimizer, loss ------------------------------------------------
model = network.Fwd(fnn_size, learning_rate)
model = model.cuda()

optimizer = optim.Adam(model.parameters(), lr=learning_rate)

loss_fn = nn.MSELoss()
loss_fn = loss_fn.cuda()

# ---- Training data ---------------------------------------------------------
x_train = np.load('train_set/t_array_10layers_4sample.npy')
y_train = np.load('train_set/tran_array_10layers_4sample.npy')

train_dataset = TensorDataset(torch.from_numpy(x_train).float(),
                              torch.from_numpy(y_train).float())
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# ---- Output paths ----------------------------------------------------------
# Create output directories up front so a fresh checkout doesn't crash on the
# first write (previously open()/torch.save() failed if these were missing).
time_stamp = time.strftime('%Y-%m-%d_%H-%M', time.localtime())
os.makedirs('learning_curve', exist_ok=True)
os.makedirs('model/fwd_nn', exist_ok=True)
curve_path = 'learning_curve/' + 'learning_curve' + time_stamp + '.txt'
# Single checkpoint path shared by the periodic and final saves (was duplicated).
ckpt_path = 'model/fwd_nn/model_weights' + time_stamp + '.pth'

# ---- Training loop ---------------------------------------------------------
# `with` guarantees the learning-curve file is closed (and its buffer flushed)
# even if training raises partway through the 2000 epochs.
with open(curve_path, 'w') as file:
    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for inputs, targets in train_dataloader:
            inputs = inputs.cuda()
            targets = targets.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_fn(outputs, targets)
            loss.backward()
            optimizer.step()
            # .item() detaches the scalar so the autograd graph is not retained.
            train_loss += loss.item()
        train_loss = train_loss / len(train_dataloader)

        # Log every 10th epoch (and the first) to stdout and the curve file.
        if (epoch + 1) % 10 == 0 or epoch == 0:
            print(f'Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.4f}')
            file.write(f'Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.4f}\n')
            file.flush()  # don't lose the curve if the run is interrupted

        # Periodic checkpoint so a crash doesn't cost the whole run.
        if (epoch + 1) % 100 == 0:
            torch.save(model.state_dict(), ckpt_path)
            print('model saved')

torch.save(model.state_dict(), ckpt_path)
print('model saved')
print('Training complete.')
