import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
from nets import network

# --- Hyperparameters and model construction ---
batch_size = 200  # samples per mini-batch for both train and test loaders
# Layer-size spec handed to network.Fwd: 20-dim input, hidden widths 500/200/200,
# 200-dim output — presumably; exact interpretation depends on network.Fwd (TODO confirm).
fnn_size = np.array([20, 500, 200, 200, 200])
learning_rate = 0.0001
# Project-local forward model. NOTE(review): the learning rate is passed to the
# model constructor AND to the Adam optimizer below — confirm Fwd does not build
# its own optimizer internally, otherwise one of the two is redundant.
model = network.Fwd(fnn_size, learning_rate)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
epochs = 6000
loss_fn = nn.MSELoss()  # mean-squared-error regression loss

# --- Data loading ---
# Inputs / targets loaded from .npy files and split 50/50 with a fixed seed so
# the split is reproducible across runs. (Assumed from filenames: per-layer
# thickness arrays -> transmission spectra — confirm against the data files.)
x_train, x_test, y_train, y_test = train_test_split(np.load('train_set/t_array_20layers_2sample.npy'),
                                                    np.load('train_set/tran_array_20layers_2sample.npy'),
                                                    test_size=0.5, random_state=50)
train_dataset = TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train).float())
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test).float())
# shuffle=False for evaluation: batch order does not affect metrics, and a fixed
# order keeps any per-batch inspection reproducible (the original shuffled the
# test loader needlessly).
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# --- Training loop ---
# Trains for `epochs` full passes; every 10 epochs reports train loss and now
# also evaluates on the held-out split (which the original script loaded but
# never used); every 100 epochs overwrites the single checkpoint file.
for epoch in range(epochs):
    model.train()
    train_loss = 0.0
    for inputs, targets in train_dataloader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_fn(outputs, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()  # accumulate per-batch mean loss
    # Average of per-batch means; exact only if all batches are equal-sized.
    train_loss = train_loss / len(train_dataloader)

    if (epoch + 1) % 10 == 0:
        # Periodic evaluation on the test split — gradients disabled and the
        # model switched to eval mode (matters if Fwd uses dropout/batchnorm).
        model.eval()
        test_loss = 0.0
        with torch.no_grad():
            for inputs, targets in test_dataloader:
                test_loss += loss_fn(model(inputs), targets).item()
        test_loss = test_loss / len(test_dataloader)
        print(f'Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.4f}')
        print(f'Epoch {epoch + 1}/{epochs}, Test Loss: {test_loss:.4f}')

    if (epoch + 1) % 100 == 0:
        # Checkpoint: overwrites the same file each time (latest weights only).
        torch.save(model.state_dict(), 'model_weights_20layers_2sample.pth')
        print('model saved')
