import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import time
from nets import tandem_normalized

class BatchIdx:
    """Produces epoch-wise mini-batch index arrays for a dataset of `size` samples.

    Samples that do not fill a complete batch (the `size % batch_size` tail)
    are dropped each epoch; with shuffling enabled, a different random tail
    is dropped every call, so all samples participate over many epochs.
    """

    def __init__(self, size, batch_size, shuffle=False):
        # Full index pool — kept intact across epochs (see get_batch_idx).
        self.idx = np.arange(size)
        self.size = size
        self.batch_size = batch_size
        # Number of complete batches; the incomplete tail batch is discarded.
        self.batch_num = size // batch_size
        self.shuffle = shuffle

    def get_batch_idx(self):
        """Return an int array of shape (batch_num, batch_size) of sample indices.

        Bug fix: the original reassigned ``self.idx`` to the truncated slice,
        permanently excluding the tail samples from every later epoch. We now
        slice into a local so the full pool survives and, when shuffling, a
        different tail is dropped each epoch.
        """
        if self.shuffle:
            np.random.shuffle(self.idx)  # in-place shuffle of the full pool
        usable = self.idx[: self.batch_size * self.batch_num]
        return usable.reshape((self.batch_num, -1))

if __name__ == '__main__':
    # ----- Hyperparameters -----
    batch_size = 512
    num_layers = 20
    # Layer widths for the inverse network (spectrum -> structure) and the
    # forward network (structure -> spectrum); the INN output size and FNN
    # input size are both num_layers, so the two nets chain.
    inn_size = np.array([200, 500, 500, 200, num_layers])
    fnn_size = np.array([num_layers, 500, 200, 200, 200])
    learning_rate = 0.0001

    # Tandem setup: load a pre-trained forward network and freeze it so only
    # the inverse network is trained below.
    model = tandem_normalized.Tandem(inn_size, fnn_size, learning_rate)
    model.load_fnn('model/fwd_nn/model_weights_20layers_2024-08-03_20-03.pth')
    for param in model.fnn.parameters():
        param.requires_grad = False
    model = model.cuda()

    # Optimize only the INN parameters; the frozen FNN stays fixed.
    optimizer = optim.Adam(model.inn.parameters(), lr=learning_rate)
    epochs = 2000

    loss_fn = nn.MSELoss()
    loss_fn = loss_fn.cuda()

    # NOTE(review): x_train and y_train load the SAME file. For a tandem
    # network the target is typically the input spectrum itself
    # (reconstruction loss), so this may be intentional — but confirm, and
    # consider y_train = x_train to avoid loading the file twice.
    x_train = np.load('train_set/tran_array_normalized_20layers_2samp_2024-08-03_19-45_rand.npy')
    y_train = np.load('train_set/tran_array_normalized_20layers_2samp_2024-08-03_19-45_rand.npy')
    data_size = len(x_train)
    batch_idx = BatchIdx(data_size, batch_size, shuffle=True)
    batch_num = batch_idx.batch_num

    # Entire training set is moved to the GPU once; batches are indexed views.
    x_train = torch.from_numpy(x_train).float().cuda()
    y_train = torch.from_numpy(y_train).float().cuda()

    time_stamp = time.strftime('%Y-%m-%d_%H-%M', time.localtime())

    # Learning-curve log; `file` shadows the builtin of the same name.
    file = open('learning_curve/' + f'learning_curve_{num_layers}layers_' + time_stamp + '_tandem' + '.txt', 'w')

    # train_dataset = TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train).float())
    # train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    model.train()
    for epoch in range(epochs):
        train_loss = 0

        # for inputs, targets in train_dataloader:
        #     inputs = inputs.cuda()
        #     targets = targets.cuda()

        # Each row of get_batch_idx() is one mini-batch of sample indices.
        for batch_idx_list in batch_idx.get_batch_idx():
            inputs = x_train[batch_idx_list]
            targets = y_train[batch_idx_list]

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_fn(outputs, targets)
            loss.backward()
            optimizer.step()

            # Only pull the scalar loss on epochs that will be logged —
            # .item() forces a GPU sync, so this avoids it on other epochs.
            if (epoch+1) % 10 == 0 or epoch == 0:
                train_loss += loss.item()

        # Log mean batch loss on the first epoch and every 10th epoch.
        if (epoch+1) % 10 == 0 or epoch == 0:
            train_loss = train_loss / batch_num
            print(f'Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.6f}')                        
            file.write(f'Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.6f}\n')
    
        # Periodic checkpoint; the fixed filename means each save overwrites
        # the previous one (only the latest weights are kept).
        if (epoch+1) % 100 == 0:
            torch.save(model.state_dict(), f'model/tandem_nn/model_weights_{num_layers}layers_' + time_stamp + '_tandem' + '.pth')
            print('model saved')
    file.close()
    # Final save (same path as the periodic checkpoints).
    torch.save(model.state_dict(), f'model/tandem_nn/model_weights_{num_layers}layers_' + time_stamp + '_tandem' + '.pth')
    print('model saved')
    print('Training complete.')