from torch.utils.data import DataLoader
import torch
from torch import nn
from train_step import train
from utils import (parser_args,
                   parser_from_config,
                   print_config,
                   seed_everything,
                   instaniate_from_config)
import os
from torch.utils.tensorboard import SummaryWriter

def main(args):
    """Run the full training pipeline from a config file.

    Builds the model, dataset/dataloader, and optimizer from ``args.config``,
    optionally resumes from a saved checkpoint, then trains for the configured
    number of epochs while logging the loss to TensorBoard and saving periodic
    checkpoints under ``train.log_dir``.
    """
    print(f"Training config: {args.config}")
    config = parser_from_config(args.config)
    training_config = config['train']

    ### Configure PFNN ###
    model = instaniate_from_config(config['model'])

    ### Configure Training Parameters ###
    print_config(training_config)
    seed_everything(training_config['seed'])
    dset = instaniate_from_config(config['data'])
    dloader = DataLoader(dset,
                         batch_size=training_config['batch_size'],
                         shuffle=True,
                         num_workers=training_config['num_workers'],
                         drop_last=True)
    device = torch.device(training_config['device'])  # Single GPU for now

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=training_config['lr'],
                                 weight_decay=training_config['weight_decay'])
    epochs = training_config['epochs']
    criterion = nn.MSELoss()  # Default to MSE Loss

    ### Resume or not ###
    os.makedirs(training_config['log_dir'], exist_ok=True)
    if config.get('resume', None):
        # Load from the same location and naming scheme used when saving
        # below (log_dir/checkpoint-<epoch>.pth). map_location lets a
        # checkpoint saved on another device be restored on this one.
        ckpt_path = os.path.join(training_config['log_dir'],
                                 f"checkpoint-{config['resume']}.pth")
        checkpoint = torch.load(ckpt_path, map_location=device)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # The saved epoch is already complete; continue with the next one.
        start_epoch = checkpoint['epoch'] + 1
        print(f"Resume from epoch {checkpoint['epoch']}")
    else:
        start_epoch = 0
        print('Start training')

    ### Training ###
    writer = SummaryWriter(os.path.join(training_config['log_dir'], 'runs'))
    model.to(device)
    try:
        # Start at start_epoch (0 for a fresh run) so the first epoch is not
        # silently skipped and exactly `epochs` epochs are trained.
        for epoch in range(start_epoch, epochs):
            train_loss = train(model,
                               dloader,
                               device,
                               optimizer,
                               criterion,
                               epoch)
            writer.add_scalar('train_loss', train_loss, epoch)
            if epoch % training_config['save_every'] == 0:
                state = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'epoch': epoch,
                }
                torch.save(state,
                           os.path.join(training_config['log_dir'],
                                        f'checkpoint-{epoch}.pth'))
    finally:
        writer.close()  # flush pending TensorBoard events even on error


if __name__ == '__main__':
    # Parse command-line arguments and launch training when run as a script.
    main(parser_args())