import torch
from model import SampleRNN, Predictor
from optim import gradient_clipping
from dataset import FolderDataset, DataLoader


# Hyper-parameter defaults consumed via ``main(**default_params)`` at the
# bottom of this file; callers overriding values must use these exact keys.
default_params = {
    # model parameters
    'n_rnn': 1,                 # number of RNN layers passed to SampleRNN
    'dim': 1024,                # hidden dimensionality passed to SampleRNN
    'learn_h0': True,           # learn the initial hidden state (vs. fixed) — per SampleRNN's contract
    'q_levels': 256,            # quantization levels — presumably 8-bit audio; confirm in model.py
    'seq_len': 1024,            # sequence length handed to the DataLoader
    'weight_norm': True,        # enable weight normalization in SampleRNN
    'batch_size': 128,
    'val_frac': 0.1,            # fraction of data reserved for validation
    'test_frac': 0.1,           # fraction of data reserved for testing

    # training parameters
    'keep_old_checkpoints': False,
    'datasets_path': 'datasets',
    'results_path': 'results',  # NOTE(review): currently ignored by main(), which hard-codes a path
    'epoch_limit': 1000,
    'resume': True,
    'sample_rate': 16000,       # presumably audio sample rate in Hz — confirm against dataset
    'n_samples': 1,
    'sample_length': 80000,
    'loss_smoothing': 0.99,
    'cuda': True,               # move model/predictor to GPU when True
    'comet_key': None,          # Comet.ml API key; None disables experiment logging — TODO confirm
    
    'frame_sizes': [10],        # per-tier frame sizes passed to SampleRNN
}

def main(dataset='dataset', **params):
    """Build a SampleRNN model and smoke-test one forward pass on a batch.

    Most of the training pipeline (Trainer, checkpoint loading) is still
    commented out; this currently constructs the model, an optimizer, and a
    data loader, then prints the output of a single forward pass.

    Parameters
    ----------
    dataset : str
        Dataset identifier handed to ``DataLoader``.
    **params : dict
        Hyper-parameters; see ``default_params`` for the expected keys.
    """
    params = dict(
        dataset=dataset,
        **params
    )

    # BUG FIX: was a hard-coded absolute Windows path
    # (r"C:\dev_spa\DMuse\samplernn-pytorch-master\datasets"); honor the
    # configured value instead.
    results_path = params['results_path']
    # tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm']
    )
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    # Adam wrapped with gradient clipping (see optim.gradient_clipping);
    # currently unused until the commented-out Trainer below is restored.
    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    # BUG FIX: the original passed ``shuffle=(not eval)`` and
    # ``drop_last=(not eval)`` where ``eval`` is the *builtin* function —
    # always truthy — so both flags were silently always False.  This is a
    # training loader, so shuffle and drop ragged final batches.
    is_training = True
    data_loader = DataLoader(
            dataset,  # NOTE(review): FolderDataset is imported but never used — presumably ``dataset`` should be wrapped in one; confirm
            batch_size=params['batch_size'],
            seq_len=params['seq_len'],
            overlap_len=model.lookback,
            shuffle=is_training,
            drop_last=is_training
        )
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    # Smoke test: run the first batch through the model.
    output = model(next(iter(data_loader)))
    print(output)

    # trainer = Trainer(
    #     predictor, sequence_nll_loss_bits, optimizer,
    #     data_loader(0, val_split, eval=False),
    #     cuda=params['cuda']
    # )

    # checkpoints_path = os.path.join(results_path, 'checkpoints')
    # checkpoint_data = load_last_checkpoint(checkpoints_path)
    # if checkpoint_data is not None:
    #     (state_dict, epoch, iteration) = checkpoint_data
    #     trainer.epochs = epoch
    #     trainer.iterations = iteration
    #     predictor.load_state_dict(state_dict)

    # BUG FIX: ``trainer.run(params['epoch_limit'])`` was executed although
    # ``trainer`` is never defined (its construction is commented out above),
    # raising NameError at runtime.  Disabled until Trainer is restored.
    # trainer.run(params['epoch_limit'])
    

# Guard the entry point so importing this module for its definitions does not
# immediately kick off model construction and data loading.
if __name__ == '__main__':
    main(**default_params)
