import time
from argparse import ArgumentParser, ArgumentTypeError
from os import path as osp

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader, Subset

from dataloading import Node2VecDataset
from model import Node2VecPredictor


def arg_parse():
    parser = ArgumentParser()
    parser.add_argument('--patience', type=int, default=20, help='patience of early stopping')
    parser.add_argument('--batch_size', type=int, default=8, help='batch size')

    parser = Trainer.add_argparse_args(parser)
    parser_model = parser.add_argument_group('model')
    parser_model.add_argument('--learning_rate', type=float, default=5e-3, help='learning_rate')
    parser_model.add_argument('--weight_decay', type=float, default=5e-3, help='weight_decay')
    parser_model.add_argument('--in_feats', type=int, default=128, help='input feats')
    parser_model.add_argument('--out_feats', type=int, default=1, help='output feats')
    parser_model.add_argument('--hid_feats', type=int, default=128, help='hidden feats')
    parser_model.add_argument('--num_mlp', type=int, default=3, help='number of mlp')

    parser_dataset = parser.add_argument_group('dataset')
    parser_dataset.add_argument('--root_dir', type=str, default='F:\\Python-projects\\DatasetAnalysis\\data\\dataset',
                              help='root dir')
    parser_dataset.add_argument('--dataset_name', type=str, default='topic', help='dataset name')
    parser_dataset.add_argument('--observation', type=float, default=2, help='observation time window')
    parser_dataset.add_argument('--save_dir', type=str, default='data', help='save dir')
    parser_dataset.add_argument('--follower_hop', type=int, default=0, help='hop of follow sample')
    parser_dataset.add_argument('--virtual_node', type=str, default=None, choices=['source', 'all', 'None'], help='nodes connect with virtual node')
    parser_dataset.add_argument('--reload_graphs', type=bool, default=False, help='reload graphs')
    parser_dataset.add_argument('--reload_tensors', type=bool, default=False, help='reload tensors')
    parser_dataset.add_argument('--save', type=bool, default=True, help='save graphs and tensors')

    args = parser.parse_args()
    return parser, args


if __name__ == '__main__':
    # Per-dataset sampling fraction, passed straight to Node2VecDataset.
    sample_rates = {
        'topic': 0.05,
        'repost': 0.05,
        'twitter': 1,
    }
    parser, args = arg_parse()
    dict_args = vars(args)
    start_time = time.time()
    # Re-group parsed args by the argument groups declared in arg_parse().
    # NOTE(review): relies on the private argparse attributes _action_groups
    # and _group_actions, and on the magic slice [3:] skipping argparse's two
    # built-in groups plus the Lightning Trainer group so that only the
    # 'model' and 'dataset' groups remain — brittle; confirm the index still
    # lines up if Trainer.add_argparse_args changes its group layout.
    arg_groups = {g.title: {arg.dest: dict_args[arg.dest] for arg in g._group_actions} for g in
                  parser._action_groups[3:]}

    model = Node2VecPredictor(**arg_groups['model'])
    dataset = Node2VecDataset(**arg_groups['dataset'], sample_rates=sample_rates)
    data_process_time = time.time()  # marks the end of data loading/preprocessing
    # TensorBoard logs land under lightning_logs/<follower_hop>-hop/<dataset.name>/.
    hp = f'{args.follower_hop}-hop'
    logger = TensorBoardLogger(save_dir=osp.join('lightning_logs', hp),
                               name=dataset.name, )
    # Hyperparameters recorded to TensorBoard; model args merged in below.
    hps = {'dataset': args.dataset_name,
                 'observation': args.observation,
                 'batch_size': args.batch_size,
                 'patience': args.patience,
                 }
    hps.update(arg_groups['model'])
    # logger.experiment.add_hparams(hps,metric_dict={})

    # NOTE(review): 'valid loss' (with a space) must exactly match the metric
    # name logged by Node2VecPredictor's validation step — confirm in model.py.
    early_stopping = EarlyStopping(patience=args.patience, monitor='valid loss', mode='min')
    trainer = Trainer.from_argparse_args(args, callbacks=[early_stopping], log_every_n_steps=5, logger=logger)

    # Sequential 70/15/15 train/valid/test split. No shuffling is done here —
    # presumably the dataset's natural order is acceptable; verify upstream.
    tot = len(dataset)
    cnt_train = int(tot * 0.7)
    cnt_valid = int(tot * 0.85)
    dataset_indices = list(range(tot))

    training_dataloader = DataLoader(Subset(dataset, dataset_indices[:cnt_train]),
                                     batch_size=args.batch_size)
    valid_dataloader = DataLoader(Subset(dataset, dataset_indices[cnt_train:cnt_valid]),
                                  batch_size=args.batch_size)
    test_dataloader = DataLoader(Subset(dataset, dataset_indices[cnt_valid:]),
                                 batch_size=args.batch_size)

    # trainer.tune runs Lightning's tuning routines (e.g. the LR finder when
    # the corresponding Trainer flag is enabled); rst is falsy when no tuning
    # was requested, so the block below only fires after an actual LR search.
    rst = trainer.tune(model, train_dataloader=training_dataloader, val_dataloaders=valid_dataloader)
    if rst:
        lr_finder = rst['lr_find']
        new_lr = lr_finder.suggestion()
        hps['learning_rate'] = new_lr  # log the suggested LR instead of the CLI value
    logger.log_hyperparams(hps)
    # NOTE(review): the singular 'train_dataloader=' / plural 'test_dataloaders='
    # keywords tie this script to a specific pytorch-lightning version — newer
    # releases renamed these; confirm against the pinned PL version.
    trainer.fit(model, train_dataloader=training_dataloader, val_dataloaders=valid_dataloader)
    trainer.test(model, test_dataloaders=test_dataloader)
    end_time = time.time()
    print(f"data processing time cost: {data_process_time - start_time}")
    print(f"model running time cost: {end_time - data_process_time}")
    print(f"total time cost: {end_time-start_time}")
    # print(arg_groups)
