import argparse

import torch

from metric_trainer import MetricTrainer
from model import MetricNet

# Fix every RNG source up front so repeated runs are reproducible.
seed = 0
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)

# Trade cuDNN's autotuning speed for deterministic kernel selection.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

def _str2bool(value):
    """Parse a command-line boolean.

    Accepts true/false, yes/no, t/f, y/n, 1/0 (case-insensitive).
    Raises argparse.ArgumentTypeError for anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


parser = argparse.ArgumentParser(description='Self-Supervised Learning - BYOL - PyTorch')
parser.add_argument('-data', metavar='DIR', default='./data', help='path to dataset')
parser.add_argument('--epochs', default=4000, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--batch_size',
                    default=64,
                    type=int,
                    metavar='N',
                    help='mini-batch size (default: 64), this is the total '
                    'batch size of all GPUs on the current node when '
                    'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
                    '--learning_rate',
                    default=0.01,
                    type=float,
                    metavar='LR',
                    help='initial learning rate',
                    dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='Momentum (default: 0.9)', dest='momentum')
parser.add_argument('--weight_decay',
                    default=1e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 1e-4)',
                    dest='weight_decay')
# BUG FIX: the original used type=bool, but bool("False") is True (any
# non-empty string is truthy), so the flag could never be disabled from the
# command line. _str2bool parses the string properly.
parser.add_argument('--save_model', default=True, type=_str2bool, help='To save model, True or False')
parser.add_argument('--gpus', default=2, type=int, help='Number of Gpus used during training (default: 2)')
parser.add_argument(
    '--num_workers',
    default=2,
    type=int,
    help=
    'how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 2)'
)


def main():
    """Build the sketch and CAD metric backbones with their optimizers and
    LR schedulers, run metric training, and save the learned features.
    """
    classes = 48  # number of categories in the dataset — TODO confirm against MetricTrainer
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    sk_backbone = MetricNet().to(device)
    cad_backbone = MetricNet().to(device)
    # BUG FIX: --gpus defaults to 2, so the original wrapped the models in
    # DataParallel with device_ids=range(args.gpus) even on CPU-only machines
    # (or machines with fewer GPUs), which crashes. Only wrap when CUDA is
    # available, and never request more devices than actually exist.
    if args.gpus > 0 and torch.cuda.is_available():
        device_ids = list(range(min(args.gpus, torch.cuda.device_count())))
        sk_backbone = torch.nn.DataParallel(sk_backbone, device_ids=device_ids)
        cad_backbone = torch.nn.DataParallel(cad_backbone, device_ids=device_ids)
    # Independent SGD optimizers for the two branches, sharing hyper-parameters.
    sk_optimizer = torch.optim.SGD(sk_backbone.parameters(),
                                   lr=args.lr,
                                   weight_decay=args.weight_decay,
                                   momentum=args.momentum)
    cad_optimizer = torch.optim.SGD(cad_backbone.parameters(),
                                    lr=args.lr,
                                    weight_decay=args.weight_decay,
                                    momentum=args.momentum)
    # Halve the learning rate every 500 epochs on both branches.
    sk_scheduler = torch.optim.lr_scheduler.StepLR(sk_optimizer, step_size=500, gamma=0.5)
    cad_scheduler = torch.optim.lr_scheduler.StepLR(cad_optimizer, step_size=500, gamma=0.5)
    trainer = MetricTrainer(sk_backbone=sk_backbone,
                            cad_backbone=cad_backbone,
                            sk_optimizer=sk_optimizer,
                            cad_optimizer=cad_optimizer,
                            sk_scheduler=sk_scheduler,
                            cad_scheduler=cad_scheduler,
                            device=device,
                            params=args,
                            classes=classes)
    trainer.train()
    trainer.save_features()


# Launch training only when run as a script, not when imported as a module.
if __name__ == '__main__':
    main()