import argparse

import torch

from sketch_trainer import StudentTrainer
from model import ResNextCls

# Fix RNG seeds and force deterministic cuDNN kernels so runs are reproducible.
seed = 42
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)  # no-op on CPU-only machines
# benchmark=False disables cuDNN autotuning (which picks kernels
# nondeterministically); deterministic=True restricts to deterministic kernels.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

parser = argparse.ArgumentParser(description='Sketch training - PyTorch')
parser.add_argument('-data', metavar='DIR', default='./data', help='path to dataset')
parser.add_argument('--epochs', default=120, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--batch_size',
                    default=512,
                    type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                    'batch size of all GPUs on the current node when '
                    'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
                    '--learning_rate',
                    default=0.01,
                    type=float,
                    metavar='LR',
                    help='initial learning rate',
                    dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='Momentum (default: 0.9)', dest='momentum')
parser.add_argument('--weight_decay',
                    default=1e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('--save_model', default=False, type=bool, help='To save model, True or False')
parser.add_argument('--gpus', default=2, type=int, help='Number of Gpus used during training (default: 0)')


def main():
    """Parse CLI args, build model/optimizer/scheduler, and run training."""
    classes = 48  # number of sketch categories expected by ResNextCls
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    backbone = ResNextCls(num_class=classes).to(device)
    # DataParallel needs CUDA devices; the original wrapped whenever
    # args.gpus > 0 (default 2) and therefore crashed on CPU-only machines.
    if args.gpus > 0 and torch.cuda.is_available():
        backbone = torch.nn.DataParallel(backbone, device_ids=range(args.gpus))
    optimizer = torch.optim.SGD(backbone.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                momentum=args.momentum)
    # Halve the learning rate every 20 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    trainer = StudentTrainer(backbone=backbone,
                             optimizer=optimizer,
                             scheduler=scheduler,
                             device=device,
                             params=args,
                             classes=classes)
    trainer.train(num_workers=2)
    # trainer.save_features('resnet-50')


# Run training only when executed as a script, not when imported.
if __name__ == '__main__':
    main()