import argparse
import datetime
import os
from pickle import FALSE, TRUE
import time

import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter

from detection.utils import dist_utils
from detection.config import cfg
from detection.data.build import build_data_loaders
from detection.engine.eval import evaluation
from detection.modeling.build import build_detectors
from detection import utils


def main(cfg, args):
    """Evaluate a detector checkpoint on the configured test datasets.

    Args:
        cfg: yacs-style config node; reads DATASETS.TESTS, INPUT.TRANSFORMS_TEST,
            DATALOADER.NUM_WORKERS, TEST.EVAL_TYPES and WORK_DIR.
        args: parsed command-line namespace; uses ``args.model`` (checkpoint path),
            ``args.distributed`` (set by dist_utils.init_distributed_mode) and
            ``args.viz`` (visualization flag).
    """
    test_loaders = build_data_loaders(cfg.DATASETS.TESTS, transforms=cfg.INPUT.TRANSFORMS_TEST, is_train=False,
                                      distributed=args.distributed, num_workers=cfg.DATALOADER.NUM_WORKERS)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = build_detectors(cfg)
    model.to(device)

    # Load on CPU first so a GPU-saved checkpoint never OOMs the device;
    # weights end up on `device` via the model.to(device) call above.
    checkpoint = torch.load(args.model, map_location='cpu')
    # Accept both full training checkpoints ({'model': state_dict, ...}) and
    # bare state_dicts (the previously commented-out variant of this code).
    state_dict = checkpoint['model'] if isinstance(checkpoint, dict) and 'model' in checkpoint else checkpoint
    model.load_state_dict(state_dict)

    evaluation(model, test_loaders, device, types=cfg.TEST.EVAL_TYPES, output_dir=cfg.WORK_DIR, viz=args.viz)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
    parser.add_argument("--config-file", default='configs/DA.yaml', help="path to config file", type=str)
    parser.add_argument('--model', default='work_dir/DA/model_epoch_10.pth', help='resume from checkpoint')
    parser.add_argument("--viz", default=False, help="visualize", action="store_true")

    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    # Trailing KEY VALUE pairs override config entries (yacs merge_from_list format).
    parser.add_argument("opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER)

    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # Sets args.distributed / rank info; must run before get_world_size().
    dist_utils.init_distributed_mode(args)

    # The configured LR assumes this many processes; scale linearly with the
    # actual world size. This must happen BEFORE cfg.freeze() — the original
    # code froze the config first and then merged into it, mutating a frozen
    # config node.
    base_world_size = 4
    world_size = dist_utils.get_world_size()
    if world_size != base_world_size:
        lr = cfg.SOLVER.LR * (float(world_size) / base_world_size)
        print('Change lr from {} to {}'.format(cfg.SOLVER.LR, lr))
        cfg.merge_from_list(['SOLVER.LR', lr])

    # Freeze only after every file/CLI/runtime adjustment has been applied.
    cfg.freeze()

    os.makedirs(cfg.WORK_DIR, exist_ok=True)
    main(cfg, args)
