# ------------------------------------------------------------------------------
# pose.pytorch
# Copyright (c) 2018-present Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import pprint

import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, DistributedSampler

import _init_paths
from config import cfg
from config import update_config
from core.loss import JointsMSELoss
from core.trainer import Trainer
# from core.function import validate
from utils.utils import create_logger, is_dist_avail_and_initialized, get_rank
from dataset.collater import collaterCOCO, collaterVCOCO
import dataset
import models

import datetime
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def parse_args():
    """Parse command-line options for keypoint-network training/validation.

    Returns:
        argparse.Namespace with ``cfg`` (required config path), ``opts``
        (free-form config overrides), four optional directory overrides,
        and ``local_rank`` for distributed launchers.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')
    # general
    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=True,
                        type=str)

    # Everything after the known flags is forwarded verbatim to the config.
    parser.add_argument('opts',
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    # Optional directory overrides, all plain strings defaulting to ''.
    dir_flags = (
        ('--modelDir', 'model directory'),
        ('--logDir', 'log directory'),
        ('--dataDir', 'data directory'),
        ('--prevModelDir', 'prev Model directory'),
    )
    for flag, flag_help in dir_flags:
        parser.add_argument(flag, help=flag_help, type=str, default='')

    parser.add_argument("--local_rank", type=int, default=0)
    return parser.parse_args()


def init_distributed_mode(args):
    """Join the NCCL process group using torchrun-style environment variables.

    Reads RANK, WORLD_SIZE and LOCAL_RANK from the environment (raises
    KeyError if any is missing), stores them on ``args``, binds this process
    to its local GPU, and initializes the default process group.
    """
    env = os.environ
    args.rank = int(env["RANK"])
    args.world_size = int(env['WORLD_SIZE'])
    args.local_rank = int(env['LOCAL_RANK'])

    # One process per GPU: pin this rank to its device before init.
    torch.cuda.set_device(args.local_rank)
    print('| distributed init (rank {}), gpu {}'.format(args.rank, args.local_rank), flush=True)
    # Generous 12h timeout so long evaluations do not trip the NCCL watchdog.
    dist.init_process_group(backend="nccl", init_method="env://",
                            timeout=datetime.timedelta(minutes=60 * 12))


def cleanup():
    """Tear down the default distributed process group before exit."""
    torch.distributed.destroy_process_group()

def main():
    """Distributed validation entry point.

    Parses CLI options, joins the NCCL process group, builds the validation
    dataset/loader (sharded across ranks via DistributedSampler), loads model
    weights, wraps the model in DDP, runs validation on every rank, gathers
    the per-rank records, and computes keypoint metrics on rank 0 only.
    """
    args = parse_args()
    update_config(cfg, args)

    init_distributed_mode(args)
    local_rank = args.local_rank
    global_rank = args.rank

    logger, final_output_dir, tb_log_dir = create_logger(cfg, args.cfg, 0, 'valid')

    # Only rank 0 logs the full configuration to avoid duplicated output.
    if global_rank == 0:
        logger.info(pprint.pformat(args))
        logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Data loading code (ImageNet mean/std normalization).
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    # Resolve the dataset class by attribute lookup instead of eval() on a
    # config-supplied string; behavior is identical for plain class names.
    valid_dataset = getattr(dataset, cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )

    # Shard the validation set across ranks when running under torch.distributed.
    sampler = DistributedSampler(valid_dataset) if is_dist_avail_and_initialized() else None
    print(f'=> global_rank: {get_rank()} | sampler=None: {sampler is None}')

    if cfg.DATASET.DATASET == 'coco':
        collate_fn_valid = collaterCOCO(0, cfg.DATASET.PATCH_MODE)
    else:   # VCOCO
        collate_fn_valid = collaterVCOCO()

    if cfg.TEST.USE_GT_BBOX:
        valid_loader = DataLoader(
            valid_dataset,
            batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
            shuffle=False,
            num_workers=cfg.WORKERS,
            pin_memory=True,
            sampler=sampler,
            collate_fn=collate_fn_valid
        )
    else:
        # NOTE(review): this branch hard-codes batch_size=56 and does not
        # pass collate_fn_valid -- confirm whether that is intentional for
        # detected-bbox evaluation or a leftover from debugging.
        valid_loader = DataLoader(
            valid_dataset,
            batch_size=56,
            shuffle=False,
            num_workers=cfg.WORKERS,
            sampler=sampler,
            pin_memory=True
        )

    # Resolve the model-factory module by name (same replacement of eval()).
    model = getattr(models, cfg.MODEL.NAME).get_pose_net(
        cfg, is_train=False
    )

    # Load weights on CPU first; DDP moves them to the right GPU below.
    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        ckpt_state_dict = torch.load(cfg.TEST.MODEL_FILE, map_location=torch.device('cpu'))
        model.load_state_dict(ckpt_state_dict, strict=True)   #  strict=False FOR UNSeen Resolutions
    else:
        model_state_file = os.path.join(
            final_output_dir, 'final_state.pth'
        )
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file, map_location=torch.device('cpu')))

    model = model.cuda()
    ddp_model = DDP(model, device_ids=[local_rank])

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT
    ).cuda()

    trainer = Trainer(args, cfg)
    # Evaluate on the validation set; every rank produces a partial record.
    one_record = trainer.validate(valid_loader, valid_dataset, ddp_model, criterion,
                                    final_output_dir, global_rank)
    all_records = trainer.all_gather(one_record)

    # Metric computation happens once, on the merged records, on rank 0.
    if global_rank == 0:
        all_records = trainer.merge_dicts(all_records)
        trainer.evaluate_kpts(all_records, valid_dataset, final_output_dir)
    cleanup()


# Script entry point: run distributed validation when launched directly
# (e.g. via torchrun / torch.distributed.launch).
if __name__ == '__main__':
    main()
