#!/usr/bin/env python
# -*- coding=utf-8 -*-
"""
@author: xingwg
@license: (C) Copyright 2020-2025.
@contact: xingweiguo@chinasvt.com
@project: boya-reid
@file: test.py
@time: 2020/9/26 10:43
@desc:
"""
import os
import argparse
import glog
import datetime
import json
import numpy as np
import torch
import torch.nn as nn
from src.config import cfg
from src.dataset import make_dataloader
from src.model.make_model import make_model
from src.utils.meter import AverageMeter
from src.utils.metrics import R1_mAP

if __name__ == "__main__":
    # ---------------------------------------------------------------
    # CLI: optional YAML config file plus free-form "KEY VALUE" pairs
    # forwarded to the yacs-style config via merge_from_list.
    # ---------------------------------------------------------------
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file",
        default="",
        help="path to config file",
        type=str
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER
    )

    args = parser.parse_args()

    # Merge file overrides first, then command-line overrides (which win),
    # then freeze so nothing downstream can mutate the config silently.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    glog.info(args)

    # Echo the raw config file and the final merged config for reproducibility.
    if args.config_file != "":
        glog.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            glog.info(config_str)
    glog.info("Running with config:\n{}".format(cfg))

    # Restrict visible GPUs before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_ID

    # Build data pipeline and model; load the checkpoint to evaluate.
    train_loader, val_loader, num_query, num_classes = make_dataloader(cfg)
    model = make_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    device = "cuda"
    model.to(device)
    if torch.cuda.device_count() > 1:
        glog.info('Using {} GPUs for inference'.format(torch.cuda.device_count()))
        model = nn.DataParallel(model)

    glog.info("Enter inferencing")
    model.eval()

    # k1, k2, lambda for k-reciprocal re-ranking (see R1_mAP.compute).
    reranking_parameter = [10, 3, 0.6]
    evaluator = R1_mAP(
        num_query,
        max_rank=200,
        feat_norm=cfg.TEST.FEAT_NORM,
        reranking=cfg.TEST.RE_RANKING
    )

    # Experiment tag used as the sub-directory for all result artifacts.
    model_name = cfg.MODEL.NAME + "_{}x{}_b{}_lr{}_epoch{}_{}{}_{}{}_std".format(
                        cfg.INPUT.SIZE_TRAIN[0],
                        cfg.INPUT.SIZE_TRAIN[1],
                        cfg.SOLVER.IMS_PER_BATCH,
                        cfg.SOLVER.BASE_LR,
                        cfg.SOLVER.MAX_EPOCHS,
                        cfg.MODEL.TRIPLET_LOSS_WEIGHT,
                        cfg.MODEL.METRIC_LOSS_TYPE,
                        cfg.MODEL.ID_LOSS_WEIGHT,
                        cfg.MODEL.ID_LOSS_TYPE,
                    )

    evaluator.reset()

    # BUG FIX: the per-model result directory was never created, so the
    # np.save / json.dump calls below failed with FileNotFoundError on a
    # fresh OUTPUT_DIR. Create it up front (idempotent).
    result_dir = os.path.join(cfg.OUTPUT_DIR, model_name)
    os.makedirs(result_dir, exist_ok=True)
    DISTMAT_PATH = os.path.join(result_dir, "distmat.npy")
    QUERY_PATH = os.path.join(result_dir, "query_path.npy")
    GALLERY_PATH = os.path.join(result_dir, "gallery_path.npy")

    # Feature extraction over the validation set (queries + gallery).
    for n_iter, (img, pid, camid, imgpath) in enumerate(val_loader):
        with torch.no_grad():
            img = img.to(device)
            if cfg.TEST.FLIP_FEATS == 'on':
                # Sum the features of the original and the horizontally
                # flipped image (flip along the width axis, dim 3).
                # NOTE(review): assumes a 2048-dim feature head — confirm
                # this matches the backbone in cfg.MODEL.NAME.
                feat = torch.FloatTensor(img.size(0), 2048).zero_().cuda()
                for i in range(2):
                    if i == 1:
                        inv_idx = torch.arange(img.size(3) - 1, -1, -1).long().cuda()
                        img = img.index_select(3, inv_idx)
                    f = model(img)
                    feat = feat + f
            else:
                feat = model(img)

            evaluator.update((feat, imgpath))

    # Compute the (optionally re-ranked) distance matrix and persist it
    # together with the query/gallery image paths.
    data, distmat, img_name_q, img_name_g = evaluator.compute(reranking_parameter)
    np.save(DISTMAT_PATH, distmat)
    np.save(QUERY_PATH, img_name_q)
    np.save(GALLERY_PATH, img_name_g)

    # Dump the metric dict to a timestamped JSON file in the same directory.
    data_all = {**data}
    nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    with open(os.path.join(result_dir, 'result_{}.json'.format(nowTime)), 'w', encoding='utf-8') as fp:
        json.dump(data_all, fp)