from __future__ import division
from __future__ import print_function

import argparse
import datetime
import os
import os.path as osp
import sys
import time
import warnings

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from reidAlgorithm.reid.tools.generaltools import get_cfg

import models


def main(args):
    """Entry point: build the config, data loaders and model, then run prediction.

    Reads the YAML config named by ``args.config``, derives the output/query/
    gallery paths, redirects stdout into a log file, and finally calls
    :func:`predict` on the query/gallery loaders.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config)
    cfg.MODE = "predictByVideo"
    cfg.params.RESUME = True
    cfg.params.RESULT_IMG_PATH = os.path.join(cfg.params.RESULT_PATH, "images")
    cfg.params.RESULT_JSON_PATH = os.path.join(cfg.params.RESULT_PATH, "json_files")
    cfg.params.QUERY_PATH = os.path.join(cfg.params.RS_FILE_PATH, "query")
    cfg.params.GALLERY_PATH = os.path.join(cfg.params.RS_FILE_PATH, "gallery")

    # Developer mode: suffix log/result directories with a timestamp so
    # repeated runs do not overwrite each other.
    if args.developer_mode:
        stamp = time.strftime("%Y%m%d-%H:%M", time.localtime())
        cfg.params.LOG_PATH = os.path.join(cfg.params.LOG_PATH, cfg.MODE, stamp)
        cfg.params.PREDICT_RESULT = os.path.join(cfg.params.PREDICT_RESULT, stamp)

    cfg.freeze()  # config is read-only from here on
    set_random_seed(cfg.RANDOM_SEED)
    use_gpu = torch.cuda.is_available()

    # Create output directories first, then mirror stdout into the log file.
    log_file = osp.join(cfg.params.LOG_PATH, 'log_predict.txt')
    mkdir_if_missing(osp.dirname(log_file))
    mkdir_if_missing(cfg.params.RESULT_IMG_PATH)
    mkdir_if_missing(cfg.params.RESULT_JSON_PATH)
    sys.stdout = Logger(log_file)

    print('==========\nArgs:{}\n=========='.format(args))

    if not use_gpu:
        warnings.warn('Currently using CPU, however, GPU is highly recommended')
    else:
        print('Currently using GPU ')
        cudnn.benchmark = True

    print('Initializing image data manager')
    data_manager = PredictImageDataManager(use_gpu, cfg)
    queryloader, galleryloader = data_manager.return_dataloaders()

    print('Initializing model: {}'.format(cfg.params.ARCH))
    # NOTE(review): num_classes=93 is hard-coded; presumably it matches the
    # checkpoint's classifier head — confirm against the training config.
    model = models.Baseline(num_classes=93, last_stride=cfg.MODEL.LAST_STRIDE,
                            neck_feat=cfg.MODEL.NECK_FEAT, model_name=cfg.params.ARCH,
                            height=cfg.INPUT.HEIGHT, width=cfg.INPUT.WIDTH,
                            resume=True)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if cfg.params.RESUME and check_isfile(cfg.params.MODEL_PATH):
        resume_from_checkpoint(cfg.params.MODEL_PATH, model, is_eval=True)

    started = time.time()
    predict(cfg, model, queryloader, galleryloader, use_gpu)
    elapsed = str(datetime.timedelta(seconds=round(time.time() - started)))
    print('Elapsed {}'.format(elapsed))


def extract_features_from_loader(loader, model, use_gpu, batch_time, with_meta):
    """Run ``model`` over every batch of ``loader`` and stack the CPU features.

    Internal helper shared by the query and gallery passes of :func:`predict`.

    Args:
        loader: iterable of ``(imgs, ids, img_paths)`` batches. For the query
            loader the ids slot is unused.
        model: network called as ``model(imgs)``; must return a feature tensor.
        use_gpu: move each batch to CUDA before the forward pass when True.
        batch_time: meter with an ``update(seconds)`` method; accumulates the
            per-batch forward time.
        with_meta: when True, also collect the per-image ids and paths
            (gallery pass); when False they are discarded (query pass).

    Returns:
        Tuple ``(features, ids, paths)``; ``ids``/``paths`` are empty lists
        when ``with_meta`` is False.
    """
    feats, ids, paths = [], [], []
    for imgs, batch_ids, img_paths in loader:
        if use_gpu:
            imgs = imgs.cuda()
        end = time.time()
        features = model(imgs)
        batch_time.update(time.time() - end)
        feats.append(features.data.cpu())
        if with_meta:
            ids.extend(batch_ids)
            paths.extend(img_paths)
    return torch.cat(feats, 0), ids, paths


def predict(cfg, model, queryloader, galleryloader, use_gpu, ranks=(1, 5, 10, 20)):
    """Extract query/gallery features, compute distances and save the results.

    Args:
        cfg: frozen config; reads ``params.BATCH_SIZE``,
            ``params.RESULT_JSON_PATH`` and ``params.RESULT_IMG_PATH``.
        model: re-id network, evaluated under ``torch.no_grad()``.
        queryloader: yields ``(imgs, _, img_paths)`` batches.
        galleryloader: yields ``(imgs, gallery_ids, img_paths)`` batches.
        use_gpu: move batches to CUDA when True.
        ranks: unused; kept (as an immutable default now) for backward
            compatibility with existing callers.
    """
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        qf, _, _ = extract_features_from_loader(
            queryloader, model, use_gpu, batch_time, with_meta=False)
        print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        gf, gpids, gpaths = extract_features_from_loader(
            galleryloader, model, use_gpu, batch_time, with_meta=True)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, cfg.params.BATCH_SIZE))
    # L2-normalize so downstream distances operate on unit vectors.
    qf = F.normalize(qf, p=2, dim=1)
    gf = F.normalize(gf, p=2, dim=1)
    # Distance matrix between each query and each vessel ID.
    id_distmat, g_pids, img_dist, g_paths = compute_dis(qf, gf, gpids, gpaths)
    save_json_id_img_result(id_distmat, img_dist, (queryloader, g_paths), g_pids,
                            json_save_dir=cfg.params.RESULT_JSON_PATH, img_save_dir=cfg.params.RESULT_IMG_PATH)


def str2bool(value):
    """Parse a command-line boolean string into an actual bool.

    ``type=bool`` in argparse is a trap: ``bool("False")`` is ``True`` because
    any non-empty string is truthy, so ``--developer_mode False`` would have
    ENABLED developer mode. This converter parses the string properly while
    keeping the original ``--developer_mode True/False`` CLI shape working.

    Raises:
        argparse.ArgumentTypeError: if the value is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got {!r}'.format(value))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="vesselreid Predicting")
    parser.add_argument('--config', type=str, default="config_chuanbo_vreid_predict.yml",
                        help='the file of train/test/infer')
    parser.add_argument('--config_file_path', type=str, default="",
                        help='the config file of train/test/infer from the AI plate!')
    parser.add_argument('--project_dir', type=str, default="",
                        help='project\'s root direction')
    parser.add_argument('--gpu-devices', default='0', type=str,
                        help='gpu device ids for CUDA_VISIBLE_DEVICES')
    # BUGFIX: was type=bool, which made every explicit value (even "False") truthy.
    parser.add_argument('--developer_mode', default=False, type=str2bool,
                        help='true：中南用的开发者模式 false：提交给远望时的模式')
    args = parser.parse_args()
    # NOTE(review): this unconditionally overrides any --project_dir passed on
    # the command line — confirm that is intended.
    args.project_dir = os.path.dirname(os.path.dirname(__file__))
    args.config = os.path.join(args.project_dir, args.config)
    print(args.config)
    # args.config = args.config_file_path  # comment this line out when debugging locally
    main(args)
