# from __future__ import division
# from __future__ import print_function
#
# import argparse
# import datetime
# import os
# import os.path as osp
# import sys
# import time
# import warnings
# from collections import OrderedDict
#
# import torch
# import torch.backends.cudnn as cudnn
# import torch.nn as nn
# from torch.utils.data import DataLoader
#
# from reid import data_manager
# from reid.engine import tester
# from reid.models.PSTA import PSTA
# from reid.tools.generaltools import get_cfg, set_random_seed
# from reid.tools.loggers import Logger
# from reid.tools.torchtools import count_num_param
# from reid.utils import transforms as T
# from reid.utils.video_loader import VideoDataset
#
# sys.path.append(os.getcwd())
#
#
# def main(args):
#     torch.cuda.empty_cache()
#     cfg = get_cfg()
#     cfg.merge_from_file(args.config)
#     cfg.MODE = "test"
#     cfg.params.RESUME = True
#
#     # 中南大学开发时需配置pycharm中的运行参数 --developer_mode True
#     if args.developer_mode:
#         time_now = time.strftime("%Y%m%d-%H_%M", time.localtime())
#         cfg.params.LOG_PATH = os.path.join(cfg.params.LOG_PATH, cfg.MODE, time_now)
#
#     cfg.freeze()
#     set_random_seed(cfg.RANDOM_SEED)
#     use_gpu = torch.cuda.is_available()
#     log_name = 'log_test.txt'
#     logdir = osp.join(cfg.params.LOG_PATH, log_name)
#     os.makedirs(osp.dirname(logdir), exist_ok=True)
#     sys.stdout = Logger(logdir)
#
#     print('==========\nArgs:{}\n=========='.format(args))
#
#     if use_gpu:
#         print('Currently using GPU')
#         cudnn.benchmark = True
#     else:
#         warnings.warn('Currently using CPU, however, GPU is highly recommended')
#
#     print('\n初始化DataManager')
#     dataset = data_manager.init_dataset(mode=cfg.MODE,
#                                         cfg=cfg)
#
#     print('\n模型初始化')
#     model = PSTA(num_classes=625,
#                  pretrain_choice=cfg.MODEL.PRETRAIN_CHOICE,
#                  model_name=cfg.MODEL.NAME,
#                  seq_len=cfg.MODEL.SEQ_LEN)
#     print('Model size: {:.3f} M'.format(count_num_param(model)))
#     model = nn.DataParallel(model).cuda() if use_gpu else model
#
#     transform_test = T.Compose([
#         T.Resize(cfg.INPUT.SIZE_TEST),
#         T.ToTensor(),
#         T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#     ])
#     pin_memory = True if use_gpu else False
#
#     if cfg.params.TEST_SAMPLER == 'dense':
#         print('Build dense sampler')
#         queryloader = DataLoader(
#             dataset=VideoDataset(dataset=dataset.query,
#                                  seq_len=cfg.MODEL.SEQ_LEN,
#                                  sample=cfg.params.TEST_SAMPLER,
#                                  transform=transform_test,
#                                  max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
#             batch_size=1,
#             shuffle=False,
#             num_workers=cfg.DATALOADER.NUM_WORKERS,
#             pin_memory=pin_memory,
#             drop_last=False
#         )
#
#         galleryloader = DataLoader(
#             dataset=VideoDataset(dataset=dataset.gallery,
#                                  seq_len=cfg.MODEL.SEQ_LEN,
#                                  sample=cfg.params.TEST_SAMPLER,
#                                  transform=transform_test,
#                                  max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
#             batch_size=1,
#             shuffle=False,
#             num_workers=cfg.DATALOADER.NUM_WORKERS,
#             pin_memory=pin_memory,
#             drop_last=False,
#         )
#     else:
#         queryloader = DataLoader(
#             dataset=VideoDataset(dataset=dataset.query,
#                                  seq_len=cfg.MODEL.SEQ_LEN,
#                                  sample=cfg.params.TEST_SAMPLER,
#                                  transform=transform_test,
#                                  max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
#             batch_size=cfg.params.SEQS_PER_BATCH,
#             shuffle=False,
#             num_workers=cfg.DATALOADER.NUM_WORKERS,
#             pin_memory=pin_memory,
#             drop_last=False
#         )
#
#         galleryloader = DataLoader(
#             dataset=VideoDataset(dataset=dataset.gallery,
#                                  seq_len=cfg.MODEL.SEQ_LEN,
#                                  sample=cfg.params.TEST_SAMPLER,
#                                  transform=transform_test,
#                                  max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
#             batch_size=cfg.params.SEQS_PER_BATCH,
#             shuffle=False,
#             num_workers=cfg.DATALOADER.NUM_WORKERS,
#             pin_memory=pin_memory,
#             drop_last=False
#         )
#
#     start_time = time.time()
#
#     print("Loading checkpoint from '{}'".format(cfg.params.MODEL_PATH))
#     print("load model... ")
#     checkpoint = torch.load(cfg.params.MODEL_PATH)
#     new_state_dict = OrderedDict()
#     for k, v in checkpoint['state_dict'].items():
#         name = k[7:]
#         new_state_dict[name] = v
#     model.load_state_dict(new_state_dict, strict=False)
#
#     print("Evaluate...")
#     tester(model, queryloader, galleryloader, use_gpu, test_distance=cfg.params.TEST_DISTANCE)
#
#     elapsed = round(time.time() - start_time)
#     elapsed = str(datetime.timedelta(seconds=elapsed))
#     print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
#
#
# if __name__ == '__main__':
#     parser = argparse.ArgumentParser(description="vesselreid Testing")
#     parser.add_argument('--config', type=str, default="config_chuanbo_vreid_test.yml",
#                         help='the file of train/test/infer')
#     parser.add_argument('--config_file_path', type=str,
#                         default=r"Z:\Code_Pile\Programs\DataProcess\2023_lu_data_process_course-master\config_chuanbo_vreid_test.yml",
#                         help='the config file of train/test/infer from the AI plate!')
#     parser.add_argument('--project_dir', type=str, default="",
#                         help=r'project\'s root direction')
#     parser.add_argument('--gpu-devices', default='1', type=str,
#                         help='gpu device ids for CUDA_VISIBLE_DEVICES')
#     parser.add_argument('--developer_mode', default=True, type=bool,
#                         help='true：中南用的开发者模式 false：提交给远望时的模式')
#     args = parser.parse_args()
#     args.project_dir = os.path.dirname(os.path.dirname(__file__))
#     args.config = os.path.join(args.project_dir, args.config)
#     # args.config = args.config_file_path  # 自己调试时注释掉该行
#     main(args)

from __future__ import division
from __future__ import print_function
import chardet
import argparse
import datetime
import os
import os.path as osp
import sys
import time
import warnings
from collections import OrderedDict

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch.utils.data import DataLoader

from reid import data_manager
from reid.engine import tester
from reid.models.PSTA import PSTA
from reid.tools.generaltools import get_cfg, set_random_seed
from reid.tools.loggers import Logger
from reid.tools.torchtools import count_num_param
from reid.utils import transforms as T
from reid.utils.video_loader import VideoDataset

sys.path.append(os.getcwd())

from yacs.config import CfgNode as CN
def main(args):
    """Run the vessel re-identification test pipeline.

    Loads the YAML config named by ``args.config``, builds query/gallery
    dataloaders, restores the checkpoint named in the config, and evaluates
    the model with ``tester``.

    Args:
        args: parsed argparse namespace; must provide ``config`` (path to a
            YAML config file) and ``developer_mode`` (bool).
    """
    torch.cuda.empty_cache()
    cfg = get_cfg()

    # Load the config file. It may be stored in a non-UTF-8 encoding
    # (historically GB2312), so detect the encoding instead of hard-coding
    # one. The original code ran chardet but then ignored its result.
    print("Loading config from file: {}".format(args.config))
    with open(args.config, 'rb') as f:
        raw = f.read()
    encoding = chardet.detect(raw)['encoding'] or 'GB2312'  # legacy fallback
    print(encoding)
    config_str = raw.decode(encoding)

    # Turn the YAML text into a CfgNode and merge it over the defaults.
    config_node = CN.load_cfg(config_str)
    cfg.merge_from_other_cfg(config_node)

    cfg.MODE = "test"
    cfg.params.RESUME = True

    # Developer mode (run with --developer_mode True): write logs into a
    # timestamped sub-directory so successive runs do not overwrite each other.
    if args.developer_mode:
        time_now = time.strftime("%Y%m%d-%H%M", time.localtime())  # no ':' so the path is valid on Windows
        cfg.params.LOG_PATH = os.path.join(cfg.params.LOG_PATH, cfg.MODE, time_now)

    cfg.freeze()
    set_random_seed(cfg.RANDOM_SEED)
    use_gpu = torch.cuda.is_available()
    logdir = osp.join(cfg.params.LOG_PATH, 'log_test.txt')
    os.makedirs(osp.dirname(logdir), exist_ok=True)
    sys.stdout = Logger(logdir)  # tee stdout into the log file

    print('==========\nArgs:{}\n=========='.format(args))

    if use_gpu:
        print('Currently using GPU')
        cudnn.benchmark = True
    else:
        warnings.warn('Currently using CPU, however, GPU is highly recommended')

    print('\n初始化DataManager')
    dataset = data_manager.init_dataset(mode=cfg.MODE, cfg=cfg)

    print('\n模型初始化')
    model = PSTA(num_classes=625,
                 pretrain_choice=cfg.MODEL.PRETRAIN_CHOICE,
                 model_name=cfg.MODEL.NAME,
                 seq_len=cfg.MODEL.SEQ_LEN)
    print('Model size: {:.3f} M'.format(count_num_param(model)))
    model = nn.DataParallel(model).cuda() if use_gpu else model

    transform_test = T.Compose([
        T.Resize(cfg.INPUT.SIZE_TEST),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    pin_memory = use_gpu  # pinned host memory only helps when copying to GPU

    # The dense sampler evaluates every clip of a tracklet, so it must run
    # with batch_size=1; other samplers can batch normally. The two loaders
    # differed only in batch size, so build both through one helper.
    if cfg.params.TEST_SAMPLER == 'dense':
        print('Build dense sampler')
        batch_size = 1
    else:
        batch_size = cfg.params.SEQS_PER_BATCH

    def _build_loader(split):
        # Evaluation order must be deterministic: never shuffle, never drop
        # the last partial batch.
        return DataLoader(
            dataset=VideoDataset(dataset=split,
                                 seq_len=cfg.MODEL.SEQ_LEN,
                                 sample=cfg.params.TEST_SAMPLER,
                                 transform=transform_test,
                                 max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
            batch_size=batch_size,
            shuffle=False,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            pin_memory=pin_memory,
            drop_last=False
        )

    queryloader = _build_loader(dataset.query)
    galleryloader = _build_loader(dataset.gallery)

    start_time = time.time()

    print("Loading checkpoint from '{}'".format(cfg.params.MODEL_PATH))
    print("load model... ")
    # map_location keeps CPU-only machines from crashing on checkpoints that
    # were saved with CUDA tensors.
    checkpoint = torch.load(cfg.params.MODEL_PATH,
                            map_location=None if use_gpu else 'cpu')
    new_state_dict = OrderedDict()
    for k, v in checkpoint['state_dict'].items():
        # Strip the 'module.' prefix that nn.DataParallel adds when saving;
        # keys saved without the prefix are kept as-is (the old code blindly
        # dropped the first 7 characters and corrupted such keys).
        name = k[7:] if k.startswith('module.') else k
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict, strict=False)

    print("Evaluate...")
    tester(model, queryloader, galleryloader, use_gpu, test_distance=cfg.params.TEST_DISTANCE)

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="vesselreid Testing")
    parser.add_argument('--config', type=str, default="config_chuanbo_vreid_test.yml",
                        help='the file of train/test/infer')
    parser.add_argument('--config_file_path', type=str,
                        default="/mnt/data/xyc/projects/vesselreidproject/program/reidAlgorithm/config_chuanbo_vreid_test.yml",
                        help='the config file of train/test/infer from the AI plate!')
    parser.add_argument('--project_dir', type=str, default="",
                        help='project\'s root direction')
    parser.add_argument('--gpu-devices', default='1', type=str,
                        help='gpu device ids for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--developer_mode', default=True, type=bool,
                        help='true：中南用的开发者模式 false：提交给远望时的模式')
    args = parser.parse_args()
    args.project_dir = os.path.dirname(os.path.dirname(__file__))
    args.config = os.path.join(args.project_dir, args.config)
    # args.config = args.config_file_path  # 自己调试时注释掉该行
    main(args)

