import argparse
import numpy as np
import os.path as osp
import init_path
from dcr_reid.utils.utils import str2bool
from dcr_reid.utils.utils import tight_float_str as tfs
from dcr_reid.utils.utils import time_str
from dcr_reid.utils.utils import ReDirectSTD
from dcr_reid.utils.utils import set_devices_for_ml
from dcr_reid.utils.utils import set_seed
from dcr_reid.utils.utils import load_state_dict
from dcr_reid.utils.utils import save_ckpt
from dcr_reid.utils.utils import load_ckpt
from dcr_reid.utils.utils import adjust_lr_exp
from dcr_reid.utils.utils import adjust_lr_staircase
from dcr_reid.utils.utils import may_set_mode
from dcr_reid.utils.utils import AverageMeter
from dcr_reid.utils.utils import to_scalar
from dcr_reid.dataset import create_dataset
from dcr_reid.model.Model import Model
from dcr_reid.model.Model_1 import Model_1
from dcr_reid.model.PartialModel import PartialModel
from dcr_reid.model.TripletLoss import TripletLoss
from dcr_reid.model.loss import global_loss
from dcr_reid.model.loss import spatial_loss
from dcr_reid.log.show_logs import StepLog
from dcr_reid.log.show_logs import EpochLog
from dcr_reid.log.show_logs import Log2TensorBoard
import pprint
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import threading
import time


class Config(object):
    """Command-line driven configuration for mutual-learning ReID training.

    Parses known args only (unknown flags are ignored) and derives dataset
    kwargs, model hyper-parameters, loss weights, the learning-rate schedule
    and all logging/checkpoint paths.
    """

    def __init__(self):
        parser = argparse.ArgumentParser()
        # One device group per model, e.g. ((0,1,2), (3,4,5)) for two models.
        # NOTE: type=eval evaluates the raw flag string; trusted input only.
        parser.add_argument('-d', '--sys_device_ids', type=eval, default=((0, 1, 2), (3, 4, 5)))
        parser.add_argument('--num_models', type=int, default=2)
        parser.add_argument('-r', '--run', type=int, default=1)
        parser.add_argument('--set_seed', type=str2bool, default=False)
        parser.add_argument('--dataset', type=str, default='market1501',
                            choices=['market1501', 'cuhk03', 'duke', 'combined'])
        parser.add_argument('--trainset_part', type=str, default='trainval',
                            choices=['trainval', 'train'])
        parser.add_argument('--partial_dataset', type=str, default='holistic',
                            choices=['Partial_REID', 'Partial_iLIDS', 'holistic'])
        # (height, width); a common alternative is (256, 128).
        parser.add_argument('--resize_h_w', type=eval, default=(384, 192))
        parser.add_argument('--crop_prob', type=float, default=0)
        parser.add_argument('--crop_ratio', type=float, default=1)
        parser.add_argument('--ids_per_batch', type=int, default=32)
        parser.add_argument('--ims_per_id', type=int, default=4)

        parser.add_argument('--log_to_file', type=str2bool, default=True)
        parser.add_argument('--exp_dir', type=str, default='Data_backup')
        parser.add_argument('--model_weight_file', type=str, default='')
        parser.add_argument('--last_conv_stride', type=int, default=1, choices=[1, 2])
        parser.add_argument('--normalize_feature', type=str2bool, default=True)

        # Triplet-loss margins (global / spatial branches).
        parser.add_argument('-gm', '--global_margin', type=float, default=0.3)
        parser.add_argument('-sm', '--spatial_margin', type=float, default=0.3)

        # Loss weights: global, spatial, identification, and the three
        # mutual-learning terms (probability, global-distance, spatial-distance).
        parser.add_argument('-glw', '--g_loss_weight', type=float, default=1.)
        parser.add_argument('-slw', '--s_loss_weight', type=float, default=1.)
        parser.add_argument('-idlw', '--id_loss_weight', type=float, default=1.)
        parser.add_argument('-pmlw', '--pm_loss_weight', type=float, default=1.)
        parser.add_argument('-gdmlw', '--gdm_loss_weight', type=float, default=1.)
        parser.add_argument('-sdmlw', '--sdm_loss_weight', type=float, default=1.)

        parser.add_argument('--only_test', type=str2bool, default=False)
        parser.add_argument('--resume', type=str2bool, default=False)
        parser.add_argument('--base_lr', type=float, default=2e-4)
        parser.add_argument('--lr_decay_type', type=str, default='exp',
                            choices=['exp', 'staircase'])
        parser.add_argument('--exp_decay_at_epoch', type=int, default=76)
        parser.add_argument('--staircase_decay_at_epochs', type=eval, default=(101, 201,))
        parser.add_argument('--staircase_decay_multiply_factor', type=float, default=0.1)
        parser.add_argument('--total_epochs', type=int, default=400)
        args = parser.parse_known_args()[0]

        self.sys_device_ids = args.sys_device_ids
        # A fixed seed makes runs reproducible (together with 1 prefetch thread).
        if args.set_seed:
            self.seed = 1
        else:
            self.seed = None

        # The experiments can be run for several times and performances be averaged.
        # `run` starts from `1`, not `0`.
        self.run = args.run
        ###########
        # Dataset #
        ###########

        # If you want to exactly reproduce the result in training, you have to
        # set the number of prefetch threads to 1.
        if self.seed is not None:
            self.prefetch_threads = 1
        else:
            self.prefetch_threads = 2

        self.dataset = args.dataset
        self.partial_dataset = args.partial_dataset
        self.trainset_part = args.trainset_part

        # Image processing (training set only).
        self.crop_prob = args.crop_prob
        self.crop_ratio = args.crop_ratio
        self.resize_h_w = args.resize_h_w

        # Whether to scale pixel values by 1/255.
        self.scale_im = True
        # NOTE(review): these differ slightly from the usual ImageNet stats
        # (0.485, 0.456, 0.406) -- confirm whether that is intentional.
        self.im_mean = [0.486, 0.459, 0.408]
        self.im_std = [0.229, 0.224, 0.225]

        # `ids_per_per_batch` keeps the original (misspelled) attribute name
        # for backward compatibility; prefer `ids_per_batch`.
        self.ids_per_batch = args.ids_per_batch
        self.ids_per_per_batch = self.ids_per_batch
        self.ims_per_id = args.ims_per_id
        self.train_final_batch = True
        self.train_mirror_type = ['random', 'always', None][0]
        self.train_shuffle = True

        self.test_batch_size = 32
        self.test_final_batch = True
        self.test_mirror_type = ['random', 'always', None][2]
        self.test_shuffle = False

        dataset_kwargs = dict(
            name=self.dataset,
            resize_h_w=self.resize_h_w,
            scale=self.scale_im,
            im_mean=self.im_mean,
            im_std=self.im_std,
            # PyTorch uses 'NCHW', while TensorFlow uses 'NHWC'.
            batch_dims='NCHW',
            num_prefetch_threads=self.prefetch_threads
        )

        self.prng = np.random
        if self.seed is not None:
            self.prng = np.random.RandomState(self.seed)
        self.train_set_kwargs = dict(
            part=self.trainset_part,
            ids_per_batch=self.ids_per_per_batch,
            ims_per_id=self.ims_per_id,
            final_batch=self.train_final_batch,
            shuffle=self.train_shuffle,
            crop_prob=self.crop_prob,
            crop_ratio=self.crop_ratio,
            mirror_type=self.train_mirror_type,
            testim='holistic',
            prng=self.prng
        )
        self.train_set_kwargs.update(dataset_kwargs)

        self.prng = np.random
        if self.seed is not None:
            self.prng = np.random.RandomState(self.seed)
        # `test_set_kwards` keeps the original (misspelled) attribute name for
        # backward compatibility; `test_set_kwargs` aliases the same dict.
        self.test_set_kwards = dict(
            part='test',
            batch_size=self.test_batch_size,
            final_batch=self.test_final_batch,
            shuffle=self.test_shuffle,
            mirror_type=self.test_mirror_type,
            testim=self.partial_dataset,
            prng=self.prng
        )
        self.test_set_kwards.update(dataset_kwargs)
        self.test_set_kwargs = self.test_set_kwards

        ###############
        # ReID Model  #
        ###############
        # The last block of ResNet has stride 2; setting the stride to 1
        # doubles the spatial resolution before global pooling.
        self.last_conv_stride = args.last_conv_stride
        # Whether to normalize features to unit length along the channel
        # dimension before computing distance.
        self.normalize_feature = args.normalize_feature

        self.local_conv_out_channels = 128
        self.global_margin = args.global_margin
        self.spatial_margin = args.spatial_margin

        # Identification loss weight.
        self.id_loss_weight = args.id_loss_weight
        # Global loss weight.
        self.g_loss_weight = args.g_loss_weight
        # Spatial loss weight.
        self.s_loss_weight = args.s_loss_weight

        ###############
        # Mutual Loss #
        ###############
        # Probability mutual loss weight.
        self.pm_loss_weight = args.pm_loss_weight
        # Global distance mutual loss weight.
        self.gdm_loss_weight = args.gdm_loss_weight
        # Spatial distance mutual loss weight.
        self.sdm_loss_weight = args.sdm_loss_weight

        self.num_models = args.num_models
        # See method 'set_devices_for_ml' in 'dcr_reid/utils/utils.py' for details.
        # Raise (instead of assert) so the check survives `python -O`.
        if len(self.sys_device_ids) != self.num_models:
            raise ValueError('You should specify device for each model.')

        #############
        # Training  #
        #############
        self.weight_decay = 0.0005

        # Initial learning rate.
        self.base_lr = args.base_lr
        self.lr_decay_type = args.lr_decay_type
        self.exp_decay_at_epoch = args.exp_decay_at_epoch
        self.staircase_decay_at_epochs = args.staircase_decay_at_epochs
        self.staircase_decay_multiply_factor = args.staircase_decay_multiply_factor
        # Number of epochs to train.
        self.total_epochs = args.total_epochs

        # How often (in batches) to log. If only the per-epoch average is
        # needed, set this to a large value, e.g. 1e10.
        self.log_steps = 1e10

        # Only test, without training.
        self.only_test = args.only_test

        self.resume = args.resume
        #######
        # Log #
        #######

        # If True,
        # 1) stdout and stderr will be redirected to file,
        # 2) training loss etc. will be written to tensorboard,
        # 3) checkpoints will be saved.
        self.log_to_file = args.log_to_file
        # The root dir of logs.
        if args.exp_dir == '':
            self.exp_dir = osp.join(
                'exp/train',
                '{}'.format(self.dataset),
                ('nf_' if self.normalize_feature else 'not_nf_') +
                'gm_{}_'.format(tfs(self.global_margin)) +
                'sm_{}_'.format(tfs(self.spatial_margin)) +
                'glw_{}_'.format(tfs(self.g_loss_weight)) +
                'slw_{}_'.format(tfs(self.s_loss_weight)) +
                'idlw_{}_'.format(tfs(self.id_loss_weight)) +
                'pmlw_{}_'.format(tfs(self.pm_loss_weight)) +
                'gdmlw_{}_'.format(tfs(self.gdm_loss_weight)) +
                'sdmlw_{}_'.format(tfs(self.sdm_loss_weight)) +
                'base_lr_{}_'.format(tfs(self.base_lr)) +
                # BUGFIX: the format string was missing its '{}' placeholder,
                # so the decay type never appeared in the directory name.
                'lr_decay_type_{}_'.format(self.lr_decay_type) +
                ('decay_at_{}_'.format(self.exp_decay_at_epoch)
                 if self.lr_decay_type == 'exp'
                 else 'decay_at_{}_factor_{}_'.format(
                    '_'.join([str(e) for e in args.staircase_decay_at_epochs]),
                    tfs(self.staircase_decay_multiply_factor))) +
                'total_{}_'.format(self.total_epochs),
                'run{}'.format(self.run)
            )
        else:
            self.exp_dir = args.exp_dir
        self.stdout_file = osp.join(
            self.exp_dir, 'stdout_{}.txt'.format(time_str()))
        self.stderr_file = osp.join(
            self.exp_dir, 'stderr_{}.txt'.format(time_str()))

        # Saving model weights and optimizer states, for resuming.
        self.ckpt_file = osp.join(self.exp_dir, 'ckpt.pth')
        # Just for loading a pretrained model; optimizer states not needed.
        self.model_weight_file = args.model_weight_file

def main():
    """Build config, datasets, models and optimizers, then either test only
    or run multi-threaded deep-mutual-learning training followed by a final
    test. One worker thread is spawned per model; each training step runs in
    two synchronized phases (forward/per-model loss, then mutual loss and
    backward)."""
    cfg = Config()
    if cfg.log_to_file:
        ReDirectSTD(cfg.stdout_file, 'stdout', False)
        ReDirectSTD(cfg.stderr_file, 'stderr', False)

    # Lazily created SummaryWriter; Log2TensorBoard receives None until then.
    writer = None
    print(cfg.sys_device_ids)
    TVTs, TMOs, relative_device_ids = set_devices_for_ml(cfg.sys_device_ids)

    if cfg.seed is not None:
        set_seed(cfg.seed)

    # Dump the configurations to log.
    print('-' * 60)
    print('cfg.__dict__')
    pprint.pprint(cfg.__dict__)
    print('-' * 60)

    ###########
    # Dataset #
    ###########
    train_set = create_dataset(**cfg.train_set_kwargs)
    test_sets = []
    test_set_names = []
    if cfg.dataset == 'combined':
        for name in ['market1501', 'cuhk03', 'duke']:
            cfg.test_set_kwards['name'] = name
            test_sets.append(create_dataset(**cfg.test_set_kwards))
            test_set_names.append(name)
    else:
        test_sets.append(create_dataset(**cfg.test_set_kwards))
        test_set_names.append(cfg.partial_dataset)

    ##########
    # Models #
    ##########
    # Both branches only differed in the model class; pick it once.
    model_cls = Model if cfg.partial_dataset == 'holistic' else PartialModel
    models = [model_cls(last_conv_stride=cfg.last_conv_stride,
                        local_conv_out_channels=cfg.local_conv_out_channels,
                        num_classes=len(train_set.ids2labels))
              for _ in range(cfg.num_models)]
    # Model wrappers: one DataParallel per device group.
    model_ws = [DataParallel(models[i], device_ids=relative_device_ids[i])
                for i in range(cfg.num_models)]

    #############################
    # Criteria and Optimizers   #
    #############################
    id_criterion = nn.CrossEntropyLoss()
    g_tri_loss = TripletLoss(margin=cfg.global_margin)
    s_tri_loss = TripletLoss(margin=cfg.spatial_margin)
    # The first argument to the Adam constructor tells the optimizer which
    # Tensors it should update.
    optimizers = [optim.Adam(m.parameters(), lr=cfg.base_lr, weight_decay=cfg.weight_decay)
                  for m in models]
    # Bind them together just to save some code in the following usage.
    modules_optims = models + optimizers

    ################################
    # May Resume Models and Optims #
    ################################
    if cfg.resume:
        resume_ep, scores = load_ckpt(modules_optims, cfg.ckpt_file)
    # Transfer models and optimizers to the specified devices. Optimizer
    # states are transferred too, to cope with loading a checkpoint that was
    # saved on a different device.
    for TMO, model, optimizer in zip(TMOs, models, optimizers):
        TMO([model, optimizer])

    ########
    # Test #
    ########
    def test(load_model_weight=False):
        """Evaluate every model on every test set, optionally loading weights
        from cfg.model_weight_file (or the checkpoint) first."""
        if load_model_weight:
            if cfg.model_weight_file != '':
                map_location = (lambda storage, loc: storage)
                sd = torch.load(cfg.model_weight_file, map_location=map_location)
                # NOTE(review): `model` here is the leftover loop variable from
                # the device-transfer loop above (i.e. the last model); confirm
                # that loading a single weight file into it is intended.
                load_state_dict(model, sd)
                print('Loaded model weights from {}.'.format(cfg.model_weight_file))
            else:
                load_ckpt(modules_optims, cfg.ckpt_file)
        for i, (model_w, TVT) in enumerate(zip(model_ws, TVTs)):
            for test_set, name in zip(test_sets, test_set_names):
                test_set.set_feat_func(ExtractFeature(model_w, TVT))
                print('\n===============>Test Model#{} on dataset:{}<===============\n'.format(i + 1, name))
                test_set.eval(normalize_feat=cfg.normalize_feature, to_re_rank=False, verbose=True)

    if cfg.only_test:
        test(load_model_weight=True)
        return

    ############
    # Training #
    ############
    # Cross-thread storage: slot i belongs to model/thread i.
    ims_list = [None for _ in range(cfg.num_models)]
    labels_list = [None for _ in range(cfg.num_models)]
    done_list1 = [False for _ in range(cfg.num_models)]
    done_list2 = [False for _ in range(cfg.num_models)]
    probs_list = [None for _ in range(cfg.num_models)]
    g_dist_mat_list = [None for _ in range(cfg.num_models)]
    s_dist_mat_list = [None for _ in range(cfg.num_models)]
    """
    Two phases for each model:
    1) forward and single-model loss;
    2) further add mutual loss and backward.
    The 2nd phase only starts when the 1st is finished for all models.
    """
    run_event1 = threading.Event()
    run_event2 = threading.Event()
    # Meant to stop threads; with daemon=True this is mostly redundant since
    # daemon threads die with the main thread anyway.
    exit_event = threading.Event()

    # The function run by each per-model worker thread.
    def thread_target(i):
        while not exit_event.is_set():
            # If the run event is not set, just keep polling.
            if not run_event1.wait(0.001):
                continue
            # ----- Phase 1: forward and per-model losses -----
            TVT = TVTs[i]
            model_w = model_ws[i]
            ims = ims_list[i]
            labels = labels_list[i]
            optimizer = optimizers[i]
            ims_var = Variable(TVT(torch.from_numpy(ims).float()))
            labels_t = TVT(torch.from_numpy(labels).long())
            labels_var = Variable(labels_t)
            global_feature, spatial_feature, logits = model_w(ims_var)
            probs = F.softmax(logits, dim=1)
            log_probs = F.log_softmax(logits, dim=1)
            g_loss, p_inds, n_inds, g_dist_ap, g_dist_an, g_dist_mat = global_loss(
                g_tri_loss, global_feature, labels_t, normalize_feature=cfg.normalize_feature)
            if cfg.s_loss_weight == 0:
                s_loss, s_dist_mat = 0, 0
            else:
                s_loss, s_dist_ap, s_dist_an, s_dist_mat = spatial_loss(
                    s_tri_loss, spatial_feature, p_inds, n_inds, labels_t,
                    normalize_feature=cfg.normalize_feature)
            id_loss = 0
            if cfg.id_loss_weight > 0:
                id_loss = id_criterion(logits, labels_var)

            # Publish this model's outputs for the peers' mutual losses.
            probs_list[i] = probs
            g_dist_mat_list[i] = g_dist_mat
            s_dist_mat_list[i] = s_dist_mat
            done_list1[i] = True
            # Wait for phase 2 to be released, checking for exit meanwhile.
            while True:
                phase2_ready = run_event2.wait(0.001)
                if exit_event.is_set():
                    return
                if phase2_ready:
                    break

            # ----- Phase 2: mutual losses and backward -----
            # Probability Mutual Loss (KL divergence against each peer).
            pm_loss = 0
            if (cfg.num_models > 1) and (cfg.pm_loss_weight > 0):
                for j in range(cfg.num_models):
                    if j != i:
                        pm_loss += F.kl_div(log_probs, TVT(probs_list[j]).detach(), False)
                # BUGFIX: normalize once after the loop. The original divided
                # inside the loop, over-shrinking pm_loss when num_models > 2
                # (and inconsistently with gdm_loss/sdm_loss below).
                pm_loss /= 1. * (cfg.num_models - 1) * len(ims)
            # Global Distance Mutual Loss (L2).
            gdm_loss = 0
            if (cfg.num_models > 1) and (cfg.gdm_loss_weight > 0):
                for j in range(cfg.num_models):
                    if j != i:
                        gdm_loss += torch.sum(torch.pow(g_dist_mat - TVT(g_dist_mat_list[j]).detach(), 2))
                gdm_loss /= 1. * (cfg.num_models - 1) * len(ims) * len(ims)
            # Spatial Distance Mutual Loss (L2).
            sdm_loss = 0
            if (cfg.num_models > 1) and (cfg.sdm_loss_weight > 0):
                for j in range(cfg.num_models):
                    if j != i:
                        sdm_loss += torch.sum(torch.pow(s_dist_mat - TVT(s_dist_mat_list[j]).detach(), 2))
                sdm_loss /= 1. * (cfg.num_models - 1) * len(ims) * len(ims)
            loss = (g_loss * cfg.g_loss_weight + s_loss * cfg.s_loss_weight
                    + id_loss * cfg.id_loss_weight + pm_loss * cfg.pm_loss_weight
                    + gdm_loss * cfg.gdm_loss_weight + sdm_loss * cfg.sdm_loss_weight)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            ##################################
            # Step Log For One of the Models #
            ##################################
            # Only record stats for the first model.
            if i == 0:
                # Precision: fraction of triplets with dist_an > dist_ap.
                g_prec = (g_dist_an > g_dist_ap).data.float().mean()
                # Proportion of triplets that satisfy the margin.
                g_m = (g_dist_an > g_dist_ap + cfg.global_margin).data.float().mean()
                g_d_ap = g_dist_ap.data.mean()
                g_d_an = g_dist_an.data.mean()
                # These meters are outer-scope objects, rebound each epoch.
                g_prec_meter.update(g_prec)
                g_m_meter.update(g_m)
                g_dist_ap_meter.update(g_d_ap)
                g_dist_an_meter.update(g_d_an)
                g_loss_meter.update(to_scalar(g_loss))
                if cfg.s_loss_weight > 0:
                    s_prec = (s_dist_an > s_dist_ap).data.float().mean()
                    s_m = (s_dist_an > s_dist_ap + cfg.spatial_margin).data.float().mean()
                    s_d_ap = s_dist_ap.data.mean()
                    s_d_an = s_dist_an.data.mean()
                    s_prec_meter.update(s_prec)
                    s_m_meter.update(s_m)
                    s_dist_ap_meter.update(s_d_ap)
                    s_dist_an_meter.update(s_d_an)
                    s_loss_meter.update(to_scalar(s_loss))
                if cfg.id_loss_weight > 0:
                    id_loss_meter.update(to_scalar(id_loss))
                if (cfg.num_models > 1) and (cfg.pm_loss_weight > 0):
                    # BUGFIX: the original recorded gdm_loss into this meter.
                    pm_loss_meter.update(to_scalar(pm_loss))
                if (cfg.num_models > 1) and (cfg.gdm_loss_weight > 0):
                    # BUGFIX: gdm_loss_meter was never updated in the original.
                    gdm_loss_meter.update(to_scalar(gdm_loss))
                if (cfg.num_models > 1) and (cfg.sdm_loss_weight > 0):
                    # BUGFIX: convert to a python scalar, not a raw tensor.
                    sdm_loss_meter.update(to_scalar(sdm_loss))
                loss_meter.update(to_scalar(loss))
            ###################
            # End Up One Step #
            ###################
            run_event1.clear()
            run_event2.clear()
            done_list2[i] = True

    threads = []
    for i in range(cfg.num_models):
        thread = threading.Thread(target=thread_target, args=(i,))
        # Daemon mode, so that the main program ends normally.
        thread.daemon = True
        thread.start()
        threads.append(thread)
    start_ep = resume_ep if cfg.resume else 0

    for ep in range(start_ep, cfg.total_epochs):
        epoch_done = False
        g_prec_meter = AverageMeter()
        g_m_meter = AverageMeter()
        g_dist_ap_meter = AverageMeter()
        g_dist_an_meter = AverageMeter()
        g_loss_meter = AverageMeter()
        s_prec_meter = AverageMeter()
        s_m_meter = AverageMeter()
        s_dist_ap_meter = AverageMeter()
        s_dist_an_meter = AverageMeter()
        s_loss_meter = AverageMeter()
        id_loss_meter = AverageMeter()
        gdm_loss_meter = AverageMeter()
        sdm_loss_meter = AverageMeter()
        pm_loss_meter = AverageMeter()
        loss_meter = AverageMeter()
        # Adjust learning rate.
        for optimizer in optimizers:
            if cfg.lr_decay_type == 'exp':
                adjust_lr_exp(optimizer, cfg.base_lr, ep + 1, cfg.total_epochs, cfg.exp_decay_at_epoch)
            else:
                adjust_lr_staircase(optimizer, cfg.base_lr, ep + 1,
                                    cfg.staircase_decay_at_epochs, cfg.staircase_decay_multiply_factor)
        may_set_mode(modules_optims, 'train')
        step = 0
        ep_st = time.time()
        while not epoch_done:
            step += 1
            step_st = time.time()
            ims, im_names, labels, mirrored, epoch_done = train_set.next_batch()
            for i in range(cfg.num_models):
                ims_list[i] = ims
                labels_list[i] = labels
                done_list1[i] = False
                done_list2[i] = False
            run_event1.set()
            # Busy-wait until phase 1 is done on all threads.
            while not all(done_list1): continue
            run_event2.set()
            # Busy-wait until phase 2 is done on all threads.
            while not all(done_list2): continue

            StepLog(ep=ep, num_models=cfg.num_models, step=step, step_st=step_st, log_steps=cfg.log_steps,
                    g_loss_weight=cfg.g_loss_weight, s_loss_weight=cfg.s_loss_weight, id_loss_weight=cfg.id_loss_weight,
                    pm_loss_weight=cfg.pm_loss_weight, gdm_loss_weight=cfg.gdm_loss_weight, sdm_loss_weight=cfg.sdm_loss_weight,
                    g_prec_meter=g_prec_meter, g_m_meter=g_m_meter, s_prec_meter=s_prec_meter, s_m_meter=s_m_meter,
                    g_dist_ap_meter=g_dist_ap_meter, g_dist_an_meter=g_dist_an_meter, s_dist_ap_meter=s_dist_ap_meter, s_dist_an_meter=s_dist_an_meter,
                    loss_meter=loss_meter, g_loss_meter=g_loss_meter, s_loss_meter=s_loss_meter, id_loss_meter=id_loss_meter,
                    pm_loss_meter=pm_loss_meter, gdm_loss_meter=gdm_loss_meter, sdm_loss_meter=sdm_loss_meter)
        EpochLog(ep=ep, ep_st=ep_st, num_models=cfg.num_models, g_loss_weight=cfg.g_loss_weight, s_loss_weight=cfg.s_loss_weight, id_loss_weight=cfg.id_loss_weight,
                 pm_loss_weight=cfg.pm_loss_weight, gdm_loss_weight=cfg.gdm_loss_weight, sdm_loss_weight=cfg.sdm_loss_weight,
                 g_prec_meter=g_prec_meter, g_m_meter=g_m_meter, s_prec_meter=s_prec_meter, s_m_meter=s_m_meter,
                 g_dist_ap_meter=g_dist_ap_meter, g_dist_an_meter=g_dist_an_meter, s_dist_ap_meter=s_dist_ap_meter, s_dist_an_meter=s_dist_an_meter,
                 loss_meter=loss_meter, g_loss_meter=g_loss_meter, s_loss_meter=s_loss_meter, id_loss_meter=id_loss_meter,
                 pm_loss_meter=pm_loss_meter, gdm_loss_meter=gdm_loss_meter, sdm_loss_meter=sdm_loss_meter)
        Log2TensorBoard(ep=ep, log_to_file=cfg.log_to_file, exp_dir=cfg.exp_dir, writer=writer, loss_meter=loss_meter,
                        g_loss_meter=g_loss_meter, s_loss_meter=s_loss_meter, id_loss_meter=id_loss_meter,
                        g_m_meter=g_m_meter, s_m_meter=s_m_meter, g_dist_ap_meter=g_dist_ap_meter, g_dist_an_meter=g_dist_an_meter,
                        s_dist_ap_meter=s_dist_ap_meter, s_dist_an_meter=s_dist_an_meter,
                        pm_loss_meter=pm_loss_meter, gdm_loss_meter=gdm_loss_meter, sdm_loss_meter=sdm_loss_meter, g_prec_meter=g_prec_meter, s_prec_meter=s_prec_meter)

        # Save checkpoint. BUGFIX: the original did a stray `ep += 1` before
        # saving (recording ep + 2), which made a resumed run skip an epoch.
        if cfg.log_to_file:
            save_ckpt(modules_optims, ep + 1, 0, cfg.ckpt_file)

    ########
    # Test #
    ########
    test(load_model_weight=False)


class ExtractFeature(object):
    """Callable handed to the val/test set for extracting batch features.

    Args:
        model: network whose forward pass yields (global_feat, spatial_feat, ...).
        TVT: a callable that transfers tensors to the target device.
    """

    def __init__(self, model, TVT):
        self.model = model
        self.TVT = TVT

    def __call__(self, ims):
        # Remember the current mode so it can be restored afterwards.
        was_training = self.model.training
        # Eval mode: BN layers use global mean/variance, dropout is disabled.
        self.model.eval()
        batch = Variable(self.TVT(torch.from_numpy(ims).float()))
        outputs = self.model(batch)
        global_feat = outputs[0].data.cpu().numpy()
        spatial_feat = outputs[1].data.cpu().numpy()
        # Restore the model to its previous train/eval mode.
        self.model.train(was_training)
        return global_feat, spatial_feat




# Run training/testing only when executed as a script, not on import.
if __name__ == '__main__':
    main()