# --------------------------------------------------------
# Pytorch Meta R-CNN
# Written by Anny Xu, Xiaopeng Yan, based on the code from Jianwei Yang
# --------------------------------------------------------
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import collections
import torch
import torch.nn as nn
import torch.optim as optim
import random

#from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from torch.autograd import Variable
import torch.utils.data as Data
from roi_data_layer.roidb import combined_roidb, rank_roidb_ratio, filter_class_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
    adjust_learning_rate, save_checkpoint, clip_gradient
from model.faster_rcnn.resnet import resnet
import pickle
from datasets.metadata import MetaDataset, COCO_MetaDataset
from collections import OrderedDict

def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='Train Meta R-CNN network')
    # Define training data and Model
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset:coco2017,coco,pascal_07_12',
                        default='pascal_voc_0712', type=str)
    parser.add_argument('--net', dest='net',
                        help='metarcnn',
                        default='metarcnn', type=str)
    parser.add_argument('--frcn_joint', dest='frcn_joint',
                        help='FRCN+joint',
                        default=False, type=bool)
    # Define display and save dir
    parser.add_argument('--start_epoch', dest='start_epoch',
                        help='starting epoch',
                        default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs',
                        help='number of epochs to train',
                        default=21, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval',
                        help='number of iterations to display',
                        default=20, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations to display',
                        default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save models', default="./models",
                        type=str)
    # Define training parameters
    parser.add_argument('--nw', dest='num_workers',
                        help='number of worker to load data',
                        default=0, type=int)
    parser.add_argument('--cuda', dest='cuda', default=True, type=bool,
                        help='whether use CUDA')
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic', default=False, type=bool,
                        help='whether perform class_agnostic bbox regression')
    parser.add_argument('--tfa', dest='tfa', default=False, type=bool,
                        help='improved two phase fine-tuning approach')
    parser.add_argument('--margin', dest='margin', default=False, type=bool,
                        help='use margin loss')
    parser.add_argument('--sim', dest='sim', default=False, type=bool,
                        help='loss improved by similarity')
    parser.add_argument('--att_tsne', dest='att_tsne', default=False, type=bool,
                        help='plot the tsne result of proposals')
    parser.add_argument('--margin_value', dest='margin_value', default=0, type=float,
                        help='margin value')
    parser.add_argument('--adaptive_margin', dest='adaptive_margin', default=False, type=bool,
                        help='adaptive margin value')
    parser.add_argument('--random_seed', dest='random_seed', default=0, type=int,
                        help='random seed')
    # Define meta parameters
    parser.add_argument('--meta_train', dest='meta_train', default=False, type=bool,
                        help='whether perform meta training')
    parser.add_argument('--meta_loss', dest='meta_loss', default=False, type=bool,
                        help='whether perform adding meta loss')
    parser.add_argument('--phase', dest='phase',
                        help='the phase of training process',
                        default=1, type=int)
    parser.add_argument('--shots', dest='shots',
                        help='the number meta input of PRN network',
                        default=1, type=int)
    parser.add_argument('--meta_type', dest='meta_type', default=1, type=int,
                        help='choose which sets of metaclass')
    parser.add_argument('--save_checkpoint', dest='save_checkpoint', default=True, type=bool)
    parser.add_argument('--random_option', action='store_false', default=True,
                        help='random option')
    parser.add_argument('--all_base', dest='all_base', default=False, type=bool,
                        help='all_base')
    parser.add_argument('--au', dest='au', default=False, type=bool,
                        help='au')
    # config optimization
    parser.add_argument('--o', dest='optimizer',
                        help='training optimizer',
                        default="sgd", type=str)
    parser.add_argument('--lr', dest='lr',
                        help='starting learning rate',
                        default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=4, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1, type=float)
    # set training session
    parser.add_argument('--s', dest='session',
                        help='training session',
                        default=1, type=int)
    # resume trained model
    parser.add_argument('--r', dest='resume',
                        help='resume checkpoint or not',
                        default=False, type=bool)
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load model',
                        default=10, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load model',
                        default=21985, type=int)
    # log and display
    parser.add_argument('--use_tfboard', dest='use_tfboard',
                        help='whether use tensorflow tensorboard',
                        default=False, type=bool)
    parser.add_argument('--log_dir', dest='log_dir',
                        help='directory to save logs', default='logs',
                        type=str)
    args = parser.parse_args()
    return args


class sampler(Sampler):
    """Sampler that shuffles whole contiguous batches of indices.

    The dataset index range is cut into ``train_size // batch_size``
    contiguous chunks; each iteration permutes the *chunks* (not the
    elements inside them) so every batch stays a contiguous, aspect-ratio
    grouped slice of the roidb.  Any leftover tail that does not fill a
    full batch is appended unshuffled at the end.
    """

    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = train_size // batch_size
        self.batch_size = batch_size
        # Column offsets 0..batch_size-1, broadcast against each batch start.
        self.offsets = torch.arange(0, batch_size).view(1, batch_size).long()
        self.leftover_flag = (train_size % batch_size) != 0
        if self.leftover_flag:
            # Indices past the last full batch, kept in order.
            self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()

    def __iter__(self):
        # Random permutation of batch start offsets (multiples of batch_size).
        starts = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        indices = (starts.expand(self.num_per_batch, self.batch_size) + self.offsets).view(-1)
        if self.leftover_flag:
            indices = torch.cat((indices, self.leftover), 0)
        return iter(indices)

    def __len__(self):
        return self.num_data

# --------------------------------------------------------------------------
# Entry point: resolve dataset/imdb names, load the YAML config, and seed
# numpy before any data loading happens.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    #os.chdir('/home/wjh/Meta_R-CNN')
    args = parse_args()
    # Joint FRCN training disables the meta machinery (no shots, no split).
    if args.frcn_joint:
        args.shots = 0
        args.meta_type = 0
    # Map the dataset choice to the imdb names and anchor settings consumed
    # by combined_roidb() below.
    if args.dataset == "coco2017":
        args.imdb_name = "coco_2017_train"
        args.imdbval_name = "coco_2017_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "pascal_voc_0712":
        if args.phase == 1: # three types of base and novel classes splits
            if args.meta_type == 1:
                args.imdb_name = "voc_2007_train_first_split+voc_2012_train_first_split"
            elif args.meta_type == 2:
                args.imdb_name = "voc_2007_train_second_split+voc_2012_train_second_split"
            elif args.meta_type == 3:
                args.imdb_name = "voc_2007_train_third_split+voc_2012_train_third_split"
            elif args.meta_type == 0:
                args.imdb_name = "voc_2007_trainval"
        else:
            args.imdb_name = "voc_2007_base_shots+voc_2007_novel_shots" # the default sampled shots saved path of meta classes in the first phase
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
    # the number of sets of metaclass
    cfg.TRAIN.META_TYPE = args.meta_type

    cfg.USE_GPU_NMS = args.cuda
    if args.cuda:
        cfg.CUDA = True

    # Load the model config file; args.set_cfgs (above) overrides single keys.
    args.cfg_file = "cfgs/res101_ms.yml"
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    # NOTE(review): only numpy's RNG is seeded here; torch is not seeded in
    # this file — runs are not fully reproducible. Confirm if intended.
    np.random.seed(cfg.RNG_SEED)
    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    # ----------------------------------------------------------------------
    # Select the class split for this phase and build the PRN (meta) loaders.
    # Phase 1 trains on base classes only; phase 2 fine-tunes on few-shot
    # samples of base + novel classes.
    # NOTE(review): metaclass / base_metaclass stay undefined when
    # args.meta_type == 0 — later code only reaches them when meta_type > 0.
    # ----------------------------------------------------------------------
    if args.dataset == "pascal_voc_0712":
        if args.phase == 1:
            # First phase only use the base classes
            # `shots` here is only used for checkpoint naming / attention
            # extraction; the phase-1 MetaDataset below still uses args.shots.
            shots = 200
            if args.meta_type == 1:  #  use the first sets of base classes
                metaclass = cfg.TRAIN.BASECLASSES_FIRST
            if args.meta_type == 2:  #  use the second sets of base classes
                metaclass = cfg.TRAIN.BASECLASSES_SECOND
            if args.meta_type == 3:  #  use the third sets of base classes
                metaclass = cfg.TRAIN.BASECLASSES_THIRD
        else:
            # Second phase only use fewshot number of base and novel classes
            shots = args.shots
            if args.meta_type == 1:  #  use the first sets of all classes
                base_metaclass = cfg.TRAIN.BASECLASSES_FIRST
                novel_metaclass = cfg.TRAIN.NOVELCLASSES_FIRST
            if args.meta_type == 2:  #  use the second sets of all classes
                base_metaclass = cfg.TRAIN.BASECLASSES_SECOND
                novel_metaclass = cfg.TRAIN.NOVELCLASSES_SECOND
            if args.meta_type == 3:  #  use the third sets of all classes
                base_metaclass = cfg.TRAIN.BASECLASSES_THIRD
                novel_metaclass = cfg.TRAIN.NOVELCLASSES_THIRD
        # prepare meta sets for meta training
        if args.meta_train and args.meta_type > 0:
            # construct the input dataset of PRN network
            img_size = 224
            if args.phase == 1:
                img_set = [('2007', 'trainval'), ('2012', 'trainval')]
            else:
                img_set = [('2007', 'trainval')]

            if args.phase == 1:
                metadataset = MetaDataset('data/VOCdevkit2007',
                                        img_set, metaclass, img_size, shots=args.shots, shuffle=True, phase=args.phase, basenovel='base')

                metaloader = torch.utils.data.DataLoader(metadataset, batch_size=1, shuffle=False, num_workers=0,
                                                    pin_memory=False)
            
            if args.phase == 2:
                # Separate loaders for base and novel shots; the two streams
                # are concatenated per training step inside the loop below.
                base_metadataset = MetaDataset('data/VOCdevkit2007',
                                        img_set, base_metaclass, img_size, shots=args.shots, shuffle=args.random_option, phase=args.phase, basenovel='base', epoch = 21, random_seed=args.random_seed, split=args.meta_type)

                base_metaloader = torch.utils.data.DataLoader(base_metadataset, batch_size=1, shuffle=False, num_workers=0,
                                                    pin_memory=False)
                novel_metadataset = MetaDataset('data/VOCdevkit2007',
                                            img_set, novel_metaclass, img_size, shots=args.shots, shuffle=args.random_option, phase=args.phase, basenovel='novel', epoch = 21, random_seed=args.random_seed, split=args.meta_type)
                novel_metaloader = torch.utils.data.DataLoader(novel_metadataset, batch_size=1, shuffle=False, num_workers=0,
                                                        pin_memory=False)

    # Build the image database + ground-truth roidb for the detection stream.
    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)

    # NOTE(review): in the all_base branch only the cached shots roidb is
    # deleted; train_size / dataset / dataloader are NOT built here — they are
    # rebuilt per epoch inside the training loop instead (see the epoch loop).
    if args.phase == 2 and args.all_base and os.path.exists('data/cache/voc_2007_base_shots_gt_roidb.pkl'):
        os.remove('data/cache/voc_2007_base_shots_gt_roidb.pkl')
    else:
        train_size = len(roidb)
        print('{:d} roidb entries'.format(len(roidb)))
        sys.stdout.flush()
        
        # Batch-contiguous sampler so each batch groups similar aspect ratios.
        sampler_batch = sampler(train_size, args.batch_size)
        dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, imdb.num_classes, training=True)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                            sampler=sampler_batch, num_workers=args.num_workers, pin_memory=False)
    
    output_dir = args.save_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # initilize the network here (ResNet-101 backbone Meta R-CNN)
    if args.net == 'metarcnn':
        fasterRCNN = resnet(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic,
                            meta_train=args.meta_train, meta_loss=args.meta_loss, margin=args.margin, sim=args.sim, margin_value=args.margin_value)
    fasterRCNN.create_architecture()

    # Initialize the optimizer: build per-parameter groups so biases can get a
    # doubled learning rate and (optionally) no weight decay, mirroring the
    # original Faster R-CNN training recipe.
    lr = args.lr  # command-line lr wins (removed dead read of cfg.TRAIN.LEARNING_RATE that was immediately overwritten)
    params = []
    for key, value in dict(fasterRCNN.named_parameters()).items():
        if not value.requires_grad:
            continue  # skip frozen parameters
        if 'bias' in key:
            params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                        'weight_decay': cfg.TRAIN.WEIGHT_DECAY if cfg.TRAIN.BIAS_DECAY else 0}]
        else:
            params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    if args.optimizer == "adam":
        # NOTE: this only lowers the lr value used for *logging*; each Adam
        # param group keeps the lr it was built with above (original behavior).
        lr = lr * 0.1
        optimizer = torch.optim.Adam(params)
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)

    if args.cuda:
        fasterRCNN.cuda()
    if args.au:
        # enable the auxiliary (base/novel-separated) loss path in the model
        fasterRCNN.au = True
        
    # Optionally resume from a saved checkpoint.
    if args.resume:
        load_name = os.path.join(output_dir,
                                 '{}_metarcnn_{}_{}_{}.pth'.format(args.dataset, args.checksession,
                                                                   args.checkepoch, args.checkpoint))
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name)
        args.session = checkpoint['session']
        args.start_epoch = checkpoint['epoch']
        # the number of classes in second phase is different from first phase
        if args.phase == 2:
            # Rebuild the state dict: copy all phase-1 weights, but replace the
            # classification / bbox-regression heads with freshly initialized
            # nn.Linear weights sized for the phase-2 class count.
            # NOTE(review): the optimizer state is deliberately NOT restored in
            # phase 2 (fresh fine-tuning), only in phase 1.
            new_state_dict = OrderedDict()
            # initilize params of RCNN_cls_score and RCNN_bbox_pred for second phase
            RCNN_cls_score = nn.Linear(2048, imdb.num_classes)
            if args.class_agnostic:
                RCNN_bbox_pred = nn.Linear(2048, 4)
            else:
                RCNN_bbox_pred = nn.Linear(2048, 4 * imdb.num_classes)
            for k, v in checkpoint['model'].items():
                # Drop the meta classifier weights when meta loss is disabled.
                if not args.meta_loss:
                    if 'Meta_cls_score.weight' in k:
                        continue
                    if 'Meta_cls_score.bias' in k:
                        continue
                name = k
                new_state_dict[name] = v
                if 'RCNN_cls_score.weight' in k:
                    new_state_dict[name] = RCNN_cls_score.weight
                if 'RCNN_cls_score.bias' in k:
                    new_state_dict[name] = RCNN_cls_score.bias
                if 'RCNN_bbox_pred.weight' in k:
                    new_state_dict[name] = RCNN_bbox_pred.weight
                if 'RCNN_bbox_pred.bias' in k:
                    new_state_dict[name] = RCNN_bbox_pred.bias
            fasterRCNN.load_state_dict(new_state_dict)
        elif args.phase == 1:
            # Phase-1 resume restores model, optimizer and learning rate.
            fasterRCNN.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr = optimizer.param_groups[0]['lr']

        if 'pooling_mode' in checkpoint.keys():
            cfg.POOLING_MODE = checkpoint['pooling_mode']
        print("loaded checkpoint %s" % (load_name))
    
    # ----------------------------------------------------------------------
    # Training loop: one pass over the roidb per epoch.
    # ----------------------------------------------------------------------
    for epoch in range(args.start_epoch, args.max_epochs):
        fasterRCNN.train()
        loss_temp = 0  # running loss accumulator, reset every disp_interval
        start = time.time()
        fasterRCNN.epoch = epoch
        if args.adaptive_margin:
            # Clear per-epoch feature/label dumps used by the adaptive margin.
            # NOTE(review): hard-coded absolute path — breaks on other machines.
            if os.path.exists('/home/wjh/Meta_R-CNN/logs/gt_box_feature_%.0f.txt' % epoch):
                os.remove('/home/wjh/Meta_R-CNN/logs/gt_box_feature_%.0f.txt' % epoch)
            if os.path.exists('/home/wjh/Meta_R-CNN/logs/true_label_%.0f.txt' % epoch):
                os.remove('/home/wjh/Meta_R-CNN/logs/true_label_%.0f.txt' % epoch)

        # The all_base variant resamples the base shots every epoch, so the
        # meta loader, roidb and dataloader are rebuilt here each time.
        if args.phase == 2 and args.all_base:
            base_metadataset = MetaDataset('data/VOCdevkit2007', img_set, base_metaclass, img_size, shots=args.shots, shuffle=args.random_option, phase=args.phase, basenovel='base', epoch=epoch, random_seed=args.random_seed, split=args.meta_type)

            base_metaloader = torch.utils.data.DataLoader(base_metadataset, batch_size=1, shuffle=False, num_workers=0, pin_memory=False)
                                                    
            imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
            # filter roidb for the second phase
        
            roidb = filter_class_roidb(roidb, args.shots, imdb, args.dataset)
            ratio_list, ratio_index = rank_roidb_ratio(roidb)
            imdb.set_roidb(roidb)

            train_size = len(roidb)
            print('{:d} roidb entries'.format(len(roidb)))
            sys.stdout.flush()
            
            sampler_batch = sampler(train_size, args.batch_size)
            dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, imdb.num_classes, training=True)
            dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                                sampler=sampler_batch, num_workers=args.num_workers, pin_memory=False)
        
            # Drop the cached shots so next epoch resamples from scratch.
            if os.path.exists('data/cache/voc_2007_base_shots_gt_roidb.pkl'):
                os.remove('data/cache/voc_2007_base_shots_gt_roidb.pkl')
        
        fasterRCNN.margin_value = args.margin_value

        iters_per_epoch = int(train_size / args.batch_size)
        print('the number of iterations of each epoch is %.0f' % iters_per_epoch)

        # Decay lr every (lr_decay_step + 1) epochs.
        # NOTE(review): the +1 offset is inherited from the upstream
        # faster-rcnn.pytorch code — confirm the intended decay schedule.
        if epoch % (args.lr_decay_step + 1) == 0:
            adjust_learning_rate(optimizer, args.lr_decay_gamma)
            lr *= args.lr_decay_gamma
        
        data_iter = iter(dataloader)
        # Fresh meta iterators each epoch (phase 1: single loader;
        # phase 2: separate base and novel loaders).
        if not args.frcn_joint:
            if args.phase == 1:
                meta_iter = iter(metaloader)
            elif args.phase == 2:
                meta_iter1 = iter(base_metaloader)
                meta_iter2 = iter(novel_metaloader)
        # ------------------------------------------------------------------
        # Step loop: fetch one detection batch and (re)allocate the input
        # tensor holders for this iteration.
        # ------------------------------------------------------------------
        for step in range(iters_per_epoch):
            try:
                data = next(data_iter)
            except StopIteration:
                # Loader exhausted mid-epoch: restart it.  Narrowed from a
                # bare `except:` that also swallowed real data-loading errors
                # (and KeyboardInterrupt).
                data_iter = iter(dataloader)
                data = next(data_iter)
            
            im_data_list = []
            im_info_list = []
            gt_boxes_list = []
            num_boxes_list = []

            # initilize the tensor holder here; they are resized to the
            # actual batch shapes further below via .data.resize_().
            im_data = torch.FloatTensor(1)
            im_info = torch.FloatTensor(1)
            num_boxes = torch.LongTensor(1)
            gt_boxes = torch.FloatTensor(1)
            # ship to cuda
            if args.cuda:
                im_data = im_data.cuda()
                im_info = im_info.cuda()
                num_boxes = num_boxes.cuda()
                gt_boxes = gt_boxes.cuda()
            # make variable (legacy PyTorch <= 0.3 autograd wrapper)
            im_data = Variable(im_data)
            im_info = Variable(im_info)
            num_boxes = Variable(num_boxes)
            gt_boxes = Variable(gt_boxes)
            
            base_class_size = 0
            novel_class_size = 0

            # Assemble model inputs: with meta-training the PRN support images
            # are prepended to the lists, followed by the query batch.
            if args.meta_train and not args.frcn_joint:
                if args.phase == 1:
                    try:
                        prndata, prncls = next(meta_iter)
                    except StopIteration:
                        # BUGFIX: was `iter(base_metaloader)`, which is
                        # undefined in phase 1 and raised NameError once the
                        # phase-1 meta loader was exhausted.
                        meta_iter = iter(metaloader)
                        prndata, prncls = next(meta_iter)
                    
                    im_data_list.append(Variable(torch.cat(prndata,dim=0).cuda()))
                    im_info_list.append(prncls)
                    
                elif args.phase == 2:
                    try:
                        base_prndata, base_prncls = next(meta_iter1)
                    except StopIteration:
                        meta_iter1 = iter(base_metaloader)
                        base_prndata, base_prncls = next(meta_iter1)
                    base_class_size = len(base_metaclass)

                    try:
                        novel_prndata, novel_prncls = next(meta_iter2)
                    except StopIteration:
                        meta_iter2 = iter(novel_metaloader)
                        novel_prndata, novel_prncls = next(meta_iter2)
                    # NOTE(review): images are concatenated novel-then-base
                    # while the class list is base-then-novel — confirm the
                    # model pairs attentions to classes by content, not order.
                    im_data_list.append(Variable(torch.cat((torch.cat(novel_prndata,dim=0),torch.cat(base_prndata,dim=0)),dim=0).cuda()))
                    im_info_list.append(base_prncls + novel_prncls)
                    novel_class_size = len(novel_metaclass)

                # Copy the query batch into the pre-allocated holders.
                im_data.data.resize_(data[0].size()).copy_(data[0])
                im_info.data.resize_(data[1].size()).copy_(data[1])
                gt_boxes.data.resize_(data[2].size()).copy_(data[2])
                num_boxes.data.resize_(data[3].size()).copy_(data[3])
                im_data_list.append(im_data)
                im_info_list.append(im_info)
                gt_boxes_list.append(gt_boxes)
                num_boxes_list.append(num_boxes)

            else:
                # Plain Faster R-CNN step: only the query batch is fed.
                im_data.data.resize_(data[0].size()).copy_(data[0])
                im_info.data.resize_(data[1].size()).copy_(data[1])
                gt_boxes.data.resize_(data[2].size()).copy_(data[2])
                num_boxes.data.resize_(data[3].size()).copy_(data[3])
                im_data_list.append(im_data)
                im_info_list.append(im_info)
                gt_boxes_list.append(gt_boxes)
                num_boxes_list.append(num_boxes)
            
            fasterRCNN.zero_grad()
            
            # cudnn is toggled off for the forward/backward pass (presumably a
            # workaround for variable-size meta inputs — TODO confirm).
            torch.backends.cudnn.enabled = False
            if not fasterRCNN.au:
                rpn_loss_cls, rpn_loss_box, \
                RCNN_loss_cls, RCNN_loss_bbox, \
                rois_label, meta_loss = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list)
            else:
                # Auxiliary mode returns base/novel-separated losses.
                rpn_loss_cls, rpn_loss_box, \
                RCNN_loss_cls, RCNN_loss_bbox, \
                rois_label, meta_loss_base, meta_loss_novel, \
                RCNN_loss_cls_base, RCNN_loss_cls_novel, RCNN_loss_bbox_base, RCNN_loss_bbox_novel = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list)
            # import pdb;pdb.set_trace()
            if args.meta_train:
                if args.phase == 1:
                    # NOTE(review): phase 1 computes `loss` but never calls
                    # backward()/step() here — confirm whether phase-1 updates
                    # were intended in this code path.
                    loss = rpn_loss_cls.mean() + rpn_loss_box.mean() + sum(RCNN_loss_cls) / args.batch_size + sum(
                        RCNN_loss_bbox) / args.batch_size + meta_loss / len(metaclass)
                elif args.phase == 2:
                    if not args.au:
                        # Total loss: RPN + RCNN (averaged over batch) + meta
                        # loss normalized by the number of meta classes.
                        loss = rpn_loss_cls.mean() + rpn_loss_box.mean() + sum(RCNN_loss_cls) / args.batch_size + sum(
                            RCNN_loss_bbox) / args.batch_size + meta_loss / (base_class_size + novel_class_size)
                        
                        # `.data[0]` is the legacy (PyTorch <= 0.3) scalar access.
                        loss_temp += float(loss.data[0])
    
                        # backward
                        optimizer.zero_grad()
                        loss.backward()
                        # if args.net == "vgg16" or "res101":
                        #     clip_gradient(fasterRCNN, 10.)
                        optimizer.step()
                    else:
                        # Two separate optimizer steps: base losses first
                        # (retaining the graph), then novel losses.
                        loss_base = rpn_loss_cls.mean() + rpn_loss_box.mean() + sum(RCNN_loss_cls_base) / args.batch_size + sum(
                            RCNN_loss_bbox_base) / args.batch_size + meta_loss_base / (base_class_size + novel_class_size)
                        loss_novel = rpn_loss_cls.mean() + rpn_loss_box.mean() + sum(RCNN_loss_cls_novel) / args.batch_size + sum(
                            RCNN_loss_bbox_novel) / args.batch_size + meta_loss_novel / (base_class_size + novel_class_size)
                        loss_temp += loss_base + loss_novel
                        optimizer.zero_grad()
                        loss_base.backward(retain_graph=True)
                        optimizer.step()
                        optimizer.zero_grad()
                        loss_novel.backward()
                        optimizer.step()

                        '''
                        if step % 2 == 0:
                            loss = loss_base + loss_novel * 0
                        else:
                            loss = loss_novel + loss_base * 0
                        '''
            else:
                # Plain Faster R-CNN loss (no meta terms).
                loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
                    + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
            
                loss_temp += float(loss.data[0])
    
                # backward
                optimizer.zero_grad()
                loss.backward()
                # if args.net == "vgg16" or "res101":
                #     clip_gradient(fasterRCNN, 10.)
                optimizer.step()
            torch.cuda.empty_cache() # error 77?
            torch.backends.cudnn.enabled = True
                # del loss, rpn_loss_box, rpn_loss_cls, meta_loss, RCNN_loss_cls, RCNN_loss_bbox

            # Periodic console logging of the averaged loss and fg/bg counts.
            if step % args.disp_interval == 0:
                end = time.time()
                if step > 0:
                    loss_temp /= args.disp_interval  # loss_temp is aver loss
                # foreground / background RoI counts in the last batch
                fg_cnt = torch.sum(rois_label.data.ne(0))
                bg_cnt = rois_label.data.numel() - fg_cnt

                print("[session %d][epoch %2d][iter %4d] loss: %.4f, lr: %.2e" \
                    % (args.session, epoch, step, loss_temp, lr))
                print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start))
                if args.au:
                    print("\t\t\tbase_loss: %.4f, novel_loss: %.4f" \
                        % (float(loss_base.data[0]), float(loss_novel.data[0])))
                '''
                if args.meta_train:
                    print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
                        % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
                    print("\t\t\tmeta_loss %.4f" %(loss_metarcnn))
                else:
                    print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
                        % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
                '''
                sys.stdout.flush() 

                loss_temp = 0
                start = time.time()

        # Build the checkpoint filename for this epoch.
        # Naming: <dataset>_<net>_<shots>_<epoch>_<iters|200>.pth
        if args.meta_train:
            if args.phase == 1:
                save_name = os.path.join(output_dir,
                                    '{}_{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net), shots, epoch,
                                                                iters_per_epoch))
            elif args.all_base:
                # all_base runs use a fixed "200" suffix instead of iters.
                save_name = os.path.join(output_dir,
                                    '{}_{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net), shots, epoch,
                                                                200))
            else:
                save_name = os.path.join(output_dir,
                                    '{}_{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net), shots, epoch,
                                                                iters_per_epoch))
        else:
            save_name = os.path.join(output_dir, '{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net),
                                                                        epoch, 200))
        if args.phase == 2 and args.meta_train: # to extract the mean classes attentions of shots for testing
            torch.backends.cudnn.enabled = False
            class_attentions = collections.defaultdict(list)
            meta_iter1 = iter(base_metaloader)
            meta_iter2 = iter(novel_metaloader)
            # One pass per shot: run the PRN on each support image (base then
            # novel) and collect the per-class attention vectors.
            for i in range(shots):
                prndata, prncls = next(meta_iter1)
                im_data_list = []
                im_info_list = []
                gt_boxes_list = []
                num_boxes_list = []
                im_data = torch.FloatTensor(1)
                if args.cuda:
                    im_data = im_data.cuda()
                # volatile=True: legacy no-grad inference (PyTorch <= 0.3)
                im_data = Variable(im_data, volatile=True)
                # NOTE(review): here prndata is used as a tensor (.squeeze),
                # unlike the training loop where it is torch.cat'd as a list —
                # presumably the loader yields a batched tensor; confirm.
                im_data.data.resize_(prndata.squeeze(0).size()).copy_(prndata.squeeze(0))
                im_data_list.append(im_data)
                attentions = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list,
                                                average_shot=True)
                for idx, cls in enumerate(prncls):
                    class_attentions[int(cls)].append(attentions[idx])
                    
                # Same extraction for the novel-class support images.
                prndata, prncls = next(meta_iter2)
                im_data_list = []
                im_info_list = []
                gt_boxes_list = []
                num_boxes_list = []
                im_data = torch.FloatTensor(1)
                if args.cuda:
                    im_data = im_data.cuda()
                im_data = Variable(im_data, volatile=True)
                im_data.data.resize_(prndata.squeeze(0).size()).copy_(prndata.squeeze(0))
                im_data_list.append(im_data)
                attentions = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list,
                                                average_shot=True)
                for idx, cls in enumerate(prncls):
                    class_attentions[int(cls)].append(attentions[idx])
            torch.backends.cudnn.enabled = True
            # calculate mean attention vectors of every class
            mean_class_attentions = {k: sum(v) / len(v) for k, v in class_attentions.items()}
            save_path = 'attentions'
            if not os.path.exists(save_path):
                os.mkdir(save_path)
            # The same means are written twice: a generic file and one tagged
            # with the random seed (used by different evaluation scripts).
            with open(os.path.join(save_path, 'split_' + str(args.meta_type) + '_shots_' + str(args.shots) + '_epoch_' + str(epoch) + '_mean_class_attentions.pkl'), 'wb') as f:
                pickle.dump(mean_class_attentions, f, pickle.HIGHEST_PROTOCOL)
            with open(os.path.join(save_path, 'split_' + str(args.meta_type) + '_shots_' + str(args.shots) + '_random_seed_' + str(args.random_seed) + '_epoch_' + str(epoch) + '_mean_class_attentions.pkl'), 'wb') as f:
                pickle.dump(mean_class_attentions, f, pickle.HIGHEST_PROTOCOL)
            print('save ' + str(epoch) + '_' + str(args.shots) + ' mean classes attentions done!')

        # Persist the model: every epoch when --save_checkpoint is set,
        # otherwise only after the final epoch.
        if args.save_checkpoint:
            save_checkpoint({
                'session': args.session,
                'epoch': epoch + 1,
                'model': fasterRCNN.state_dict(),
                'optimizer': optimizer.state_dict(),
                'pooling_mode': cfg.POOLING_MODE,
                'class_agnostic': args.class_agnostic,
            }, save_name)
        else:
            if epoch == args.max_epochs - 1:
                save_checkpoint({
                    'session': args.session,
                    'epoch': epoch + 1,
                    'model': fasterRCNN.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'pooling_mode': cfg.POOLING_MODE,
                    'class_agnostic': args.class_agnostic,
                }, save_name)
        # BUGFIX: removed the stray `f.close()` that was here. `f` is only
        # bound inside the phase-2 attention-saving `with` blocks (which close
        # it automatically on exit); in phase 1 the name was undefined and the
        # call raised NameError at the end of the first epoch.